Mirror of https://github.com/juanfont/headscale.git (synced 2026-02-12 10:47:42 +01:00)

Compare commits: kradalby/3...main (62 commits)
Commits (short SHA1, newest first):

e0d8c3c877, c1b468f9f4, 900f4b7b75, 64f23136a2, 0f6d312ada, 20dff82f95,
31c4331a91, ce580f8245, bfb6fd80df, 3acce2da87, 4a9a329339, dd16567c52,
e0a436cefc, 53cdeff129, 7148a690d0, 4e73133b9f, 4f8724151e, 91730e2a1d,
b5090a01ec, 27f5641341, cf3d30b6f6, 58020696fe, e44b402fe4, 835b7eb960,
95b1fd636e, 834ac27779, 4a4032a4b0, 29aa08df0e, 0b1727c337, 08fe2e4d6c,
cb29cade46, f27298c759, 8baa14ef4a, ebdbe03639, f735502eae, 53d17aa321,
14f833bdb9, 9e50071df9, c907b0d323, 97fa117c48, b5329ff0f3, eac8a57bce,
44af046196, 4a744f423b, ca75e096e6, ce7c256d1e, 4912ceaaf5, d7f7f2c85e,
df184e5276, 0630fd32e5, 306aabbbce, a09b0d1d69, 362696a5ef, 1f32c8bf61,
fb137a8fe3, c2f28efbd7, 11f0d4cfdd, 5d300273dc, 7f003ecaff, 2695d1527e,
d32f6707f7, 89e436f0e6
.github/ISSUE_TEMPLATE/bug_report.yaml (vendored, 6 changed lines)

@@ -6,8 +6,7 @@ body:
   - type: checkboxes
     attributes:
       label: Is this a support request?
-      description:
-        This issue tracker is for bugs and feature requests only. If you need
+      description: This issue tracker is for bugs and feature requests only. If you need
         help, please use ask in our Discord community
       options:
         - label: This is not a support request
@@ -15,8 +14,7 @@ body:
   - type: checkboxes
     attributes:
       label: Is there an existing issue for this?
-      description:
-        Please search to see if an issue already exists for the bug you
+      description: Please search to see if an issue already exists for the bug you
         encountered.
       options:
         - label: I have searched the existing issues

.github/ISSUE_TEMPLATE/config.yml (vendored, 8 changed lines)

@@ -3,9 +3,9 @@ blank_issues_enabled: false

 # Contact links
 contact_links:
-  - name: "headscale usage documentation"
-    url: "https://github.com/juanfont/headscale/blob/main/docs"
-    about: "Find documentation about how to configure and run headscale."
   - name: "headscale Discord community"
-    url: "https://discord.gg/xGj2TuqyxY"
+    url: "https://discord.gg/c84AZQhmpx"
     about: "Please ask and answer questions about usage of headscale here."
+  - name: "headscale usage documentation"
+    url: "https://headscale.net/"
+    about: "Find documentation about how to configure and run headscale."

.github/label-response/needs-more-info.md (vendored, new file, 80 lines)

@@ -0,0 +1,80 @@
Thank you for taking the time to report this issue.

To help us investigate and resolve this, we need more information. Please provide the following:

> [!TIP]
> Most issues turn out to be configuration errors rather than bugs. We encourage you to discuss your problem in our [Discord community](https://discord.gg/c84AZQhmpx) **before** opening an issue. The community can often help identify misconfigurations quickly, saving everyone time.

## Required Information

### Environment Details

- **Headscale version**: (run `headscale version`)
- **Tailscale client version**: (run `tailscale version`)
- **Operating System**: (e.g., Ubuntu 24.04, macOS 14, Windows 11)
- **Deployment method**: (binary, Docker, Kubernetes, etc.)
- **Reverse proxy**: (if applicable: nginx, Traefik, Caddy, etc. - include configuration)

### Debug Information

Please follow our [Debugging and Troubleshooting Guide](https://headscale.net/stable/ref/debug/) and provide:

1. **Client netmap dump** (from affected Tailscale client):

   ```bash
   tailscale debug netmap > netmap.json
   ```

2. **Client status dump** (from affected Tailscale client):

   ```bash
   tailscale status --json > status.json
   ```

3. **Tailscale client logs** (if experiencing client issues):

   ```bash
   tailscale debug daemon-logs
   ```

> [!IMPORTANT]
> We need logs from **multiple nodes** to understand the full picture:
>
> - The node(s) initiating connections
> - The node(s) being connected to
>
> Without logs from both sides, we cannot diagnose connectivity issues.

4. **Headscale server logs** with `log.level: trace` enabled

5. **Headscale configuration** (with sensitive values redacted - see rules below)

6. **ACL/Policy configuration** (if using ACLs)

7. **Proxy/Docker configuration** (if applicable - nginx.conf, docker-compose.yml, Traefik config, etc.)

## Formatting Requirements

- **Attach long files** - Do not paste large logs or configurations inline. Use GitHub file attachments or GitHub Gists.
- **Use proper Markdown** - Format code blocks, logs, and configurations with appropriate syntax highlighting.
- **Structure your response** - Use the headings above to organize your information clearly.

## Redaction Rules

> [!CAUTION]
> **Replace, do not remove.** Removing information makes debugging impossible.

When redacting sensitive information:

- ✅ **Replace consistently** - If you change `alice@company.com` to `user1@example.com`, use `user1@example.com` everywhere (logs, config, policy, etc.)
- ✅ **Use meaningful placeholders** - `user1@example.com`, `bob@example.com`, `my-secret-key` are acceptable
- ❌ **Never remove information** - Gaps in data prevent us from correlating events across logs
- ❌ **Never redact IP addresses** - We need the actual IPs to trace network paths and identify issues

**If redaction rules are not followed, we will be unable to debug the issue and will have to close it.**

---

**Note:** This issue will be automatically closed in 3 days if no additional information is provided. Once you reply with the requested information, the `needs-more-info` label will be removed automatically.

If you need help gathering this information, please visit our [Discord community](https://discord.gg/c84AZQhmpx).

.github/label-response/support-request.md (vendored, new file, 15 lines)

@@ -0,0 +1,15 @@
Thank you for reaching out.

This issue tracker is used for **bug reports and feature requests** only. Your question appears to be a support or configuration question rather than a bug report.

For help with setup, configuration, or general questions, please visit our [Discord community](https://discord.gg/c84AZQhmpx) where the community and maintainers can assist you in real-time.

**Before posting in Discord, please check:**

- [Documentation](https://headscale.net/)
- [FAQ](https://headscale.net/stable/faq/)
- [Debugging and Troubleshooting Guide](https://headscale.net/stable/ref/debug/)

If after troubleshooting you determine this is actually a bug, please open a new issue with the required debug information from the troubleshooting guide.

This issue has been automatically closed.

.github/workflows/needs-more-info-comment.yml (vendored, new file, 28 lines)

@@ -0,0 +1,28 @@
name: Needs More Info - Post Comment

on:
  issues:
    types: [labeled]

jobs:
  post-comment:
    if: >-
      github.event.label.name == 'needs-more-info' &&
      github.repository == 'juanfont/headscale'
    runs-on: ubuntu-latest
    permissions:
      issues: write
      contents: read
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          sparse-checkout: .github/label-response/needs-more-info.md
          sparse-checkout-cone-mode: false

      - name: Post instruction comment
        run: gh issue comment "$NUMBER" --body-file .github/label-response/needs-more-info.md
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GH_REPO: ${{ github.repository }}
          NUMBER: ${{ github.event.issue.number }}

.github/workflows/needs-more-info-timer.yml (vendored, new file, 31 lines)

@@ -0,0 +1,31 @@
name: Needs More Info - Timer

on:
  schedule:
    - cron: "0 0 * * *" # Daily at midnight UTC
  issue_comment:
    types: [created]
  workflow_dispatch:

jobs:
  manage-needs-more-info:
    if: >-
      github.repository == 'juanfont/headscale' &&
      (github.event_name != 'issue_comment' || github.event.comment.user.type != 'Bot')
    runs-on: ubuntu-latest
    permissions:
      issues: write
    steps:
      - name: Manage needs-more-info issues
        uses: tiangolo/issue-manager@2fb3484ec9279485df8659e8ec73de262431737d # v0.6.0
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          config: >
            {
              "needs-more-info": {
                "delay": "P3D",
                "message": "This issue has been automatically closed because no additional information was provided within 3 days.\n\nIf you now have the requested information, please feel free to reopen this issue and provide the details. We're happy to help once we have enough context to investigate.\n\nThank you for your understanding.",
                "remove_label_on_comment": true,
                "remove_label_on_close": true
              }
            }

.github/workflows/stale.yml (vendored, 2 changed lines)

@@ -23,5 +23,5 @@ jobs:
           since being marked as stale."
       days-before-pr-stale: -1
       days-before-pr-close: -1
-      exempt-issue-labels: "no-stale-bot"
+      exempt-issue-labels: "no-stale-bot,needs-more-info"
       repo-token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/support-request.yml (vendored, new file, 30 lines)

@@ -0,0 +1,30 @@
name: Support Request - Close Issue

on:
  issues:
    types: [labeled]

jobs:
  close-support-request:
    if: >-
      github.event.label.name == 'support-request' &&
      github.repository == 'juanfont/headscale'
    runs-on: ubuntu-latest
    permissions:
      issues: write
      contents: read
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          sparse-checkout: .github/label-response/support-request.md
          sparse-checkout-cone-mode: false

      - name: Post comment and close issue
        run: |
          gh issue comment "$NUMBER" --body-file .github/label-response/support-request.md
          gh issue close "$NUMBER" --reason "not planned"
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GH_REPO: ${{ github.repository }}
          NUMBER: ${{ github.event.issue.number }}

.github/workflows/test-integration.yaml (vendored, 1 changed line)

@@ -247,6 +247,7 @@ jobs:
           - TestTagsUserLoginReauthWithEmptyTagsRemovesAllTags
           - TestTagsAuthKeyWithoutUserInheritsTags
           - TestTagsAuthKeyWithoutUserRejectsAdvertisedTags
+          - TestTagsAuthKeyConvertToUserViaCLIRegister
     uses: ./.github/workflows/integration-test-template.yml
     secrets: inherit
     with:

@@ -18,6 +18,7 @@ linters:
     - lll
     - maintidx
     - makezero
     - mnd
     - musttag
     - nestif
     - nolintlint
@@ -37,6 +38,23 @@ linters:
           time.Sleep is forbidden.
           In tests: use assert.EventuallyWithT for polling/waiting patterns.
           In production code: use a backoff strategy (e.g., cenkalti/backoff) or proper synchronization primitives.
+      # Forbid inline string literals in zerolog field methods - use zf.* constants
+      - pattern: '\.(Str|Int|Int8|Int16|Int32|Int64|Uint|Uint8|Uint16|Uint32|Uint64|Float32|Float64|Bool|Dur|Time|TimeDiff|Strs|Ints|Uints|Floats|Bools|Any|Interface)\("[^"]+"'
+        msg: >-
+          Use zf.* constants for zerolog field names instead of string literals.
+          Import "github.com/juanfont/headscale/hscontrol/util/zlog/zf" and use
+          constants like zf.NodeID, zf.UserName, etc. Add new constants to
+          hscontrol/util/zlog/zf/fields.go if needed.
+      # Forbid ptr.To - use Go 1.26 new(expr) instead
+      - pattern: 'ptr\.To\('
+        msg: >-
+          ptr.To is forbidden. Use Go 1.26's new(expr) syntax instead.
+          Example: ptr.To(value) → new(value)
+      # Forbid tsaddr.SortPrefixes - use slices.SortFunc with netip.Prefix.Compare
+      - pattern: 'tsaddr\.SortPrefixes'
+        msg: >-
+          tsaddr.SortPrefixes is forbidden. Use Go 1.26's netip.Prefix.Compare instead.
+          Example: slices.SortFunc(prefixes, netip.Prefix.Compare)
     analyze-types: true
     gocritic:
       disabled-checks:
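
As a rough illustration of what these new lint rules steer contributors toward, the sketch below contrasts the banned calls with their replacements. It assumes the Go 1.26 toolchain the config references for `new(expr)` and `netip.Prefix.Compare`, and the `zf` field-name constants are stood in for by a local constant, since only the rule text above describes them.

```go
package main

import (
	"fmt"
	"net/netip"
	"slices"
)

// Local stand-in for the repo's zf field-name constants
// (hscontrol/util/zlog/zf); the real package defines these.
const zfNodeID = "node_id"

func main() {
	// Instead of tsaddr.SortPrefixes(prefixes):
	prefixes := []netip.Prefix{
		netip.MustParsePrefix("100.64.0.0/10"),
		netip.MustParsePrefix("10.0.0.0/8"),
	}
	slices.SortFunc(prefixes, netip.Prefix.Compare)

	// Instead of ptr.To(value), the lint message suggests Go 1.26's new(expr):
	//   port := new(8080) // *int pointing at a copy of 8080

	// Instead of log.Info().Str("node_id", ...), use the shared constant:
	fmt.Println(zfNodeID, prefixes)
}
```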

@@ -2,7 +2,7 @@
 version: 2
 before:
   hooks:
-    - go mod tidy -compat=1.25
+    - go mod tidy -compat=1.26
     - go mod vendor

 release:

@@ -43,26 +43,12 @@ repos:
         entry: prettier --write --list-different
         language: system
         exclude: ^docs/
-        types_or:
-          [
-            javascript,
-            jsx,
-            ts,
-            tsx,
-            yaml,
-            json,
-            toml,
-            html,
-            css,
-            scss,
-            sass,
-            markdown,
-          ]
+        types_or: [javascript, jsx, ts, tsx, yaml, json, toml, html, css, scss, sass, markdown]

       # golangci-lint for Go code quality
       - id: golangci-lint
         name: golangci-lint
-        entry: nix develop --command golangci-lint run --new-from-rev=HEAD~1 --timeout=5m --fix
+        entry: golangci-lint run --new-from-rev=HEAD~1 --timeout=5m --fix
         language: system
         types: [go]
         pass_filenames: false

CHANGELOG.md (36 changed lines)

@@ -1,6 +1,36 @@
 # CHANGELOG

-## 0.28.0 (202x-xx-xx)
+## 0.29.0 (202x-xx-xx)

 **Minimum supported Tailscale client version: v1.76.0**

+### Tailscale ACL compatibility improvements
+
+Extensive test cases were systematically generated using Tailscale clients and the official SaaS
+to understand how the packet filter should be generated. We discovered a few differences, but
+overall our implementation was very close.
+[#3036](https://github.com/juanfont/headscale/pull/3036)
+
+### BREAKING
+
+- **ACL Policy**: Wildcard (`*`) in ACL sources and destinations now resolves to Tailscale's CGNAT range (`100.64.0.0/10`) and ULA range (`fd7a:115c:a1e0::/48`) instead of all IPs (`0.0.0.0/0` and `::/0`) [#3036](https://github.com/juanfont/headscale/pull/3036)
+  - This better matches Tailscale's security model, where `*` means "any node in the tailnet" rather than "any IP address"
+  - Policies relying on wildcard to match non-Tailscale IPs will need to use explicit CIDR ranges instead
+  - **Note**: Users with non-standard IP ranges configured in `prefixes.ipv4` or `prefixes.ipv6` (which is unsupported and produces a warning) will need to explicitly specify their CIDR ranges in ACL rules instead of using `*`
+- **ACL Policy**: Validate autogroup:self source restrictions to match Tailscale behavior - tags, hosts, and IPs are rejected as sources for autogroup:self destinations [#3036](https://github.com/juanfont/headscale/pull/3036)
+  - Policies using tags, hosts, or IP addresses as sources for autogroup:self destinations will now fail validation
+- **ACL Policy**: The `proto:icmp` protocol name now only includes ICMPv4 (protocol 1), matching Tailscale behavior [#3036](https://github.com/juanfont/headscale/pull/3036)
+  - Previously, `proto:icmp` included both ICMPv4 and ICMPv6
+  - Use `proto:ipv6-icmp` or protocol number `58` explicitly for ICMPv6
+
+### Changes
+
+- **ACL Policy**: Add ICMP and IPv6-ICMP protocols to default filter rules when no protocol is specified [#3036](https://github.com/juanfont/headscale/pull/3036)
+- **ACL Policy**: Fix autogroup:self handling for tagged nodes - tagged nodes no longer incorrectly receive autogroup:self filter rules [#3036](https://github.com/juanfont/headscale/pull/3036)
+- **ACL Policy**: Use CIDR format for autogroup:self destination IPs, matching Tailscale behavior [#3036](https://github.com/juanfont/headscale/pull/3036)
+- **ACL Policy**: Merge filter rules with identical SrcIPs and IPProto, matching Tailscale behavior - multiple ACL rules with the same source now produce a single FilterRule with combined DstPorts [#3036](https://github.com/juanfont/headscale/pull/3036)
+
+## 0.28.0 (2026-02-04)
+
+**Minimum supported Tailscale client version: v1.74.0**

@@ -162,9 +192,7 @@ sequentially through each stable release, selecting the latest patch version available
 - Fix autogroup:self preventing visibility of nodes matched by other ACL rules [#2882](https://github.com/juanfont/headscale/pull/2882)
 - Fix nodes being rejected after pre-authentication key expiration [#2917](https://github.com/juanfont/headscale/pull/2917)
 - Fix list-routes command respecting identifier filter with JSON output [#2927](https://github.com/juanfont/headscale/pull/2927)
-- **API Key CLI**: Add `--id` flag to expire/delete commands as alternative to `--prefix` [#3016](https://github.com/juanfont/headscale/pull/3016)
-  - `headscale apikeys expire --id <ID>` or `--prefix <PREFIX>`
-  - `headscale apikeys delete --id <ID>` or `--prefix <PREFIX>`
+- Add `--id` flag to expire/delete commands as alternative to `--prefix` for API Keys [#3016](https://github.com/juanfont/headscale/pull/3016)

 ## 0.27.1 (2025-11-11)
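
To make the wildcard change above concrete, here is a minimal Go sketch of the resolution it describes: `*` now expands to the tailnet address space rather than to all of IPv4/IPv6. The function and its name are illustrative, not headscale's actual implementation.

```go
package main

import (
	"fmt"
	"net/netip"
)

// wildcardPrefixes illustrates the 0.29.0 behaviour: "*" resolves to
// Tailscale's CGNAT and ULA ranges, not to 0.0.0.0/0 and ::/0.
func wildcardPrefixes() []netip.Prefix {
	return []netip.Prefix{
		netip.MustParsePrefix("100.64.0.0/10"),       // Tailscale CGNAT range (IPv4)
		netip.MustParsePrefix("fd7a:115c:a1e0::/48"), // Tailscale ULA range (IPv6)
	}
}

func main() {
	// A policy that previously relied on "*" matching arbitrary IPs must now
	// list explicit CIDRs (e.g. "0.0.0.0/0") as destinations instead.
	for _, p := range wildcardPrefixes() {
		fmt.Println(p)
	}
}
```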

@@ -1,6 +1,6 @@
 # For testing purposes only

-FROM golang:alpine AS build-env
+FROM golang:1.26rc2-alpine AS build-env

 WORKDIR /go/src

@@ -2,7 +2,7 @@
 # and are in no way endorsed by Headscale's maintainers as an
 # official nor supported release or distribution.

-FROM docker.io/golang:1.25-trixie AS builder
+FROM docker.io/golang:1.26rc2-trixie AS builder
 ARG VERSION=dev
 ENV GOPATH /go
 WORKDIR /go/src/headscale

@@ -4,7 +4,7 @@
 # This Dockerfile is more or less lifted from tailscale/tailscale
 # to ensure a similar build process when testing the HEAD of tailscale.

-FROM golang:1.25-alpine AS build-env
+FROM golang:1.26rc2-alpine AS build-env

 WORKDIR /go/src

@@ -67,6 +67,8 @@ For NixOS users, a module is available in [`nix/`](./nix/).

 ## Talks

+- Fosdem 2026 (video): [Headscale & Tailscale: The complementary open source clone](https://fosdem.org/2026/schedule/event/KYQ3LL-headscale-the-complementary-open-source-clone/)
+  - presented by Kristoffer Dalby
 - Fosdem 2023 (video): [Headscale: How we are using integration testing to reimplement Tailscale](https://fosdem.org/2023/schedule/event/goheadscale/)
   - presented by Juan Font Alonso and Kristoffer Dalby

@@ -14,7 +14,7 @@ import (
 )

 const (
-    // 90 days.
+    // DefaultAPIKeyExpiry is 90 days.
     DefaultAPIKeyExpiry = "90d"
 )

@@ -16,7 +16,7 @@ var configTestCmd = &cobra.Command{
     Run: func(cmd *cobra.Command, args []string) {
         _, err := newHeadscaleServerWithConfig()
         if err != nil {
-            log.Fatal().Caller().Err(err).Msg("Error initializing")
+            log.Fatal().Caller().Err(err).Msg("error initializing")
         }
     },
 }

@@ -19,10 +19,12 @@ func init() {
     rootCmd.AddCommand(debugCmd)

     createNodeCmd.Flags().StringP("name", "", "", "Name")
+
     err := createNodeCmd.MarkFlagRequired("name")
     if err != nil {
         log.Fatal().Err(err).Msg("")
     }
+
     createNodeCmd.Flags().StringP("user", "u", "", "User")

     createNodeCmd.Flags().StringP("namespace", "n", "", "User")
@@ -34,11 +36,14 @@ func init() {
     if err != nil {
         log.Fatal().Err(err).Msg("")
     }
+
     createNodeCmd.Flags().StringP("key", "k", "", "Key")
+
     err = createNodeCmd.MarkFlagRequired("key")
     if err != nil {
         log.Fatal().Err(err).Msg("")
     }
+
     createNodeCmd.Flags().
         StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to advertise")

@@ -1,8 +1,8 @@
 package cli

 import (
+    "context"
     "encoding/json"
-    "errors"
     "fmt"
     "net"
     "net/http"
@@ -10,6 +10,7 @@ import (
     "strconv"
     "time"

+    "github.com/juanfont/headscale/hscontrol/util/zlog/zf"
     "github.com/oauth2-proxy/mockoidc"
     "github.com/rs/zerolog/log"
     "github.com/spf13/cobra"
@@ -19,6 +20,7 @@ const (
     errMockOidcClientIDNotDefined     = Error("MOCKOIDC_CLIENT_ID not defined")
     errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
     errMockOidcPortNotDefined         = Error("MOCKOIDC_PORT not defined")
+    errMockOidcUsersNotDefined        = Error("MOCKOIDC_USERS not defined")
     refreshTTL                        = 60 * time.Minute
 )
@@ -35,7 +37,7 @@
     Run: func(cmd *cobra.Command, args []string) {
         err := mockOIDC()
         if err != nil {
-            log.Error().Err(err).Msgf("Error running mock OIDC server")
+            log.Error().Err(err).Msgf("error running mock OIDC server")
             os.Exit(1)
         }
     },
@@ -46,41 +48,47 @@ func mockOIDC() error {
     if clientID == "" {
         return errMockOidcClientIDNotDefined
     }
+
     clientSecret := os.Getenv("MOCKOIDC_CLIENT_SECRET")
     if clientSecret == "" {
         return errMockOidcClientSecretNotDefined
     }
+
     addrStr := os.Getenv("MOCKOIDC_ADDR")
     if addrStr == "" {
         return errMockOidcPortNotDefined
     }
+
     portStr := os.Getenv("MOCKOIDC_PORT")
     if portStr == "" {
         return errMockOidcPortNotDefined
     }
+
     accessTTLOverride := os.Getenv("MOCKOIDC_ACCESS_TTL")
     if accessTTLOverride != "" {
         newTTL, err := time.ParseDuration(accessTTLOverride)
         if err != nil {
             return err
         }

         accessTTL = newTTL
     }

     userStr := os.Getenv("MOCKOIDC_USERS")
     if userStr == "" {
-        return errors.New("MOCKOIDC_USERS not defined")
+        return errMockOidcUsersNotDefined
     }

     var users []mockoidc.MockUser

     err := json.Unmarshal([]byte(userStr), &users)
     if err != nil {
         return fmt.Errorf("unmarshalling users: %w", err)
     }

-    log.Info().Interface("users", users).Msg("loading users from JSON")
+    log.Info().Interface(zf.Users, users).Msg("loading users from JSON")

-    log.Info().Msgf("Access token TTL: %s", accessTTL)
+    log.Info().Msgf("access token TTL: %s", accessTTL)

     port, err := strconv.Atoi(portStr)
     if err != nil {
@@ -92,7 +100,7 @@ func mockOIDC() error {
         return err
     }

-    listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addrStr, port))
+    listener, err := new(net.ListenConfig).Listen(context.Background(), "tcp", fmt.Sprintf("%s:%d", addrStr, port))
     if err != nil {
         return err
     }
@@ -101,8 +109,10 @@ func mockOIDC() error {
     if err != nil {
         return err
     }
-    log.Info().Msgf("Mock OIDC server listening on %s", listener.Addr().String())
-    log.Info().Msgf("Issuer: %s", mock.Issuer())
+
+    log.Info().Msgf("mock OIDC server listening on %s", listener.Addr().String())
+    log.Info().Msgf("issuer: %s", mock.Issuer())

     c := make(chan struct{})
     <-c
@@ -133,12 +143,13 @@ func getMockOIDC(clientID string, clientSecret string, users []mockoidc.MockUser
         ErrorQueue: &mockoidc.ErrorQueue{},
     }

-    mock.AddMiddleware(func(h http.Handler) http.Handler {
+    _ = mock.AddMiddleware(func(h http.Handler) http.Handler {
         return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-            log.Info().Msgf("Request: %+v", r)
+            log.Info().Msgf("request: %+v", r)
             h.ServeHTTP(w, r)

             if r.Response != nil {
-                log.Info().Msgf("Response: %+v", r.Response)
+                log.Info().Msgf("response: %+v", r.Response)
             }
         })
     })
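
The listener change above swaps `net.Listen` for `net.ListenConfig.Listen`, which takes a `context.Context`. A minimal sketch of the same pattern:

```go
package main

import (
	"context"
	"fmt"
	"net"
)

func main() {
	// Equivalent to net.Listen("tcp", addr), but context-aware: cancelling
	// ctx aborts a pending listen (e.g. a slow name resolution in the address).
	ctx := context.Background()
	listener, err := new(net.ListenConfig).Listen(ctx, "tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer listener.Close()
	fmt.Println("listening on", listener.Addr())
}
```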

@@ -26,6 +26,7 @@ func init() {
     listNodesNamespaceFlag := listNodesCmd.Flags().Lookup("namespace")
     listNodesNamespaceFlag.Deprecated = deprecateNamespaceMessage
     listNodesNamespaceFlag.Hidden = true
+
     nodeCmd.AddCommand(listNodesCmd)

     listNodeRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
@@ -42,42 +43,51 @@ func init() {
     if err != nil {
         log.Fatal(err.Error())
     }
+
     registerNodeCmd.Flags().StringP("key", "k", "", "Key")
+
     err = registerNodeCmd.MarkFlagRequired("key")
     if err != nil {
         log.Fatal(err.Error())
     }
+
     nodeCmd.AddCommand(registerNodeCmd)

     expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
     expireNodeCmd.Flags().StringP("expiry", "e", "", "Set expire to (RFC3339 format, e.g. 2025-08-27T10:00:00Z), or leave empty to expire immediately.")
+
     err = expireNodeCmd.MarkFlagRequired("identifier")
     if err != nil {
         log.Fatal(err.Error())
     }
+
     nodeCmd.AddCommand(expireNodeCmd)

     renameNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
+
     err = renameNodeCmd.MarkFlagRequired("identifier")
     if err != nil {
         log.Fatal(err.Error())
     }
+
     nodeCmd.AddCommand(renameNodeCmd)

     deleteNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
+
     err = deleteNodeCmd.MarkFlagRequired("identifier")
     if err != nil {
         log.Fatal(err.Error())
     }
+
     nodeCmd.AddCommand(deleteNodeCmd)

     tagCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
-    tagCmd.MarkFlagRequired("identifier")
+    _ = tagCmd.MarkFlagRequired("identifier")
     tagCmd.Flags().StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
     nodeCmd.AddCommand(tagCmd)

     approveRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
-    approveRoutesCmd.MarkFlagRequired("identifier")
+    _ = approveRoutesCmd.MarkFlagRequired("identifier")
     approveRoutesCmd.Flags().StringSliceP("routes", "r", []string{}, `List of routes that will be approved (comma-separated, e.g. "10.0.0.0/8,192.168.0.0/24" or empty string to remove all approved routes)`)
     nodeCmd.AddCommand(approveRoutesCmd)
@@ -233,10 +243,7 @@ var listNodeRoutesCmd = &cobra.Command{
         return
     }

-    tableData, err := nodeRoutesToPtables(nodes)
-    if err != nil {
-        ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
-    }
+    tableData := nodeRoutesToPtables(nodes)

     err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
     if err != nil {
@@ -506,15 +513,21 @@ func nodesToPtables(
         ephemeral = true
     }

-    var lastSeen time.Time
-    var lastSeenTime string
+    var (
+        lastSeen     time.Time
+        lastSeenTime string
+    )

     if node.GetLastSeen() != nil {
         lastSeen = node.GetLastSeen().AsTime()
         lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
     }

-    var expiry time.Time
-    var expiryTime string
+    var (
+        expiry     time.Time
+        expiryTime string
+    )

     if node.GetExpiry() != nil {
         expiry = node.GetExpiry().AsTime()
         expiryTime = expiry.Format("2006-01-02 15:04:05")
@@ -523,6 +536,7 @@
     }

     var machineKey key.MachinePublic
+
     err := machineKey.UnmarshalText(
         []byte(node.GetMachineKey()),
     )
@@ -531,6 +545,7 @@
     }

     var nodeKey key.NodePublic
+
     err = nodeKey.UnmarshalText(
         []byte(node.GetNodeKey()),
     )
@@ -572,8 +587,11 @@
         user = pterm.LightYellow(node.GetUser().GetName())
     }

-    var IPV4Address string
-    var IPV6Address string
+    var (
+        IPV4Address string
+        IPV6Address string
+    )

     for _, addr := range node.GetIpAddresses() {
         if netip.MustParseAddr(addr).Is4() {
             IPV4Address = addr
@@ -608,7 +626,7 @@

 func nodeRoutesToPtables(
     nodes []*v1.Node,
-) (pterm.TableData, error) {
+) pterm.TableData {
     tableHeader := []string{
         "ID",
         "Hostname",
@@ -632,7 +650,7 @@
         )
     }

-    return tableData, nil
+    return tableData
 }

 var tagCmd = &cobra.Command{

@@ -16,7 +16,7 @@ import (
 )

 const (
-    bypassFlag = "bypass-grpc-and-access-database-directly"
+    bypassFlag = "bypass-grpc-and-access-database-directly" //nolint:gosec // not a credential
 )

 func init() {
@@ -26,16 +26,22 @@
     policyCmd.AddCommand(getPolicy)

     setPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
-    if err := setPolicy.MarkFlagRequired("file"); err != nil {
+
+    err := setPolicy.MarkFlagRequired("file")
+    if err != nil {
         log.Fatal().Err(err).Msg("")
     }
+
     setPolicy.Flags().BoolP(bypassFlag, "", false, "Uses the headscale config to directly access the database, bypassing gRPC and does not require the server to be running")
     policyCmd.AddCommand(setPolicy)

     checkPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
-    if err := checkPolicy.MarkFlagRequired("file"); err != nil {
+
+    err = checkPolicy.MarkFlagRequired("file")
+    if err != nil {
         log.Fatal().Err(err).Msg("")
     }
+
     policyCmd.AddCommand(checkPolicy)
 }
@@ -173,7 +179,7 @@ var setPolicy = &cobra.Command{
     defer cancel()
     defer conn.Close()

-    if _, err := client.SetPolicy(ctx, request); err != nil {
+    if _, err := client.SetPolicy(ctx, request); err != nil { //nolint:noinlineerr
         ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
     }
 }

@@ -45,15 +45,16 @@ func initConfig() {
     if cfgFile == "" {
         cfgFile = os.Getenv("HEADSCALE_CONFIG")
     }
+
     if cfgFile != "" {
         err := types.LoadConfig(cfgFile, true)
         if err != nil {
-            log.Fatal().Caller().Err(err).Msgf("Error loading config file %s", cfgFile)
+            log.Fatal().Caller().Err(err).Msgf("error loading config file %s", cfgFile)
         }
     } else {
         err := types.LoadConfig("", false)
         if err != nil {
-            log.Fatal().Caller().Err(err).Msgf("Error loading config")
+            log.Fatal().Caller().Err(err).Msgf("error loading config")
         }
     }
@@ -80,6 +81,7 @@ func initConfig() {
         Repository:    "headscale",
         TagFilterFunc: filterPreReleasesIfStable(func() string { return versionInfo.Version }),
     }
+
     res, err := latest.Check(githubTag, versionInfo.Version)
     if err == nil && res.Outdated {
         //nolint
@@ -101,6 +103,7 @@ func isPreReleaseVersion(version string) bool {
             return true
         }
     }
+
     return false
 }
@@ -140,7 +143,8 @@ https://github.com/juanfont/headscale`,
 }

 func Execute() {
-    if err := rootCmd.Execute(); err != nil {
+    err := rootCmd.Execute()
+    if err != nil {
         fmt.Fprintln(os.Stderr, err)
         os.Exit(1)
     }

@@ -23,18 +23,17 @@ var serveCmd = &cobra.Command{
     Run: func(cmd *cobra.Command, args []string) {
         app, err := newHeadscaleServerWithConfig()
         if err != nil {
-            var squibbleErr squibble.ValidationError
-            if errors.As(err, &squibbleErr) {
+            if squibbleErr, ok := errors.AsType[squibble.ValidationError](err); ok {
                 fmt.Printf("SQLite schema failed to validate:\n")
                 fmt.Println(squibbleErr.Diff)
             }

-            log.Fatal().Caller().Err(err).Msg("Error initializing")
+            log.Fatal().Caller().Err(err).Msg("error initializing")
         }

         err = app.Serve()
         if err != nil && !errors.Is(err, http.ErrServerClosed) {
-            log.Fatal().Caller().Err(err).Msg("Headscale ran into an error and had to shut down.")
+            log.Fatal().Caller().Err(err).Msg("headscale ran into an error and had to shut down")
         }
     },
 }
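
The `serve` change above replaces the `errors.As` target-variable pattern with the generic `errors.AsType` form. A sketch of the two shapes, using a stand-in error type since `squibble.ValidationError` is external; `errors.AsType` is shown only in a comment because it relies on the Go 1.26 toolchain this branch migrates to.

```go
package main

import (
	"errors"
	"fmt"
)

type ValidationError struct{ Diff string }

func (e ValidationError) Error() string { return "schema validation failed" }

func main() {
	err := fmt.Errorf("startup: %w", ValidationError{Diff: "+column x"})

	// Old shape: declare a target, pass a pointer to errors.As.
	var target ValidationError
	if errors.As(err, &target) {
		fmt.Println("diff:", target.Diff)
	}

	// New shape used in the diff (generic, no pre-declared target):
	//   if vErr, ok := errors.AsType[ValidationError](err); ok { ... }
}
```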

@@ -8,12 +8,19 @@ import (

     v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
     "github.com/juanfont/headscale/hscontrol/util"
+    "github.com/juanfont/headscale/hscontrol/util/zlog/zf"
     "github.com/pterm/pterm"
     "github.com/rs/zerolog/log"
     "github.com/spf13/cobra"
     "google.golang.org/grpc/status"
 )

+// CLI user errors.
+var (
+    errFlagRequired       = errors.New("--name or --identifier flag is required")
+    errMultipleUsersMatch = errors.New("multiple users match query, specify an ID")
+)
+
 func usernameAndIDFlag(cmd *cobra.Command) {
     cmd.Flags().Int64P("identifier", "i", -1, "User identifier (ID)")
     cmd.Flags().StringP("name", "n", "", "Username")
@@ -23,12 +30,12 @@ func usernameAndIDFlag(cmd *cobra.Command) {
 // If both are empty, it will exit the program with an error.
 func usernameAndIDFromFlag(cmd *cobra.Command) (uint64, string) {
     username, _ := cmd.Flags().GetString("name")
+
     identifier, _ := cmd.Flags().GetInt64("identifier")
     if username == "" && identifier < 0 {
-        err := errors.New("--name or --identifier flag is required")
         ErrorOutput(
-            err,
-            "Cannot rename user: "+status.Convert(err).Message(),
+            errFlagRequired,
+            "Cannot rename user: "+status.Convert(errFlagRequired).Message(),
             "",
         )
     }
@@ -50,7 +57,8 @@ func init() {
     userCmd.AddCommand(renameUserCmd)
     usernameAndIDFlag(renameUserCmd)
     renameUserCmd.Flags().StringP("new-name", "r", "", "New username")
-    renameNodeCmd.MarkFlagRequired("new-name")
+
+    _ = renameNodeCmd.MarkFlagRequired("new-name")
 }

 var errMissingParameter = errors.New("missing parameters")
@@ -81,7 +89,7 @@ var createUserCmd = &cobra.Command{
     defer cancel()
     defer conn.Close()

-    log.Trace().Interface("client", client).Msg("Obtained gRPC client")
+    log.Trace().Interface(zf.Client, client).Msg("obtained gRPC client")

     request := &v1.CreateUserRequest{Name: userName}

@@ -94,7 +102,7 @@ var createUserCmd = &cobra.Command{
     }

     if pictureURL, _ := cmd.Flags().GetString("picture-url"); pictureURL != "" {
-        if _, err := url.Parse(pictureURL); err != nil {
+        if _, err := url.Parse(pictureURL); err != nil { //nolint:noinlineerr
             ErrorOutput(
                 err,
                 fmt.Sprintf(
@@ -107,7 +115,7 @@ var createUserCmd = &cobra.Command{
         request.PictureUrl = pictureURL
     }

-    log.Trace().Interface("request", request).Msg("Sending CreateUser request")
+    log.Trace().Interface(zf.Request, request).Msg("sending CreateUser request")
     response, err := client.CreateUser(ctx, request)
     if err != nil {
         ErrorOutput(
@@ -148,7 +156,7 @@ var destroyUserCmd = &cobra.Command{
     }

     if len(users.GetUsers()) != 1 {
-        err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
+        err := errMultipleUsersMatch
         ErrorOutput(
             err,
             "Error: "+status.Convert(err).Message(),
@@ -276,7 +284,7 @@ var renameUserCmd = &cobra.Command{
     }

     if len(users.GetUsers()) != 1 {
-        err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
+        err := errMultipleUsersMatch
         ErrorOutput(
             err,
             "Error: "+status.Convert(err).Message(),
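
This change hoists inline `errors.New` calls into package-level sentinel errors (`errFlagRequired`, `errMultipleUsersMatch`). Sentinels keep messages consistent across call sites and let callers match with `errors.Is`; a minimal sketch:

```go
package main

import (
	"errors"
	"fmt"
)

// Package-level sentinel: one definition, reused by every call site.
var errMultipleUsersMatch = errors.New("multiple users match query, specify an ID")

func findUser(matches int) error {
	if matches != 1 {
		return errMultipleUsersMatch
	}
	return nil
}

func main() {
	err := findUser(2)
	// Callers can test identity instead of string-matching the message.
	if errors.Is(err, errMultipleUsersMatch) {
		fmt.Println("ambiguous query:", err)
	}
}
```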

@@ -11,6 +11,7 @@ import (
     "github.com/juanfont/headscale/hscontrol"
     "github.com/juanfont/headscale/hscontrol/types"
     "github.com/juanfont/headscale/hscontrol/util"
+    "github.com/juanfont/headscale/hscontrol/util/zlog/zf"
     "github.com/rs/zerolog/log"
     "google.golang.org/grpc"
     "google.golang.org/grpc/credentials"
@@ -57,7 +58,7 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
     ctx, cancel := context.WithTimeout(context.Background(), cfg.CLI.Timeout)

     grpcOptions := []grpc.DialOption{
-        grpc.WithBlock(),
+        grpc.WithBlock(), //nolint:staticcheck // SA1019: deprecated but supported in 1.x
     }

     address := cfg.CLI.Address
@@ -81,6 +82,7 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
                 Msgf("Unable to read/write to headscale socket, do you have the correct permissions?")
         }
     }
+
     socket.Close()

     grpcOptions = append(
@@ -92,8 +94,9 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
         // If we are not connecting to a local server, require an API key for authentication
         apiKey := cfg.CLI.APIKey
         if apiKey == "" {
-            log.Fatal().Caller().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set.")
+            log.Fatal().Caller().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set")
         }
+
         grpcOptions = append(grpcOptions,
             grpc.WithPerRPCCredentials(tokenAuth{
                 token: apiKey,
@@ -118,10 +121,11 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
         }
     }

-    log.Trace().Caller().Str("address", address).Msg("Connecting via gRPC")
-    conn, err := grpc.DialContext(ctx, address, grpcOptions...)
+    log.Trace().Caller().Str(zf.Address, address).Msg("connecting via gRPC")
+
+    conn, err := grpc.DialContext(ctx, address, grpcOptions...) //nolint:staticcheck // SA1019: deprecated but supported in 1.x
     if err != nil {
-        log.Fatal().Caller().Err(err).Msgf("Could not connect: %v", err)
+        log.Fatal().Caller().Err(err).Msgf("could not connect: %v", err)
         os.Exit(-1) // we get here if logging is suppressed (i.e., json output)
     }
@@ -131,23 +135,26 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
 }

 func output(result any, override string, outputFormat string) string {
-    var jsonBytes []byte
-    var err error
+    var (
+        jsonBytes []byte
+        err       error
+    )

     switch outputFormat {
     case "json":
         jsonBytes, err = json.MarshalIndent(result, "", "\t")
         if err != nil {
-            log.Fatal().Err(err).Msg("failed to unmarshal output")
+            log.Fatal().Err(err).Msg("unmarshalling output")
         }
     case "json-line":
         jsonBytes, err = json.Marshal(result)
         if err != nil {
-            log.Fatal().Err(err).Msg("failed to unmarshal output")
+            log.Fatal().Err(err).Msg("unmarshalling output")
         }
     case "yaml":
         jsonBytes, err = yaml.Marshal(result)
         if err != nil {
-            log.Fatal().Err(err).Msg("failed to unmarshal output")
+            log.Fatal().Err(err).Msg("unmarshalling output")
         }
     default:
         // nolint
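
`grpc.WithBlock` and `grpc.DialContext` are deprecated in grpc-go 1.x, which is why this diff pins them with `//nolint:staticcheck` rather than migrating. For reference, the non-deprecated shape would look roughly like the sketch below; note that `grpc.NewClient` connects lazily, so there is no direct equivalent of the blocking behaviour `WithBlock` provided.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// grpc.NewClient replaces grpc.DialContext; it does not dial eagerly,
	// so the first RPC establishes the connection instead of WithBlock.
	conn, err := grpc.NewClient("localhost:50443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println("client created for", conn.Target())
}
```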

@@ -12,6 +12,7 @@ import (

 func main() {
     var colors bool
+
     switch l := termcolor.SupportLevel(os.Stderr); l {
     case termcolor.Level16M:
         colors = true

@@ -14,9 +14,7 @@ import (
 )

 func TestConfigFileLoading(t *testing.T) {
-    tmpDir, err := os.MkdirTemp("", "headscale")
-    require.NoError(t, err)
-    defer os.RemoveAll(tmpDir)
+    tmpDir := t.TempDir()

     path, err := os.Getwd()
     require.NoError(t, err)
@@ -48,9 +46,7 @@ func TestConfigFileLoading(t *testing.T) {
 }

 func TestConfigLoading(t *testing.T) {
-    tmpDir, err := os.MkdirTemp("", "headscale")
-    require.NoError(t, err)
-    defer os.RemoveAll(tmpDir)
+    tmpDir := t.TempDir()

     path, err := os.Getwd()
     require.NoError(t, err)
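
The test change swaps manual `os.MkdirTemp` plus `defer os.RemoveAll` for `t.TempDir()`, which registers cleanup automatically and fails the test itself if directory creation fails. For example:

```go
package example

import (
	"os"
	"path/filepath"
	"testing"
)

func TestWritesConfig(t *testing.T) {
	// t.TempDir creates a unique directory and removes it (with contents)
	// when the test finishes - no error handling or defer needed.
	tmpDir := t.TempDir()

	cfg := filepath.Join(tmpDir, "config.yaml")
	if err := os.WriteFile(cfg, []byte("server_url: http://localhost"), 0o644); err != nil {
		t.Fatal(err)
	}
}
```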

@@ -22,11 +22,11 @@ import (
 func cleanupBeforeTest(ctx context.Context) error {
     err := cleanupStaleTestContainers(ctx)
     if err != nil {
-        return fmt.Errorf("failed to clean stale test containers: %w", err)
+        return fmt.Errorf("cleaning stale test containers: %w", err)
     }

-    if err := pruneDockerNetworks(ctx); err != nil {
-        return fmt.Errorf("failed to prune networks: %w", err)
+    if err := pruneDockerNetworks(ctx); err != nil { //nolint:noinlineerr
+        return fmt.Errorf("pruning networks: %w", err)
     }

     return nil
@@ -39,14 +39,14 @@ func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID, runI
         Force: true,
     })
     if err != nil {
-        return fmt.Errorf("failed to remove test container: %w", err)
+        return fmt.Errorf("removing test container: %w", err)
     }

     // Clean up integration test containers for this run only
     if runID != "" {
         err := killTestContainersByRunID(ctx, runID)
         if err != nil {
-            return fmt.Errorf("failed to clean up containers for run %s: %w", runID, err)
+            return fmt.Errorf("cleaning up containers for run %s: %w", runID, err)
         }
     }
@@ -55,9 +55,9 @@ func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID, runI

 // killTestContainers terminates and removes all test containers.
 func killTestContainers(ctx context.Context) error {
-    cli, err := createDockerClient()
+    cli, err := createDockerClient(ctx)
     if err != nil {
-        return fmt.Errorf("failed to create Docker client: %w", err)
+        return fmt.Errorf("creating Docker client: %w", err)
     }
     defer cli.Close()
@@ -65,12 +65,14 @@ func killTestContainers(ctx context.Context) error {
         All: true,
     })
     if err != nil {
-        return fmt.Errorf("failed to list containers: %w", err)
+        return fmt.Errorf("listing containers: %w", err)
     }

     removed := 0
+
     for _, cont := range containers {
         shouldRemove := false
+
         for _, name := range cont.Names {
             if strings.Contains(name, "headscale-test-suite") ||
                 strings.Contains(name, "hs-") ||
@@ -107,9 +109,9 @@ func killTestContainers(ctx context.Context) error {
 // This function filters containers by the hi.run-id label to only affect containers
 // belonging to the specified test run, leaving other concurrent test runs untouched.
 func killTestContainersByRunID(ctx context.Context, runID string) error {
-    cli, err := createDockerClient()
+    cli, err := createDockerClient(ctx)
     if err != nil {
-        return fmt.Errorf("failed to create Docker client: %w", err)
+        return fmt.Errorf("creating Docker client: %w", err)
     }
     defer cli.Close()
@@ -121,7 +123,7 @@ func killTestContainersByRunID(ctx context.Context, runID string) error {
         ),
     })
     if err != nil {
-        return fmt.Errorf("failed to list containers for run %s: %w", runID, err)
+        return fmt.Errorf("listing containers for run %s: %w", runID, err)
     }

     removed := 0
@@ -149,9 +151,9 @@ func killTestContainersByRunID(ctx context.Context, runID string) error {
 // This is useful for cleaning up leftover containers from previous crashed or interrupted test runs
 // without interfering with currently running concurrent tests.
 func cleanupStaleTestContainers(ctx context.Context) error {
-    cli, err := createDockerClient()
+    cli, err := createDockerClient(ctx)
     if err != nil {
-        return fmt.Errorf("failed to create Docker client: %w", err)
+        return fmt.Errorf("creating Docker client: %w", err)
     }
     defer cli.Close()
@@ -164,7 +166,7 @@ func cleanupStaleTestContainers(ctx context.Context) error {
         ),
     })
     if err != nil {
-        return fmt.Errorf("failed to list stopped containers: %w", err)
+        return fmt.Errorf("listing stopped containers: %w", err)
     }

     removed := 0
@@ -223,15 +225,15 @@ func removeContainerWithRetry(ctx context.Context, cli *client.Client, container

 // pruneDockerNetworks removes unused Docker networks.
 func pruneDockerNetworks(ctx context.Context) error {
-    cli, err := createDockerClient()
+    cli, err := createDockerClient(ctx)
     if err != nil {
-        return fmt.Errorf("failed to create Docker client: %w", err)
+        return fmt.Errorf("creating Docker client: %w", err)
     }
     defer cli.Close()

     report, err := cli.NetworksPrune(ctx, filters.Args{})
     if err != nil {
-        return fmt.Errorf("failed to prune networks: %w", err)
+        return fmt.Errorf("pruning networks: %w", err)
     }

     if len(report.NetworksDeleted) > 0 {
@@ -245,9 +247,9 @@ func pruneDockerNetworks(ctx context.Context) error {

 // cleanOldImages removes test-related and old dangling Docker images.
 func cleanOldImages(ctx context.Context) error {
-    cli, err := createDockerClient()
+    cli, err := createDockerClient(ctx)
     if err != nil {
-        return fmt.Errorf("failed to create Docker client: %w", err)
+        return fmt.Errorf("creating Docker client: %w", err)
     }
     defer cli.Close()
@@ -255,12 +257,14 @@ func cleanOldImages(ctx context.Context) error {
         All: true,
     })
     if err != nil {
-        return fmt.Errorf("failed to list images: %w", err)
+        return fmt.Errorf("listing images: %w", err)
     }

     removed := 0
+
     for _, img := range images {
         shouldRemove := false
+
         for _, tag := range img.RepoTags {
             if strings.Contains(tag, "hs-") ||
                 strings.Contains(tag, "headscale-integration") ||
@@ -295,18 +299,19 @@ func cleanOldImages(ctx context.Context) error {

 // cleanCacheVolume removes the Docker volume used for Go module cache.
 func cleanCacheVolume(ctx context.Context) error {
-    cli, err := createDockerClient()
+    cli, err := createDockerClient(ctx)
     if err != nil {
-        return fmt.Errorf("failed to create Docker client: %w", err)
+        return fmt.Errorf("creating Docker client: %w", err)
     }
     defer cli.Close()

     volumeName := "hs-integration-go-cache"
+
     err = cli.VolumeRemove(ctx, volumeName, true)
     if err != nil {
-        if errdefs.IsNotFound(err) {
+        if errdefs.IsNotFound(err) { //nolint:staticcheck // SA1019: deprecated but functional
             fmt.Printf("Go module cache volume not found: %s\n", volumeName)
-        } else if errdefs.IsConflict(err) {
+        } else if errdefs.IsConflict(err) { //nolint:staticcheck // SA1019: deprecated but functional
             fmt.Printf("Go module cache volume is in use and cannot be removed: %s\n", volumeName)
         } else {
             fmt.Printf("Failed to remove Go module cache volume %s: %v\n", volumeName, err)
@@ -330,7 +335,7 @@ func cleanCacheVolume(ctx context.Context) error {
 func cleanupSuccessfulTestArtifacts(logsDir string, verbose bool) error {
     entries, err := os.ReadDir(logsDir)
     if err != nil {
-        return fmt.Errorf("failed to read logs directory: %w", err)
+        return fmt.Errorf("reading logs directory: %w", err)
     }

     var (
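
The recurring rewrite in this file, "failed to X: %w" becoming "Xing: %w", follows the Go convention that wrapped errors read as a chain of context without a repeated "failed to" at every layer. A small sketch of the resulting messages:

```go
package main

import (
	"errors"
	"fmt"
)

func listContainers() error {
	return errors.New("permission denied")
}

func cleanup() error {
	// Gerund-style context: each layer states what it was doing.
	if err := listContainers(); err != nil {
		return fmt.Errorf("listing containers: %w", err)
	}
	return nil
}

func main() {
	// Prints "cleaning up: listing containers: permission denied" -
	// one failure verb at the call site instead of one per layer.
	if err := cleanup(); err != nil {
		fmt.Println("cleaning up:", err)
	}
}
```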
148
cmd/hi/docker.go
148
cmd/hi/docker.go
@@ -22,17 +22,22 @@ import (
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
)
|
||||
|
||||
const defaultDirPerm = 0o755
|
||||
|
||||
var (
|
||||
ErrTestFailed = errors.New("test failed")
|
||||
ErrUnexpectedContainerWait = errors.New("unexpected end of container wait")
|
||||
ErrNoDockerContext = errors.New("no docker context found")
|
||||
ErrMemoryLimitViolations = errors.New("container(s) exceeded memory limits")
|
||||
)
|
||||
|
||||
// runTestContainer executes integration tests in a Docker container.
|
||||
//
|
||||
//nolint:gocyclo // complex test orchestration function
|
||||
func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
return fmt.Errorf("creating Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
@@ -48,19 +53,21 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
|
||||
absLogsDir, err := filepath.Abs(logsDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get absolute path for logs directory: %w", err)
|
||||
return fmt.Errorf("getting absolute path for logs directory: %w", err)
|
||||
}
|
||||
|
||||
const dirPerm = 0o755
|
||||
if err := os.MkdirAll(absLogsDir, dirPerm); err != nil {
|
||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
||||
if err := os.MkdirAll(absLogsDir, dirPerm); err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("creating logs directory: %w", err)
|
||||
}
|
||||
|
||||
if config.CleanBefore {
|
||||
if config.Verbose {
|
||||
log.Printf("Running pre-test cleanup...")
|
||||
}
|
||||
if err := cleanupBeforeTest(ctx); err != nil && config.Verbose {
|
||||
|
||||
err := cleanupBeforeTest(ctx)
|
||||
if err != nil && config.Verbose {
|
||||
log.Printf("Warning: pre-test cleanup failed: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -71,21 +78,21 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
}
|
||||
|
||||
imageName := "golang:" + config.GoVersion
|
||||
if err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil {
|
||||
return fmt.Errorf("failed to ensure image availability: %w", err)
|
||||
if err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("ensuring image availability: %w", err)
|
||||
}
|
||||
|
||||
resp, err := createGoTestContainer(ctx, cli, config, containerName, absLogsDir, goTestCmd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create container: %w", err)
|
||||
return fmt.Errorf("creating container: %w", err)
|
||||
}
|
||||
|
||||
if config.Verbose {
|
||||
log.Printf("Created container: %s", resp.ID)
|
||||
}
|
||||
|
||||
if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
|
||||
return fmt.Errorf("failed to start container: %w", err)
|
||||
if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("starting container: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("Starting test: %s", config.TestPattern)
|
||||
@@ -95,13 +102,16 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
|
||||
// Start stats collection for container resource monitoring (if enabled)
|
||||
var statsCollector *StatsCollector
|
||||
|
||||
if config.Stats {
|
||||
var err error
|
||||
statsCollector, err = NewStatsCollector()
|
||||
|
||||
statsCollector, err = NewStatsCollector(ctx)
|
||||
if err != nil {
|
||||
if config.Verbose {
|
||||
log.Printf("Warning: failed to create stats collector: %v", err)
|
||||
}
|
||||
|
||||
statsCollector = nil
|
||||
}
|
||||
|
||||
@@ -110,7 +120,8 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
|
||||
// Start stats collection immediately - no need for complex retry logic
|
||||
// The new implementation monitors Docker events and will catch containers as they start
|
||||
if err := statsCollector.StartCollection(ctx, runID, config.Verbose); err != nil {
|
||||
err := statsCollector.StartCollection(ctx, runID, config.Verbose)
|
||||
if err != nil {
|
||||
if config.Verbose {
|
||||
log.Printf("Warning: failed to start stats collection: %v", err)
|
||||
}
|
||||
@@ -122,12 +133,13 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
exitCode, err := streamAndWait(ctx, cli, resp.ID)
|
||||
|
||||
// Ensure all containers have finished and logs are flushed before extracting artifacts
|
||||
if waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose); waitErr != nil && config.Verbose {
|
||||
waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose)
|
||||
if waitErr != nil && config.Verbose {
|
||||
log.Printf("Warning: failed to wait for container finalization: %v", waitErr)
|
||||
}
|
||||
|
||||
// Extract artifacts from test containers before cleanup
|
||||
if err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose {
|
||||
if err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose { //nolint:noinlineerr
|
||||
log.Printf("Warning: failed to extract artifacts from containers: %v", err)
|
||||
}
|
||||
|
||||
@@ -140,12 +152,13 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
if len(violations) > 0 {
|
||||
log.Printf("MEMORY LIMIT VIOLATIONS DETECTED:")
|
||||
log.Printf("=================================")
|
||||
|
||||
for _, violation := range violations {
|
||||
log.Printf("Container %s exceeded memory limit: %.1f MB > %.1f MB",
|
||||
violation.ContainerName, violation.MaxMemoryMB, violation.LimitMB)
|
||||
}
|
||||
|
||||
return fmt.Errorf("test failed: %d container(s) exceeded memory limits", len(violations))
|
||||
return fmt.Errorf("test failed: %d %w", len(violations), ErrMemoryLimitViolations)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -176,7 +189,7 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("test execution failed: %w", err)
|
||||
return fmt.Errorf("executing test: %w", err)
|
||||
}
|
||||
|
||||
if exitCode != 0 {
|
||||
@@ -210,7 +223,7 @@ func buildGoTestCommand(config *RunConfig) []string {
|
||||
func createGoTestContainer(ctx context.Context, cli *client.Client, config *RunConfig, containerName, logsDir string, goTestCmd []string) (container.CreateResponse, error) {
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return container.CreateResponse{}, fmt.Errorf("failed to get working directory: %w", err)
|
||||
return container.CreateResponse{}, fmt.Errorf("getting working directory: %w", err)
|
||||
}
|
||||
|
||||
projectRoot := findProjectRoot(pwd)
|
||||
@@ -312,7 +325,7 @@ func streamAndWait(ctx context.Context, cli *client.Client, containerID string)
 		Follow: true,
 	})
 	if err != nil {
-		return -1, fmt.Errorf("failed to get container logs: %w", err)
+		return -1, fmt.Errorf("getting container logs: %w", err)
 	}
 	defer out.Close()

@@ -324,7 +337,7 @@ func streamAndWait(ctx context.Context, cli *client.Client, containerID string)
 	select {
 	case err := <-errCh:
 		if err != nil {
-			return -1, fmt.Errorf("error waiting for container: %w", err)
+			return -1, fmt.Errorf("waiting for container: %w", err)
 		}
 	case status := <-statusCh:
 		return int(status.StatusCode), nil
@@ -338,7 +351,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
 	// First, get all related test containers
 	containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
 	if err != nil {
-		return fmt.Errorf("failed to list containers: %w", err)
+		return fmt.Errorf("listing containers: %w", err)
 	}

 	testContainers := getCurrentTestContainers(containers, testContainerID, verbose)
@@ -347,6 +360,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
 	maxWaitTime := 10 * time.Second
 	checkInterval := 500 * time.Millisecond
 	timeout := time.After(maxWaitTime)

 	ticker := time.NewTicker(checkInterval)
 	defer ticker.Stop()

@@ -356,6 +370,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
 		if verbose {
 			log.Printf("Timeout waiting for container finalization, proceeding with artifact extraction")
 		}

 		return nil
 	case <-ticker.C:
 		allFinalized := true
@@ -366,12 +381,14 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
 			if verbose {
 				log.Printf("Warning: failed to inspect container %s: %v", testCont.name, err)
 			}

 			continue
 		}

 		// Check if container is in a final state
 		if !isContainerFinalized(inspect.State) {
 			allFinalized = false

 			if verbose {
 				log.Printf("Container %s still finalizing (state: %s)", testCont.name, inspect.State.Status)
 			}
@@ -384,6 +401,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
 		if verbose {
 			log.Printf("All test containers finalized, ready for artifact extraction")
 		}

 		return nil
 	}
 }
@@ -400,13 +418,15 @@ func isContainerFinalized(state *container.State) bool {
 func findProjectRoot(startPath string) string {
 	current := startPath
 	for {
-		if _, err := os.Stat(filepath.Join(current, "go.mod")); err == nil {
+		if _, err := os.Stat(filepath.Join(current, "go.mod")); err == nil { //nolint:noinlineerr
 			return current
 		}

 		parent := filepath.Dir(current)
 		if parent == current {
 			return startPath
 		}

 		current = parent
 	}
 }
@@ -416,6 +436,7 @@ func boolToInt(b bool) int {
 	if b {
 		return 1
 	}

 	return 0
 }
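`waitForContainerFinalization` combines a hard deadline (`time.After`) with periodic polling (`time.Ticker`), and deliberately returns nil on timeout so artifact extraction proceeds with whatever is available. The skeleton of that pattern, separated from the Docker specifics (the `done` predicate is a stand-in):

```go
package main

import (
	"fmt"
	"time"
)

func poll(done func() bool) error {
	timeout := time.After(10 * time.Second) // overall deadline
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-timeout:
			// Give up but don't fail: proceed with what we have.
			return nil
		case <-ticker.C:
			if done() {
				return nil
			}
		}
	}
}

func main() {
	start := time.Now()
	_ = poll(func() bool { return time.Since(start) > time.Second })
	fmt.Println("finalized after", time.Since(start).Round(time.Millisecond))
}
```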
@@ -428,13 +449,14 @@ type DockerContext struct {
 }

 // createDockerClient creates a Docker client with context detection.
-func createDockerClient() (*client.Client, error) {
-	contextInfo, err := getCurrentDockerContext()
+func createDockerClient(ctx context.Context) (*client.Client, error) {
+	contextInfo, err := getCurrentDockerContext(ctx)
 	if err != nil {
 		return client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
 	}

 	var clientOpts []client.Opt

 	clientOpts = append(clientOpts, client.WithAPIVersionNegotiation())

 	if contextInfo != nil {
@@ -444,6 +466,7 @@ func createDockerClient() (*client.Client, error) {
 		if runConfig.Verbose {
 			log.Printf("Using Docker host from context '%s': %s", contextInfo.Name, host)
 		}

 		clientOpts = append(clientOpts, client.WithHost(host))
 	}
 }
@@ -458,16 +481,17 @@ func createDockerClient() (*client.Client, error) {
 }

 // getCurrentDockerContext retrieves the current Docker context information.
-func getCurrentDockerContext() (*DockerContext, error) {
-	cmd := exec.Command("docker", "context", "inspect")
+func getCurrentDockerContext(ctx context.Context) (*DockerContext, error) {
+	cmd := exec.CommandContext(ctx, "docker", "context", "inspect")

 	output, err := cmd.Output()
 	if err != nil {
-		return nil, fmt.Errorf("failed to get docker context: %w", err)
+		return nil, fmt.Errorf("getting docker context: %w", err)
 	}

 	var contexts []DockerContext
-	if err := json.Unmarshal(output, &contexts); err != nil {
-		return nil, fmt.Errorf("failed to parse docker context: %w", err)
+	if err := json.Unmarshal(output, &contexts); err != nil { //nolint:noinlineerr
+		return nil, fmt.Errorf("parsing docker context: %w", err)
 	}

 	if len(contexts) > 0 {
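`docker context inspect` prints a JSON array, which is why the function unmarshals into a slice and then takes the first element. A standalone sketch of the same idea; the struct fields below are an assumption based on the CLI's typical output, not the `DockerContext` type defined in this file:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os/exec"
)

// Hypothetical subset of `docker context inspect` output.
type dockerContext struct {
	Name      string `json:"Name"`
	Endpoints map[string]struct {
		Host string `json:"Host"`
	} `json:"Endpoints"`
}

func main() {
	out, err := exec.CommandContext(context.Background(), "docker", "context", "inspect").Output()
	if err != nil {
		fmt.Println("getting docker context:", err)
		return
	}

	var contexts []dockerContext
	if err := json.Unmarshal(out, &contexts); err != nil {
		fmt.Println("parsing docker context:", err)
		return
	}

	if len(contexts) > 0 {
		fmt.Println(contexts[0].Name, contexts[0].Endpoints["docker"].Host)
	}
}
```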
@@ -486,12 +510,13 @@ func getDockerSocketPath() string {

 // checkImageAvailableLocally checks if the specified Docker image is available locally.
 func checkImageAvailableLocally(ctx context.Context, cli *client.Client, imageName string) (bool, error) {
-	_, _, err := cli.ImageInspectWithRaw(ctx, imageName)
+	_, _, err := cli.ImageInspectWithRaw(ctx, imageName) //nolint:staticcheck // SA1019: deprecated but functional
 	if err != nil {
-		if client.IsErrNotFound(err) {
+		if client.IsErrNotFound(err) { //nolint:staticcheck // SA1019: deprecated but functional
 			return false, nil
 		}
-		return false, fmt.Errorf("failed to inspect image %s: %w", imageName, err)
+
+		return false, fmt.Errorf("inspecting image %s: %w", imageName, err)
 	}

 	return true, nil
@@ -502,13 +527,14 @@ func ensureImageAvailable(ctx context.Context, cli *client.Client, imageName str
 	// First check if image is available locally
 	available, err := checkImageAvailableLocally(ctx, cli, imageName)
 	if err != nil {
-		return fmt.Errorf("failed to check local image availability: %w", err)
+		return fmt.Errorf("checking local image availability: %w", err)
 	}

 	if available {
 		if verbose {
 			log.Printf("Image %s is available locally", imageName)
 		}

 		return nil
 	}

@@ -519,20 +545,21 @@ func ensureImageAvailable(ctx context.Context, cli *client.Client, imageName str

 	reader, err := cli.ImagePull(ctx, imageName, image.PullOptions{})
 	if err != nil {
-		return fmt.Errorf("failed to pull image %s: %w", imageName, err)
+		return fmt.Errorf("pulling image %s: %w", imageName, err)
 	}
 	defer reader.Close()

 	if verbose {
 		_, err = io.Copy(os.Stdout, reader)
 		if err != nil {
-			return fmt.Errorf("failed to read pull output: %w", err)
+			return fmt.Errorf("reading pull output: %w", err)
 		}
 	} else {
 		_, err = io.Copy(io.Discard, reader)
 		if err != nil {
-			return fmt.Errorf("failed to read pull output: %w", err)
+			return fmt.Errorf("reading pull output: %w", err)
 		}

 		log.Printf("Image %s pulled successfully", imageName)
 	}
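`ImagePull` returns immediately with a stream of JSON progress messages; the pull only completes once that stream has been read to EOF, which is why the non-verbose branch still drains the reader into `io.Discard` rather than skipping the copy. A minimal sketch of that contract (the image name is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"io"

	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	reader, err := cli.ImagePull(context.Background(), "alpine:latest", image.PullOptions{})
	if err != nil {
		panic(err)
	}
	defer reader.Close()

	// Draining the stream is what actually waits for the pull to finish.
	if _, err := io.Copy(io.Discard, reader); err != nil {
		panic(err)
	}

	fmt.Println("pull complete")
}
```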
@@ -547,9 +574,11 @@ func listControlFiles(logsDir string) {
 		return
 	}

-	var logFiles []string
-	var dataFiles []string
-	var dataDirs []string
+	var (
+		logFiles  []string
+		dataFiles []string
+		dataDirs  []string
+	)

 	for _, entry := range entries {
 		name := entry.Name()
@@ -578,6 +607,7 @@ func listControlFiles(logsDir string) {

 	if len(logFiles) > 0 {
 		log.Printf("Headscale logs:")

 		for _, file := range logFiles {
 			log.Printf("  %s", file)
 		}
@@ -585,9 +615,11 @@ func listControlFiles(logsDir string) {

 	if len(dataFiles) > 0 || len(dataDirs) > 0 {
 		log.Printf("Headscale data:")

 		for _, file := range dataFiles {
 			log.Printf("  %s", file)
 		}

 		for _, dir := range dataDirs {
 			log.Printf("  %s/", dir)
 		}
@@ -596,25 +628,27 @@ func listControlFiles(logsDir string) {

 // extractArtifactsFromContainers collects container logs and files from the specific test run.
 func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDir string, verbose bool) error {
-	cli, err := createDockerClient()
+	cli, err := createDockerClient(ctx)
 	if err != nil {
-		return fmt.Errorf("failed to create Docker client: %w", err)
+		return fmt.Errorf("creating Docker client: %w", err)
 	}
 	defer cli.Close()

 	// List all containers
 	containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
 	if err != nil {
-		return fmt.Errorf("failed to list containers: %w", err)
+		return fmt.Errorf("listing containers: %w", err)
 	}

 	// Get containers from the specific test run
 	currentTestContainers := getCurrentTestContainers(containers, testContainerID, verbose)

 	extractedCount := 0

 	for _, cont := range currentTestContainers {
 		// Extract container logs and tar files
-		if err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose); err != nil {
+		err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose)
+		if err != nil {
 			if verbose {
 				log.Printf("Warning: failed to extract artifacts from container %s (%s): %v", cont.name, cont.ID[:12], err)
 			}
@@ -622,6 +656,7 @@ func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDi
 			if verbose {
 				log.Printf("Extracted artifacts from container %s (%s)", cont.name, cont.ID[:12])
 			}

 			extractedCount++
 		}
 	}
@@ -645,11 +680,13 @@ func getCurrentTestContainers(containers []container.Summary, testContainerID st

 	// Find the test container to get its run ID label
 	var runID string

 	for _, cont := range containers {
 		if cont.ID == testContainerID {
 			if cont.Labels != nil {
 				runID = cont.Labels["hi.run-id"]
 			}

 			break
 		}
 	}
@@ -690,18 +727,21 @@ func getCurrentTestContainers(containers []container.Summary, testContainerID st
 // extractContainerArtifacts saves logs and tar files from a container.
 func extractContainerArtifacts(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
 	// Ensure the logs directory exists
-	if err := os.MkdirAll(logsDir, 0o755); err != nil {
-		return fmt.Errorf("failed to create logs directory: %w", err)
+	err := os.MkdirAll(logsDir, defaultDirPerm)
+	if err != nil {
+		return fmt.Errorf("creating logs directory: %w", err)
 	}

 	// Extract container logs
-	if err := extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
-		return fmt.Errorf("failed to extract logs: %w", err)
+	err = extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose)
+	if err != nil {
+		return fmt.Errorf("extracting logs: %w", err)
 	}

 	// Extract tar files for headscale containers only
 	if strings.HasPrefix(containerName, "hs-") {
-		if err := extractContainerFiles(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
+		err := extractContainerFiles(ctx, cli, containerID, containerName, logsDir, verbose)
+		if err != nil {
 			if verbose {
 				log.Printf("Warning: failed to extract files from %s: %v", containerName, err)
 			}
@@ -723,7 +763,7 @@ func extractContainerLogs(ctx context.Context, cli *client.Client, containerID,
 		Tail: "all",
 	})
 	if err != nil {
-		return fmt.Errorf("failed to get container logs: %w", err)
+		return fmt.Errorf("getting container logs: %w", err)
 	}
 	defer logReader.Close()

@@ -737,17 +777,17 @@ func extractContainerLogs(ctx context.Context, cli *client.Client, containerID,
 	// Demultiplex the Docker logs stream to separate stdout and stderr
 	_, err = stdcopy.StdCopy(&stdoutBuf, &stderrBuf, logReader)
 	if err != nil {
-		return fmt.Errorf("failed to demultiplex container logs: %w", err)
+		return fmt.Errorf("demultiplexing container logs: %w", err)
 	}

 	// Write stdout logs
-	if err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0o644); err != nil {
-		return fmt.Errorf("failed to write stdout log: %w", err)
+	if err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0o644); err != nil { //nolint:gosec,noinlineerr // log files should be readable
+		return fmt.Errorf("writing stdout log: %w", err)
 	}

 	// Write stderr logs
-	if err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0o644); err != nil {
-		return fmt.Errorf("failed to write stderr log: %w", err)
+	if err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0o644); err != nil { //nolint:gosec,noinlineerr // log files should be readable
+		return fmt.Errorf("writing stderr log: %w", err)
 	}

 	if verbose {
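Docker's log endpoint returns a single multiplexed stream when the container runs without a TTY; `stdcopy.StdCopy` splits it back into stdout and stderr. A minimal sketch of the demultiplexing step in isolation (in the real code the reader comes from `ContainerLogs`):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/docker/docker/pkg/stdcopy"
)

// demux splits a multiplexed Docker log stream into stdout and stderr.
func demux(logReader io.Reader) (stdout, stderr []byte, err error) {
	var stdoutBuf, stderrBuf bytes.Buffer
	if _, err := stdcopy.StdCopy(&stdoutBuf, &stderrBuf, logReader); err != nil {
		return nil, nil, fmt.Errorf("demultiplexing container logs: %w", err)
	}
	return stdoutBuf.Bytes(), stderrBuf.Bytes(), nil
}

func main() {
	// Stand-in for a reader returned by cli.ContainerLogs(...).
	var fake bytes.Buffer
	out, errOut, err := demux(&fake)
	fmt.Println(len(out), len(errOut), err)
}
```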
@@ -38,13 +38,13 @@ func runDoctorCheck(ctx context.Context) error {
 	}

 	// Check 3: Go installation
-	results = append(results, checkGoInstallation())
+	results = append(results, checkGoInstallation(ctx))

 	// Check 4: Git repository
-	results = append(results, checkGitRepository())
+	results = append(results, checkGitRepository(ctx))

 	// Check 5: Required files
-	results = append(results, checkRequiredFiles())
+	results = append(results, checkRequiredFiles(ctx))

 	// Display results
 	displayDoctorResults(results)
@@ -86,7 +86,7 @@ func checkDockerBinary() DoctorResult {

 // checkDockerDaemon verifies Docker daemon is running and accessible.
 func checkDockerDaemon(ctx context.Context) DoctorResult {
-	cli, err := createDockerClient()
+	cli, err := createDockerClient(ctx)
 	if err != nil {
 		return DoctorResult{
 			Name: "Docker Daemon",
@@ -124,8 +124,8 @@ func checkDockerDaemon(ctx context.Context) DoctorResult {
 }

 // checkDockerContext verifies Docker context configuration.
-func checkDockerContext(_ context.Context) DoctorResult {
-	contextInfo, err := getCurrentDockerContext()
+func checkDockerContext(ctx context.Context) DoctorResult {
+	contextInfo, err := getCurrentDockerContext(ctx)
 	if err != nil {
 		return DoctorResult{
 			Name: "Docker Context",
@@ -155,7 +155,7 @@ func checkDockerContext(_ context.Context) DoctorResult {

 // checkDockerSocket verifies Docker socket accessibility.
 func checkDockerSocket(ctx context.Context) DoctorResult {
-	cli, err := createDockerClient()
+	cli, err := createDockerClient(ctx)
 	if err != nil {
 		return DoctorResult{
 			Name: "Docker Socket",
@@ -192,7 +192,7 @@ func checkDockerSocket(ctx context.Context) DoctorResult {

 // checkGolangImage verifies the golang Docker image is available locally or can be pulled.
 func checkGolangImage(ctx context.Context) DoctorResult {
-	cli, err := createDockerClient()
+	cli, err := createDockerClient(ctx)
 	if err != nil {
 		return DoctorResult{
 			Name: "Golang Image",
@@ -251,7 +251,7 @@ func checkGolangImage(ctx context.Context) DoctorResult {
 }

 // checkGoInstallation verifies Go is installed and working.
-func checkGoInstallation() DoctorResult {
+func checkGoInstallation(ctx context.Context) DoctorResult {
 	_, err := exec.LookPath("go")
 	if err != nil {
 		return DoctorResult{
@@ -265,7 +265,8 @@ func checkGoInstallation() DoctorResult {
 		}
 	}

-	cmd := exec.Command("go", "version")
+	cmd := exec.CommandContext(ctx, "go", "version")

 	output, err := cmd.Output()
 	if err != nil {
 		return DoctorResult{
@@ -285,8 +286,9 @@ func checkGoInstallation() DoctorResult {
 }

 // checkGitRepository verifies we're in a git repository.
-func checkGitRepository() DoctorResult {
-	cmd := exec.Command("git", "rev-parse", "--git-dir")
+func checkGitRepository(ctx context.Context) DoctorResult {
+	cmd := exec.CommandContext(ctx, "git", "rev-parse", "--git-dir")

 	err := cmd.Run()
 	if err != nil {
 		return DoctorResult{
@@ -308,7 +310,7 @@ func checkGitRepository() DoctorResult {
 }

 // checkRequiredFiles verifies required files exist.
-func checkRequiredFiles() DoctorResult {
+func checkRequiredFiles(ctx context.Context) DoctorResult {
 	requiredFiles := []string{
 		"go.mod",
 		"integration/",
@@ -316,9 +318,12 @@ func checkRequiredFiles() DoctorResult {
 	}

 	var missingFiles []string

 	for _, file := range requiredFiles {
-		cmd := exec.Command("test", "-e", file)
-		if err := cmd.Run(); err != nil {
+		cmd := exec.CommandContext(ctx, "test", "-e", file)

+		err := cmd.Run()
+		if err != nil {
 			missingFiles = append(missingFiles, file)
 		}
 	}
@@ -350,6 +355,7 @@ func displayDoctorResults(results []DoctorResult) {

 	for _, result := range results {
 		var icon string

 		switch result.Status {
 		case "PASS":
 			icon = "✅"
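The doctor checks consistently move from `exec.Command` to `exec.CommandContext`, which kills the child process when the surrounding context is cancelled; without it, a hung `docker`, `go`, or `git` invocation would outlive Ctrl-C. A minimal demonstration of the difference:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// The process is killed when ctx expires; plain exec.Command
	// would block for the full ten seconds regardless.
	err := exec.CommandContext(ctx, "sleep", "10").Run()
	fmt.Println(err) // signal: killed
}
```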
@@ -79,13 +79,18 @@ func main() {
 }

 func cleanAll(ctx context.Context) error {
-	if err := killTestContainers(ctx); err != nil {
+	err := killTestContainers(ctx)
+	if err != nil {
 		return err
 	}
-	if err := pruneDockerNetworks(ctx); err != nil {
+
+	err = pruneDockerNetworks(ctx)
+	if err != nil {
 		return err
 	}
-	if err := cleanOldImages(ctx); err != nil {
+
+	err = cleanOldImages(ctx)
+	if err != nil {
 		return err
 	}

@@ -48,7 +48,9 @@ func runIntegrationTest(env *command.Env) error {
 	if runConfig.Verbose {
 		log.Printf("Running pre-flight system checks...")
 	}
-	if err := runDoctorCheck(env.Context()); err != nil {
+
+	err := runDoctorCheck(env.Context())
+	if err != nil {
 		return fmt.Errorf("pre-flight checks failed: %w", err)
 	}

@@ -66,15 +68,15 @@ func runIntegrationTest(env *command.Env) error {
 func detectGoVersion() string {
 	goModPath := filepath.Join("..", "..", "go.mod")

-	if _, err := os.Stat("go.mod"); err == nil {
+	if _, err := os.Stat("go.mod"); err == nil { //nolint:noinlineerr
 		goModPath = "go.mod"
-	} else if _, err := os.Stat("../../go.mod"); err == nil {
+	} else if _, err := os.Stat("../../go.mod"); err == nil { //nolint:noinlineerr
 		goModPath = "../../go.mod"
 	}

 	content, err := os.ReadFile(goModPath)
 	if err != nil {
-		return "1.25"
+		return "1.26rc2"
 	}

 	lines := splitLines(string(content))
@@ -89,13 +91,15 @@ func detectGoVersion() string {
 		}
 	}

-	return "1.25"
+	return "1.26rc2"
 }

 // splitLines splits a string into lines without using strings.Split.
 func splitLines(s string) []string {
-	var lines []string
-	var current string
+	var (
+		lines   []string
+		current string
+	)

 	for _, char := range s {
 		if char == '\n' {
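The diff only shows the head of `splitLines`; a plausible completion under the stated constraint (no `strings.Split`) looks like the sketch below. The tail handling is my assumption, not the file's actual code:

```go
// splitLines splits a string into lines without using strings.Split.
// Sketch of one plausible full implementation.
func splitLines(s string) []string {
	var (
		lines   []string
		current string
	)

	for _, char := range s {
		if char == '\n' {
			lines = append(lines, current)
			current = ""
			continue
		}
		current += string(char)
	}

	// Assumed: keep a trailing unterminated line.
	if current != "" {
		lines = append(lines, current)
	}

	return lines
}
```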
@@ -18,6 +18,9 @@ import (
 	"github.com/docker/docker/client"
 )

+// ErrStatsCollectionAlreadyStarted is returned when trying to start stats collection that is already running.
+var ErrStatsCollectionAlreadyStarted = errors.New("stats collection already started")
+
 // ContainerStats represents statistics for a single container.
 type ContainerStats struct {
 	ContainerID string
@@ -44,10 +47,10 @@ type StatsCollector struct {
 }

 // NewStatsCollector creates a new stats collector instance.
-func NewStatsCollector() (*StatsCollector, error) {
-	cli, err := createDockerClient()
+func NewStatsCollector(ctx context.Context) (*StatsCollector, error) {
+	cli, err := createDockerClient(ctx)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create Docker client: %w", err)
+		return nil, fmt.Errorf("creating Docker client: %w", err)
 	}

 	return &StatsCollector{
@@ -63,17 +66,19 @@ func (sc *StatsCollector) StartCollection(ctx context.Context, runID string, ver
 	defer sc.mutex.Unlock()

 	if sc.collectionStarted {
-		return errors.New("stats collection already started")
+		return ErrStatsCollectionAlreadyStarted
 	}

 	sc.collectionStarted = true

 	// Start monitoring existing containers
 	sc.wg.Add(1)

 	go sc.monitorExistingContainers(ctx, runID, verbose)

 	// Start Docker events monitoring for new containers
 	sc.wg.Add(1)

 	go sc.monitorDockerEvents(ctx, runID, verbose)

 	if verbose {
@@ -87,10 +92,12 @@ func (sc *StatsCollector) StartCollection(ctx context.Context, runID string, ver
 func (sc *StatsCollector) StopCollection() {
 	// Check if already stopped without holding lock
 	sc.mutex.RLock()

 	if !sc.collectionStarted {
 		sc.mutex.RUnlock()
 		return
 	}

 	sc.mutex.RUnlock()

 	// Signal stop to all goroutines
@@ -114,6 +121,7 @@ func (sc *StatsCollector) monitorExistingContainers(ctx context.Context, runID s
 		if verbose {
 			log.Printf("Failed to list existing containers: %v", err)
 		}

 		return
 	}

@@ -147,13 +155,13 @@ func (sc *StatsCollector) monitorDockerEvents(ctx context.Context, runID string,
 		case event := <-events:
 			if event.Type == "container" && event.Action == "start" {
 				// Get container details
-				containerInfo, err := sc.client.ContainerInspect(ctx, event.ID)
+				containerInfo, err := sc.client.ContainerInspect(ctx, event.ID) //nolint:staticcheck // SA1019: use Actor.ID
 				if err != nil {
 					continue
 				}

 				// Convert to types.Container format for consistency
-				cont := types.Container{
+				cont := types.Container{ //nolint:staticcheck // SA1019: use container.Summary
 					ID:     containerInfo.ID,
 					Names:  []string{containerInfo.Name},
 					Labels: containerInfo.Config.Labels,
@@ -167,13 +175,14 @@ func (sc *StatsCollector) monitorDockerEvents(ctx context.Context, runID string,
 			if verbose {
 				log.Printf("Error in Docker events stream: %v", err)
 			}

 			return
 		}
 	}
 }

 // shouldMonitorContainer determines if a container should be monitored.
-func (sc *StatsCollector) shouldMonitorContainer(cont types.Container, runID string) bool {
+func (sc *StatsCollector) shouldMonitorContainer(cont types.Container, runID string) bool { //nolint:staticcheck // SA1019: use container.Summary
 	// Check if it has the correct run ID label
 	if cont.Labels == nil || cont.Labels["hi.run-id"] != runID {
 		return false
@@ -213,6 +222,7 @@ func (sc *StatsCollector) startStatsForContainer(ctx context.Context, containerI
 	}

 	sc.wg.Add(1)

 	go sc.collectStatsForContainer(ctx, containerID, verbose)
 }

@@ -226,12 +236,14 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
 		if verbose {
 			log.Printf("Failed to get stats stream for container %s: %v", containerID[:12], err)
 		}

 		return
 	}
 	defer statsResponse.Body.Close()

 	decoder := json.NewDecoder(statsResponse.Body)
-	var prevStats *container.Stats
+
+	var prevStats *container.Stats //nolint:staticcheck // SA1019: use StatsResponse

 	for {
 		select {
@@ -240,12 +252,15 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
 		case <-ctx.Done():
 			return
 		default:
-			var stats container.Stats
-			if err := decoder.Decode(&stats); err != nil {
+			var stats container.Stats //nolint:staticcheck // SA1019: use StatsResponse
+
+			err := decoder.Decode(&stats)
+			if err != nil {
 				// EOF is expected when container stops or stream ends
 				if err.Error() != "EOF" && verbose {
 					log.Printf("Failed to decode stats for container %s: %v", containerID[:12], err)
 				}

 				return
 			}

@@ -261,8 +276,10 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
 			// Store the sample (skip first sample since CPU calculation needs previous stats)
 			if prevStats != nil {
 				// Get container stats reference without holding the main mutex
-				var containerStats *ContainerStats
-				var exists bool
+				var (
+					containerStats *ContainerStats
+					exists         bool
+				)

 				sc.mutex.RLock()
 				containerStats, exists = sc.containers[containerID]
@@ -286,7 +303,7 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
 }

 // calculateCPUPercent calculates CPU usage percentage from Docker stats.
-func calculateCPUPercent(prevStats, stats *container.Stats) float64 {
+func calculateCPUPercent(prevStats, stats *container.Stats) float64 { //nolint:staticcheck // SA1019: use StatsResponse
 	// CPU calculation based on Docker's implementation
 	cpuDelta := float64(stats.CPUStats.CPUUsage.TotalUsage) - float64(prevStats.CPUStats.CPUUsage.TotalUsage)
 	systemDelta := float64(stats.CPUStats.SystemUsage) - float64(prevStats.CPUStats.SystemUsage)
@@ -331,10 +348,12 @@ type StatsSummary struct {
 func (sc *StatsCollector) GetSummary() []ContainerStatsSummary {
 	// Take snapshot of container references without holding main lock long
 	sc.mutex.RLock()

 	containerRefs := make([]*ContainerStats, 0, len(sc.containers))
 	for _, containerStats := range sc.containers {
 		containerRefs = append(containerRefs, containerStats)
 	}

 	sc.mutex.RUnlock()

 	summaries := make([]ContainerStatsSummary, 0, len(containerRefs))
@@ -384,23 +403,25 @@ func calculateStatsSummary(values []float64) StatsSummary {
 		return StatsSummary{}
 	}

-	min := values[0]
-	max := values[0]
+	minVal := values[0]
+	maxVal := values[0]
 	sum := 0.0

 	for _, value := range values {
-		if value < min {
-			min = value
+		if value < minVal {
+			minVal = value
 		}
-		if value > max {
-			max = value
+
+		if value > maxVal {
+			maxVal = value
 		}

 		sum += value
 	}

 	return StatsSummary{
-		Min:     min,
-		Max:     max,
+		Min:     minVal,
+		Max:     maxVal,
 		Average: sum / float64(len(values)),
 	}
 }
@@ -434,6 +455,7 @@ func (sc *StatsCollector) CheckMemoryLimits(hsLimitMB, tsLimitMB float64) []Memo
 	}

 	summaries := sc.GetSummary()

 	var violations []MemoryViolation

 	for _, summary := range summaries {
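`calculateCPUPercent` is only partially visible here, but the comment points at Docker's own formula: the percentage is the container's CPU-time delta divided by the host's CPU-time delta between two samples, scaled by the number of online CPUs. That is also why the collector skips the first sample, since there is no previous reading to diff against. A hedged sketch of the complete calculation; the `OnlineCPUs` handling is my assumption about the elided tail of the function:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

// Sketch of the docker-stats CPU formula; not the file's exact tail.
func calculateCPUPercent(prevStats, stats *container.Stats) float64 { //nolint:staticcheck
	cpuDelta := float64(stats.CPUStats.CPUUsage.TotalUsage) - float64(prevStats.CPUStats.CPUUsage.TotalUsage)
	systemDelta := float64(stats.CPUStats.SystemUsage) - float64(prevStats.CPUStats.SystemUsage)

	if systemDelta <= 0.0 || cpuDelta < 0.0 {
		return 0.0
	}

	return (cpuDelta / systemDelta) * float64(stats.CPUStats.OnlineCPUs) * 100.0
}

func main() {
	var prev, cur container.Stats //nolint:staticcheck
	prev.CPUStats.CPUUsage.TotalUsage = 1_000_000
	cur.CPUStats.CPUUsage.TotalUsage = 2_000_000
	prev.CPUStats.SystemUsage = 10_000_000
	cur.CPUStats.SystemUsage = 20_000_000
	cur.CPUStats.OnlineCPUs = 4

	fmt.Printf("%.1f%%\n", calculateCPUPercent(&prev, &cur)) // 40.0%
}
```

Separately, the `min`/`max` to `minVal`/`maxVal` rename in `calculateStatsSummary` sidesteps the `min` and `max` builtins introduced in Go 1.21, which the old variable names shadowed.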
@@ -2,6 +2,7 @@ package main

 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"os"

@@ -15,7 +16,10 @@ type MapConfig struct {
 	Directory string `flag:"directory,Directory to read map responses from"`
 }

-var mapConfig MapConfig
+var (
+	mapConfig            MapConfig
+	errDirectoryRequired = errors.New("directory is required")
+)

 func main() {
 	root := command.C{
@@ -40,7 +44,7 @@ func main() {
 // runIntegrationTest executes the integration test workflow.
 func runOnline(env *command.Env) error {
 	if mapConfig.Directory == "" {
-		return fmt.Errorf("directory is required")
+		return errDirectoryRequired
 	}

 	resps, err := mapper.ReadMapResponsesFromDirectory(mapConfig.Directory)
@@ -57,5 +61,6 @@ func runOnline(env *command.Env) error {

 	os.Stderr.Write(out)
 	os.Stderr.Write([]byte("\n"))

 	return nil
 }
@@ -5,16 +5,16 @@ to provide self-hosters and hobbyists with an open-source server they can use fo
 provides an overview of Headscale's features and compatibility with the Tailscale control server:

 - [x] Full "base" support of Tailscale's features
-- [x] Node registration
-    - [x] Interactive
-    - [x] Pre authenticated key
+- [x] [Node registration](../ref/registration.md)
+    - [x] [Web authentication](../ref/registration.md#web-authentication)
+    - [x] [Pre authenticated key](../ref/registration.md#pre-authenticated-key)
 - [x] [DNS](../ref/dns.md)
     - [x] [MagicDNS](https://tailscale.com/kb/1081/magicdns)
     - [x] [Global and restricted nameservers (split DNS)](https://tailscale.com/kb/1054/dns#nameservers)
     - [x] [search domains](https://tailscale.com/kb/1054/dns#search-domains)
     - [x] [Extra DNS records (Headscale only)](../ref/dns.md#setting-extra-dns-records)
 - [x] [Taildrop (File Sharing)](https://tailscale.com/kb/1106/taildrop)
-- [x] [Tags](https://tailscale.com/kb/1068/tags)
+- [x] [Tags](../ref/tags.md)
 - [x] [Routes](../ref/routes.md)
     - [x] [Subnet routers](../ref/routes.md#subnet-router)
     - [x] [Exit nodes](../ref/routes.md#exit-node)
@@ -222,7 +222,7 @@ Allows access to the internet through [exit nodes](routes.md#exit-node). Can onl

 ### `autogroup:member`

-Includes all untagged devices.
+Includes all [personal (untagged) devices](registration.md/#identity-model).

 ```json
 {
@@ -234,7 +234,7 @@ Includes all untagged devices.

 ### `autogroup:tagged`

-Includes all devices that have at least one tag.
+Includes all devices that [have at least one tag](registration.md/#identity-model).

 ```json
 {
@@ -54,7 +54,7 @@ Headscale server at `/swagger` for details.

 ```console
 curl -H "Authorization: Bearer <API_KEY>" \
-     -d user=<USER> -d key=<KEY> \
+     -d user=<USER> -d key=<REGISTRATION_KEY> \
      https://headscale.example.com/api/v1/node/register
 ```
@@ -250,7 +250,7 @@ Authelia is fully supported by Headscale.

 ### Authentik

 - Authentik is fully supported by Headscale.
-- [Headscale does not JSON Web Encryption](https://github.com/juanfont/headscale/issues/2446). Leave the field
+- [Headscale does not support JSON Web Encryption](https://github.com/juanfont/headscale/issues/2446). Leave the field
   `Encryption Key` in the providers section unset.

 ### Google OAuth
141 docs/ref/registration.md Normal file
@@ -0,0 +1,141 @@
# Registration methods

Headscale supports multiple ways to register a node. The preferred registration method depends on the identity of a node
and your use case.

## Identity model

Tailscale's identity model distinguishes between personal and tagged nodes:

- A personal node (or user-owned node) is owned by a human and typically refers to end-user devices such as laptops,
  workstations or mobile phones. End-user devices are managed by a single user.
- A tagged node (or service-based node or non-human node) provides services to the network. Common examples include web-
  and database servers. Those nodes are typically managed by a team of users. Some additional restrictions apply for
  tagged nodes, e.g. a tagged node is not allowed to [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh) into a
  personal node.

Headscale implements Tailscale's identity model and distinguishes between personal and tagged nodes where a personal
node is owned by a Headscale user and a tagged node is owned by a tag. Tagged devices are grouped under the special user
`tagged-devices`.

## Registration methods

There are two main ways to register new nodes, [web authentication](#web-authentication) and [registration with a pre
authenticated key](#pre-authenticated-key). Both methods can be used to register personal and tagged nodes.

### Web authentication

Web authentication is the default method to register a new node. It's interactive, where the client initiates the
registration and the Headscale administrator needs to approve the new node before it is allowed to join the network. A
node can be approved with:

- Headscale CLI (described in this documentation)
- [Headscale API](api.md)
- Or delegated to an identity provider via [OpenID Connect](oidc.md)

Web authentication relies on the presence of a Headscale user. Use the `headscale users` command to create a new user:

```console
headscale users create <USER>
```

=== "Personal devices"

    Run `tailscale up` to log in your personal device:

    ```console
    tailscale up --login-server <YOUR_HEADSCALE_URL>
    ```

    Usually, a browser window with further instructions is opened. This page explains how to complete the registration
    on your Headscale server and it also prints the registration key required to approve the node:

    ```console
    headscale nodes register --user <USER> --key <REGISTRATION_KEY>
    ```

    Congratulations, the registration of your personal node is complete and it should be listed as "online" in the
    output of `headscale nodes list`. The "User" column displays `<USER>` as the owner of the node.

=== "Tagged devices"

    Your Headscale user needs to be authorized to register tagged devices. This authorization is specified in the
    [`tagOwners`](https://tailscale.com/kb/1337/policy-syntax#tag-owners) section of the [ACL](acls.md). A simple
    example looks like this:

    ```json title="The user alice can register nodes tagged with tag:server"
    {
      "tagOwners": {
        "tag:server": ["alice@"]
      },
      // more rules
    }
    ```

    Run `tailscale up` and provide at least one tag to log in a tagged device:

    ```console
    tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:<TAG>
    ```

    Usually, a browser window with further instructions is opened. This page explains how to complete the registration
    on your Headscale server and it also prints the registration key required to approve the node:

    ```console
    headscale nodes register --user <USER> --key <REGISTRATION_KEY>
    ```

    Headscale checks that `<USER>` is allowed to register a node with the specified tag(s) and then transfers ownership
    of the new node to the special user `tagged-devices`. The registration of a tagged node is complete and it should be
    listed as "online" in the output of `headscale nodes list`. The "User" column displays `tagged-devices` as the owner
    of the node. See the "Tags" column for the list of assigned tags.

### Pre authenticated key

Registration with a pre authenticated key (or auth key) is a non-interactive way to register a new node. The Headscale
administrator creates a preauthkey upfront and this preauthkey can then be used to register a node non-interactively.
It's best suited for automation.

=== "Personal devices"

    A personal node is always assigned to a Headscale user. Use the `headscale users` command to create a new user:

    ```console
    headscale users create <USER>
    ```

    Use the `headscale user list` command to learn its `<USER_ID>` and create a new pre authenticated key for your user:

    ```console
    headscale preauthkeys create --user <USER_ID>
    ```

    The above prints a pre authenticated key with the default settings (can be used once and is valid for one hour). Use
    this auth key to register a node non-interactively:

    ```console
    tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
    ```

    Congratulations, the registration of your personal node is complete and it should be listed as "online" in the
    output of `headscale nodes list`. The "User" column displays `<USER>` as the owner of the node.

=== "Tagged devices"

    Create a new pre authenticated key and provide at least one tag:

    ```console
    headscale preauthkeys create --tags tag:<TAG>
    ```

    The above prints a pre authenticated key with the default settings (can be used once and is valid for one hour). Use
    this auth key to register a node non-interactively. You don't need to provide the `--advertise-tags` parameter as
    the tags are automatically read from the pre authenticated key:

    ```console
    tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
    ```

    The registration of a tagged node is complete and it should be listed as "online" in the output of `headscale nodes
    list`. The "User" column displays `tagged-devices` as the owner of the node. See the "Tags" column for the list of
    assigned tags.
54 docs/ref/tags.md Normal file
@@ -0,0 +1,54 @@
# Tags

Headscale supports Tailscale tags. Please read [Tailscale's tag documentation](https://tailscale.com/kb/1068/tags) to
learn how tags work and how to use them.

Tags can be applied during [node registration](registration.md):

- using the `--advertise-tags` flag, see [web authentication for tagged devices](registration.md#__tabbed_1_2)
- using a tagged pre authenticated key, see [how to create and use it](registration.md#__tabbed_2_2)

Administrators can manage tags with:

- Headscale CLI
- [Headscale API](api.md)

## Common operations

### Manage tags for a node

Run `headscale nodes list` to list the tags for a node.

Use the `headscale nodes tag` command to modify the tags for a node. At least one tag is required and multiple tags can
be provided as a comma-separated list. The following command sets the tags `tag:server` and `tag:prod` on the node with
ID 1:

```console
headscale nodes tag -i 1 -t tag:server,tag:prod
```

### Convert from personal to tagged node

Use the `headscale nodes tag` command to convert a personal (user-owned) node to a tagged node:

```console
headscale nodes tag -i <NODE_ID> -t <TAG>
```

The node is now owned by the special user `tagged-devices` and has the specified tags assigned to it.

### Convert from tagged to personal node

Tagged nodes can return to personal (user-owned) nodes by re-authenticating with:

```console
tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags= --force-reauth
```

Usually, a browser window with further instructions is opened. This page explains how to complete the registration on
your Headscale server and it also prints the registration key required to approve the node:

```console
headscale nodes register --user <USER> --key <REGISTRATION_KEY>
```

All previously assigned tags get removed and the node is now owned by the user specified in the above command.
@@ -6,7 +6,7 @@ This documentation has the goal of showing how a user can use the official Andro

 Install the official Tailscale Android client from the [Google Play Store](https://play.google.com/store/apps/details?id=com.tailscale.ipn) or [F-Droid](https://f-droid.org/packages/com.tailscale.ipn/).

-## Connect via normal, interactive login
+## Connect via web authentication

 - Open the app and select the settings menu in the upper-right corner
 - Tap on `Accounts`
@@ -15,7 +15,7 @@ Install the official Tailscale Android client from the [Google Play Store](https
 - The client connects automatically as soon as the node registration is complete on headscale. Until then, nothing is
   visible in the server logs.

-## Connect using a preauthkey
+## Connect using a pre authenticated key

 - Open the app and select the settings menu in the upper-right corner
 - Tap on `Accounts`
@@ -24,5 +24,5 @@ Install the official Tailscale Android client from the [Google Play Store](https
 - Open the settings menu in the upper-right corner
 - Tap on `Accounts`
 - In the kebab menu icon (three dots) in the upper-right corner select `Use an auth key`
-- Enter your [preauthkey generated from headscale](../getting-started.md#using-a-preauthkey)
+- Enter your [preauthkey generated from headscale](../../ref/registration.md#pre-authenticated-key)
 - If needed, tap `Log in` on the main screen. You should now be connected to your headscale.
@@ -60,10 +60,9 @@ options, run:

 ## Manage headscale users

-In headscale, a node (also known as machine or device) is always assigned to a
-headscale user. Such a headscale user may have many nodes assigned to them and
-can be managed with the `headscale users` command. Invoke the built-in help for
-more information: `headscale users --help`.
+In headscale, a node (also known as machine or device) is [typically assigned to a headscale
+user](../ref/registration.md#identity-model). Such a headscale user may have many nodes assigned to them and can be
+managed with the `headscale users` command. Invoke the built-in help for more information: `headscale users --help`.

 ### Create a headscale user

@@ -97,11 +96,12 @@ more information: `headscale users --help`.

 ## Register a node

-One has to register a node first to use headscale as coordination with Tailscale. The following examples work for the
-Tailscale client on Linux/BSD operating systems. Alternatively, follow the instructions to connect
-[Android](connect/android.md), [Apple](connect/apple.md) or [Windows](connect/windows.md) devices.
+One has to [register a node](../ref/registration.md) first to use headscale as coordination server with Tailscale. The
+following examples work for the Tailscale client on Linux/BSD operating systems. Alternatively, follow the instructions
+to connect [Android](connect/android.md), [Apple](connect/apple.md) or [Windows](connect/windows.md) devices. Read
+[registration methods](../ref/registration.md) for an overview of available registration methods.

-### Normal, interactive login
+### [Web authentication](../ref/registration.md#web-authentication)

 On a client machine, run the `tailscale up` command and provide the FQDN of your headscale instance as argument:

@@ -109,23 +109,23 @@ On a client machine, run the `tailscale up` command and provide the FQDN of your
 tailscale up --login-server <YOUR_HEADSCALE_URL>
 ```

-Usually, a browser window with further instructions is opened and contains the value for `<YOUR_MACHINE_KEY>`. Approve
-and register the node on your headscale server:
+Usually, a browser window with further instructions is opened. This page explains how to complete the registration on
+your headscale server and it also prints the registration key required to approve the node:

 === "Native"

     ```shell
-    headscale nodes register --user <USER> --key <YOUR_MACHINE_KEY>
+    headscale nodes register --user <USER> --key <REGISTRATION_KEY>
     ```

 === "Container"

     ```shell
     docker exec -it headscale \
-        headscale nodes register --user <USER> --key <YOUR_MACHINE_KEY>
+        headscale nodes register --user <USER> --key <REGISTRATION_KEY>
     ```

-### Using a preauthkey
+### [Pre authenticated key](../ref/registration.md#pre-authenticated-key)

 It is also possible to generate a preauthkey and register a node non-interactively. First, generate a preauthkey on the
 headscale instance. By default, the key is valid for one hour and can only be used once (see `headscale preauthkeys
6 flake.lock generated
@@ -20,11 +20,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1768875095,
-        "narHash": "sha256-dYP3DjiL7oIiiq3H65tGIXXIT1Waiadmv93JS0sS+8A=",
+        "lastModified": 1770141374,
+        "narHash": "sha256-yD4K/vRHPwXbJf5CK3JkptBA6nFWUKNX/jlFp2eKEQc=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "ed142ab1b3a092c4d149245d0c4126a5d7ea00b0",
+        "rev": "41965737c1797c1d83cfb0b644ed0840a6220bd1",
         "type": "github"
       },
       "original": {
60 flake.nix
@@ -26,8 +26,8 @@
     overlays.default = _: prev:
       let
         pkgs = nixpkgs.legacyPackages.${prev.stdenv.hostPlatform.system};
-        buildGo = pkgs.buildGo125Module;
-        vendorHash = "sha256-dWsDgI5K+8mFw4PA5gfFBPCSqBJp5RcZzm0ML1+HsWw=";
+        buildGo = pkgs.buildGo126Module;
+        vendorHash = "sha256-9BvphYDAxzwooyVokI3l+q1wRuRsWn/qM+NpWUgqJH0=";
       in
       {
         headscale = buildGo {
@@ -62,16 +62,16 @@

         protoc-gen-grpc-gateway = buildGo rec {
           pname = "grpc-gateway";
-          version = "2.27.4";
+          version = "2.27.7";

           src = pkgs.fetchFromGitHub {
             owner = "grpc-ecosystem";
             repo = "grpc-gateway";
             rev = "v${version}";
-            sha256 = "sha256-4bhEQTVV04EyX/qJGNMIAQDcMWcDVr1tFkEjBHpc2CA=";
+            sha256 = "sha256-6R0EhNnOBEISJddjkbVTcBvUuU5U3r9Hu2UPfAZDep4=";
           };

-          vendorHash = "sha256-ohZW/uPdt08Y2EpIQ2yeyGSjV9O58+QbQQqYrs6O8/g=";
+          vendorHash = "sha256-SOAbRrzMf2rbKaG9PGSnPSLY/qZVgbHcNjOLmVonycY=";

           nativeBuildInputs = [ pkgs.installShellFiles ];

@@ -94,14 +94,46 @@
           subPackages = [ "." ];
         };

-        # Upstream does not override buildGoModule properly,
-        # importing a specific module, so comment out for now.
-        # golangci-lint = prev.golangci-lint.override {
-        #   buildGoModule = buildGo;
-        # };
-        # golangci-lint-langserver = prev.golangci-lint.override {
-        #   buildGoModule = buildGo;
-        # };
+        # Build golangci-lint with Go 1.26 (upstream uses hardcoded Go version)
+        golangci-lint = buildGo rec {
+          pname = "golangci-lint";
+          version = "2.8.0";
+
+          src = pkgs.fetchFromGitHub {
+            owner = "golangci";
+            repo = "golangci-lint";
+            rev = "v${version}";
+            hash = "sha256-w6MAOirj8rPHYbKrW4gJeemXCS64fNtteV6IioqIQTQ=";
+          };
+
+          vendorHash = "sha256-/Vqo/yrmGh6XipELQ9NDtlMEO2a654XykmvnMs0BdrI=";
+
+          subPackages = [ "cmd/golangci-lint" ];
+
+          nativeBuildInputs = [ pkgs.installShellFiles ];
+
+          ldflags = [
+            "-s"
+            "-w"
+            "-X main.version=${version}"
+            "-X main.commit=v${version}"
+            "-X main.date=1970-01-01T00:00:00Z"
+          ];
+
+          postInstall = ''
+            for shell in bash zsh fish; do
+              HOME=$TMPDIR $out/bin/golangci-lint completion $shell > golangci-lint.$shell
+              installShellCompletion golangci-lint.$shell
+            done
+          '';
+
+          meta = {
+            description = "Fast linters runner for Go";
+            homepage = "https://golangci-lint.run/";
+            changelog = "https://github.com/golangci/golangci-lint/blob/v${version}/CHANGELOG.md";
+            mainProgram = "golangci-lint";
+          };
+        };

         # The package uses buildGo125Module, not the convention.
         # goreleaser = prev.goreleaser.override {
@@ -132,7 +164,7 @@
           overlays = [ self.overlays.default ];
           inherit system;
         };
-        buildDeps = with pkgs; [ git go_1_25 gnumake ];
+        buildDeps = with pkgs; [ git go_1_26 gnumake ];
         devDeps = with pkgs;
           buildDeps
           ++ [
138 go.mod
@@ -1,13 +1,13 @@
 module github.com/juanfont/headscale

-go 1.25.5
+go 1.26rc2

 require (
 	github.com/arl/statsviz v0.8.0
 	github.com/cenkalti/backoff/v5 v5.0.3
 	github.com/chasefleming/elem-go v0.31.0
 	github.com/coder/websocket v1.8.14
-	github.com/coreos/go-oidc/v3 v3.16.0
+	github.com/coreos/go-oidc/v3 v3.17.0
 	github.com/creachadair/command v0.2.0
 	github.com/creachadair/flax v0.0.5
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
@@ -15,11 +15,11 @@ require (
 	github.com/fsnotify/fsnotify v1.9.0
 	github.com/glebarez/sqlite v1.11.0
 	github.com/go-gormigrate/gormigrate/v2 v2.1.5
-	github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced
+	github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e
 	github.com/gofrs/uuid/v5 v5.4.0
 	github.com/google/go-cmp v0.7.0
 	github.com/gorilla/mux v1.8.1
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7
 	github.com/jagottsicher/termcolor v1.0.2
 	github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25
 	github.com/ory/dockertest/v3 v3.12.0
@@ -28,7 +28,7 @@ require (
 	github.com/prometheus/client_golang v1.23.2
 	github.com/prometheus/common v0.67.5
 	github.com/pterm/pterm v0.12.82
-	github.com/puzpuzpuz/xsync/v4 v4.3.0
+	github.com/puzpuzpuz/xsync/v4 v4.4.0
 	github.com/rs/zerolog v1.34.0
 	github.com/samber/lo v1.52.0
 	github.com/sasha-s/go-deadlock v0.3.6
@@ -40,18 +40,18 @@ require (
 	github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09
 	github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
 	go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
-	golang.org/x/crypto v0.46.0
-	golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546
-	golang.org/x/net v0.48.0
+	golang.org/x/crypto v0.47.0
+	golang.org/x/exp v0.0.0-20260112195511-716be5621a96
+	golang.org/x/net v0.49.0
 	golang.org/x/oauth2 v0.34.0
 	golang.org/x/sync v0.19.0
-	google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b
+	google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20
 	google.golang.org/grpc v1.78.0
 	google.golang.org/protobuf v1.36.11
 	gopkg.in/yaml.v3 v3.0.1
 	gorm.io/driver/postgres v1.6.0
 	gorm.io/gorm v1.31.1
-	tailscale.com v1.94.0
+	tailscale.com v1.94.1
 	zgo.at/zcache/v2 v2.4.1
 	zombiezen.com/go/postgrestest v1.0.1
 )
@@ -80,6 +80,14 @@ require (
 	modernc.org/sqlite v1.44.3
 )

+// NOTE: gvisor must be updated in lockstep with
+// tailscale.com. The version used here should match
+// the version required by the tailscale.com dependency.
+// To find the correct version, check tailscale.com's
+// go.mod file for the gvisor.dev/gvisor version:
+// https://github.com/tailscale/tailscale/blob/main/go.mod
 require gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect

 require (
 	atomicgo.dev/cursor v0.2.0 // indirect
 	atomicgo.dev/keyboard v0.2.9 // indirect
@@ -90,102 +98,107 @@ require (
 	github.com/Microsoft/go-winio v0.6.2 // indirect
 	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
 	github.com/akutz/memconn v0.1.0 // indirect
-	github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
-	github.com/aws/aws-sdk-go-v2 v1.41.0 // indirect
-	github.com/aws/aws-sdk-go-v2/config v1.29.5 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
+	github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e // indirect
+	github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
+	github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
+	github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
 	github.com/aws/smithy-go v1.24.0 // indirect
-	github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 // indirect
+	github.com/axiomhq/hyperloglog v0.2.6 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/clipperhouse/uax29/v2 v2.2.0 // indirect
+	github.com/clipperhouse/stringish v0.1.1 // indirect
+	github.com/clipperhouse/uax29/v2 v2.5.0 // indirect
 	github.com/containerd/console v1.0.5 // indirect
 	github.com/containerd/continuity v0.4.5 // indirect
 	github.com/containerd/errdefs v1.0.0 // indirect
 	github.com/containerd/errdefs/pkg v0.3.0 // indirect
-	github.com/creachadair/mds v0.25.10 // indirect
-	github.com/creachadair/msync v0.7.1 // indirect
-	github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
-	github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect
+	github.com/creachadair/mds v0.25.15 // indirect
+	github.com/creachadair/msync v0.8.2 // indirect
+	github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d // indirect
+	github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 // indirect
 	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/cli v28.5.1+incompatible // indirect
+	github.com/docker/cli v29.2.1+incompatible // indirect
 	github.com/docker/go-connections v0.6.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/felixge/fgprof v0.9.5 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fxamacker/cbor/v2 v2.9.0 // indirect
-	github.com/gaissmai/bart v0.18.0 // indirect
+	github.com/gaissmai/bart v0.26.1 // indirect
 	github.com/glebarez/go-sqlite v1.22.0 // indirect
 	github.com/go-jose/go-jose/v3 v3.0.4 // indirect
 	github.com/go-jose/go-jose/v4 v4.1.3 // indirect
 	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
-	github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
-	github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
+	github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
+	github.com/godbus/dbus/v5 v5.2.2 // indirect
+	github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
 	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/btree v1.1.3 // indirect
 	github.com/google/go-github v17.0.0+incompatible // indirect
-	github.com/google/go-querystring v1.1.0 // indirect
-	github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
+	github.com/google/go-querystring v1.2.0 // indirect
+	github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gookit/color v1.6.0 // indirect
 	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
-	github.com/hashicorp/go-version v1.7.0 // indirect
+	github.com/hashicorp/go-version v1.8.0 // indirect
 	github.com/hdevalence/ed25519consensus v0.2.0 // indirect
 	github.com/huin/goupnp v1.3.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
-	github.com/jackc/pgx/v5 v5.7.6 // indirect
+	github.com/jackc/pgx/v5 v5.8.0 // indirect
 	github.com/jackc/puddle/v2 v2.2.2 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
-	github.com/jsimonetti/rtnetlink v1.4.1 // indirect
-	github.com/klauspost/compress v1.18.2 // indirect
-	github.com/lib/pq v1.10.9 // indirect
+	github.com/jsimonetti/rtnetlink v1.4.2 // indirect
+	github.com/kamstrup/intmap v0.5.2 // indirect
+	github.com/klauspost/compress v1.18.3 // indirect
+	github.com/lib/pq v1.11.1 // indirect
 	github.com/lithammer/fuzzysearch v1.1.8 // indirect
 	github.com/mattn/go-colorable v0.1.14 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.19 // indirect
-	github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect
-	github.com/mdlayher/socket v0.5.0 // indirect
+	github.com/mdlayher/netlink v1.8.0 // indirect
+	github.com/mdlayher/socket v0.5.1 // indirect
 	github.com/mitchellh/go-ps v1.0.0 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/moby/api v1.53.0 // indirect
 	github.com/moby/moby/client v0.2.2 // indirect
 	github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/morikuni/aec v1.1.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/opencontainers/runc v1.3.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 // indirect
|
||||
github.com/pires/go-proxyproto v0.8.1 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 // indirect
|
||||
github.com/pires/go-proxyproto v0.9.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus-community/pro-bing v0.4.0 // indirect
|
||||
github.com/prometheus-community/pro-bing v0.7.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/prometheus/procfs v0.19.2 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/safchain/ethtool v0.3.0 // indirect
|
||||
github.com/safchain/ethtool v0.7.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.12.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sirupsen/logrus v1.9.4 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
@@ -193,8 +206,8 @@ require (
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc // indirect
github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a // indirect
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect
github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d // indirect
github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368 // indirect
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
@@ -202,24 +215,23 @@ require (
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect
go.opentelemetry.io/otel v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.39.0 // indirect
go.opentelemetry.io/otel/trace v1.39.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect
go.opentelemetry.io/otel v1.40.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect
go.opentelemetry.io/otel/metric v1.40.0 // indirect
go.opentelemetry.io/otel/trace v1.40.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.39.0 // indirect
golang.org/x/term v0.39.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.41.0 // indirect
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect
)

tool (

309
go.sum
@@ -33,53 +33,55 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEV
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/arl/statsviz v0.8.0 h1:O6GjjVxEDxcByAucOSl29HaGYLXsuwA3ujJw8H9E7/U=
github.com/arl/statsviz v0.8.0/go.mod h1:XlrbiT7xYT03xaW9JMMfD8KFUhBOESJwfyNJu83PbB0=
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k=
github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg=
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y=
github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw=
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw=
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 h1:JBod0SnNqcWQ0+uAyzeRFG1zCHotW8DukumYYyNy0zo=
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=
github.com/aws/aws-sdk-go-v2/service/s3 v1.93.2 h1:U3ygWUhCpiSPYSHOrRhb3gOl9T5Y3kB8k5Vjs//57bE=
github.com/aws/aws-sdk-go-v2/service/s3 v1.93.2/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 h1:IOdss+igJDFdic9w3WKwxGCmHqUxydvIhJOm9LJ32Dk=
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 h1:bXAPYSbdYbS5VTy92NIUbeDI1qyggi+JYh5op9IFlcQ=
github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
github.com/axiomhq/hyperloglog v0.2.6 h1:sRhvvF3RIXWQgAXaTphLp4yJiX4S0IN3MWTaAgZoRJw=
github.com/axiomhq/hyperloglog v0.2.6/go.mod h1:YjX/dQqCR/7QYX0g8mu8UZAjpIenz1FKM71UEsjFoTo=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
@@ -101,8 +103,10 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY=
github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U=
github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
@@ -118,38 +122,38 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creachadair/command v0.2.0 h1:qTA9cMMhZePAxFoNdnk6F6nn94s1qPndIg9hJbqI9cA=
github.com/creachadair/command v0.2.0/go.mod h1:j+Ar+uYnFsHpkMeV9kGj6lJ45y9u2xqtg8FYy6cm+0o=
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
github.com/creachadair/mds v0.25.10 h1:9k9JB35D1xhOCFl0liBhagBBp8fWWkKZrA7UXsfoHtA=
github.com/creachadair/mds v0.25.10/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs=
github.com/creachadair/msync v0.7.1 h1:SeZmuEBXQPe5GqV/C94ER7QIZPwtvFbeQiykzt/7uho=
github.com/creachadair/msync v0.7.1/go.mod h1:8CcFlLsSujfHE5wWm19uUBLHIPDAUr6LXDwneVMO008=
github.com/creachadair/mds v0.25.15 h1:i8CUqtfgbCqbvZ++L7lm8No3cOeic9YKF4vHEvEoj+Y=
github.com/creachadair/mds v0.25.15/go.mod h1:XtMfRW15sjd1iOi1Z1k+dq0pRsR5xPbulpoTrpyhk8w=
github.com/creachadair/msync v0.8.2 h1:ujvc/SVJPn+bFwmjUHucXNTTn3opVe2YbQ46mBCnP08=
github.com/creachadair/msync v0.8.2/go.mod h1:LzxqD9kfIl/O3DczkwOgJplLPqwrTbIhINlf9bHIsEY=
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0=
github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 h1:vrC07UZcgPzu/OjWsmQKMGg3LoPSz9jh/pQXIrHjUj4=
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ=
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8=
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d h1:QRKpU+9ZBDs62LyBfwhZkJdB5DJX2Sm3p4kUh7l1aA0=
github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d/go.mod h1:SUxUaAK/0UG5lYyZR1L1nC4AaYYvSSYTWQSH3FPcxKU=
github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 h1:ucRHb6/lvW/+mTEIGbvhcYU3S8+uSNkuMjx/qZFfhtM=
github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY=
github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v29.2.1+incompatible h1:n3Jt0QVCN65eiVBoUTZQM9mcQICCJt3akW4pKAbKdJg=
github.com/docker/cli v29.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
@@ -169,8 +173,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gaissmai/bart v0.18.0 h1:jQLBT/RduJu0pv/tLwXE+xKPgtWJejbxuXAR+wLJafo=
github.com/gaissmai/bart v0.18.0/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY=
github.com/gaissmai/bart v0.26.1 h1:+w4rnLGNlA2GDVn382Tfe3jOsK5vOr5n4KmigJ9lbTo=
github.com/gaissmai/bart v0.26.1/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=
github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I=
github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo=
github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
@@ -183,8 +187,8 @@ github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFS
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I=
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e h1:Lf/gRkoycfOBPa42vU2bbgPurFong6zXeFtPoxholzU=
github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e/go.mod h1:uNVvRXArCGbZ508SxYYTC5v1JWoz2voff5pm25jU1Ok=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -194,42 +198,42 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo=
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ=
github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
github.com/gofrs/uuid/v5 v5.4.0 h1:EfbpCTjqMuGyq5ZJwxqzn3Cbr2d0rUZU7v5ycAk/e/0=
github.com/gofrs/uuid/v5 v5.4.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0=
github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU=
github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I=
github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI=
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef h1:xpF9fUHpoIrrjX24DURVKiwHcFpw19ndIs+FwTSMbno=
github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -244,10 +248,10 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
@@ -267,8 +271,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=
@@ -282,10 +286,12 @@ github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jsimonetti/rtnetlink v1.4.1 h1:JfD4jthWBqZMEffc5RjgmlzpYttAVw1sdnmiNaPO3hE=
github.com/jsimonetti/rtnetlink v1.4.1/go.mod h1:xJjT7t59UIZ62GLZbv6PLLo8VFrostJMPBAheR6OM8w=
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/jsimonetti/rtnetlink v1.4.2 h1:Df9w9TZ3npHTyDn0Ev9e1uzmN2odmXd0QX+J5GTEn90=
github.com/jsimonetti/rtnetlink v1.4.2/go.mod h1:92s6LJdE+1iOrw+F2/RO7LYI2Qd8pPpFNNUYW06gcoM=
github.com/kamstrup/intmap v0.5.2 h1:qnwBm1mh4XAnW9W9Ue9tZtTff8pS6+s6iKF6JRIV2Dk=
github.com/kamstrup/intmap v0.5.2/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4=
github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
@@ -306,8 +312,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.11.1 h1:wuChtj2hfsGmmx3nf1m7xC2XpK6OtelS2shMY+bGMtI=
github.com/lib/pq v1.11.1/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4=
github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -323,18 +329,22 @@ github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byF
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw=
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg=
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o=
github.com/mdlayher/netlink v1.8.0 h1:e7XNIYJKD7hUct3Px04RuIGJbBxy1/c4nX7D5YyvvlM=
github.com/mdlayher/netlink v1.8.0/go.mod h1:UhgKXUlDQhzb09DrCl2GuRNEglHmhYoWAHid9HK3594=
github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c=
github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE=
github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI=
github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI=
github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos=
github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ=
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/moby/api v1.53.0 h1:PihqG1ncw4W+8mZs69jlwGXdaYBeb5brF6BL7mPIS/w=
github.com/moby/moby/api v1.53.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc=
github.com/moby/moby/client v0.2.2 h1:Pt4hRMCAIlyjL3cr8M5TrXCwKzguebPAc2do2ur7dEM=
github.com/moby/moby/client v0.2.2/go.mod h1:2EkIPVNCqR05CMIzL1mfA07t0HvVUUOl85pasRz/GmQ=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
@@ -343,8 +353,8 @@ github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ=
github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
@@ -365,14 +375,14 @@ github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXR
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 h1:QTvNkZ5ylY0PGgA+Lih+GdboMLY/G9SEGLMEGVjTVA4=
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 h1:KPpdlQLZcHfTMQRi6bFQ7ogNO0ltFT4PmtwTLW4W+14=
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0=
github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
github.com/pires/go-proxyproto v0.9.2 h1:H1UdHn695zUVVmB0lQ354lOWHOy6TZSpzBl3tgN0s1U=
github.com/pires/go-proxyproto v0.9.2/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
@@ -382,16 +392,16 @@ github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Q
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
github.com/prometheus-community/pro-bing v0.7.0 h1:KFYFbxC2f2Fp6c+TyxbCOEarf7rbnzr9Gw8eIb0RfZA=
github.com/prometheus-community/pro-bing v0.7.0/go.mod h1:Moob9dvlY50Bfq6i88xIwfyw7xLFHH69LUgx9n5zqCE=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
github.com/pterm/pterm v0.12.29/go.mod h1:WI3qxgvoQFFGKGjGnJR849gU0TsEOvKn5Q8LlY1U7lg=
github.com/pterm/pterm v0.12.30/go.mod h1:MOqLIyMOgmTDz9yorcYbcw+HsgoZo3BQfg2wtl3HEFE=
@@ -401,8 +411,8 @@ github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5b
github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
github.com/pterm/pterm v0.12.82 h1:+D9wYhCaeaK0FIQoZtqbNQuNpe2lB2tajKKsTd5paVQ=
github.com/pterm/pterm v0.12.82/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
github.com/puzpuzpuz/xsync/v4 v4.3.0 h1:w/bWkEJdYuRNYhHn5eXnIT8LzDM1O629X1I9MJSkD7Q=
github.com/puzpuzpuz/xsync/v4 v4.3.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/puzpuzpuz/xsync/v4 v4.4.0 h1:vlSN6/CkEY0pY8KaB0yqo/pCLZvp9nhdbBdjipT4gWo=
github.com/puzpuzpuz/xsync/v4 v4.4.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -412,8 +422,8 @@ github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
github.com/safchain/ethtool v0.7.0 h1:rlJzfDetsVvT61uz8x1YIcFn12akMfuPulHtZjtb7Is=
github.com/safchain/ethtool v0.7.0/go.mod h1:MenQKEjXdfkjD3mp2QdCk8B/hwvkrlOTm/FD4gTpFxQ=
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=
@@ -423,8 +433,8 @@ github.com/sasha-s/go-deadlock v0.3.6/go.mod h1:CUqNyyvMxTyjFqDT7MRg9mb4Dv/btmGT
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
@@ -462,14 +472,14 @@ github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc=
github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a h1:TApskGPim53XY5WRt5hX4DnO8V6CmVoimSklryIoGMM=
github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a/go.mod h1:+6WyG6kub5/5uPsMdYQuSti8i6F5WuKpFWLQnZt/Mms=
github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d h1:N+TtzIaGYREbLbKZB0WU0vVnMSfaqUkSf3qMEi03hwE=
github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d/go.mod h1:6NU8H/GLPVX2TnXAY1duyy9ylLaHwFpr0X93UPiYmNI=
github.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f h1:CL6gu95Y1o2ko4XiWPvWkJka0QmQWcUyPywWVWDPQbQ=
github.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=
github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09 h1:Fc9lE2cDYJbBLpCqnVmoLdf7McPqoHZiDxDPPpkJM04=
github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09/go.mod h1:QMNhC4XGFiXKngHVLXE+ERDmQoH0s5fD7AUxupykocQ=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368 h1:0tpDdAj9sSfSZg4gMwNTdqMP592sBrq2Sm0w6ipnh7k=
github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da h1:jVRUZPRs9sqyKlYHHzHjAqKN+6e/Vog6NpHYeNPJqOw=
@@ -480,8 +490,8 @@ github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=
github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk=
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2oeKxpIUmtiDV5sn71VgeQgg6vcE7k=
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM=
github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok=
github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI=
github.com/tink-crypto/tink-go/v2 v2.6.0 h1:+KHNBHhWH33Vn+igZWcsgdEPUxKwBMEe0QC60t388v4=
github.com/tink-crypto/tink-go/v2 v2.6.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8=
github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg=
github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=
@@ -503,24 +513,24 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJu
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0=
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40=
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
@@ -534,25 +544,25 @@ go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/W
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
|
||||
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
|
||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
|
||||
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
||||
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
|
||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=
|
||||
golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
|
||||
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
||||
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -572,10 +582,8 @@ golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -591,36 +599,35 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
|
||||
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
|
||||
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
|
||||
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 h1:7ei4lp52gK1uSejlA8AZl5AJjeLUOHBQscRQZUgAcu0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20/go.mod h1:ZdbssH/1SOVnjnDlXzxDHK2MCidiqXtbYccJNzNYPEE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
|
||||
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
@@ -639,8 +646,8 @@ gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
|
||||
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
|
||||
gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
|
||||
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM=
|
||||
honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 h1:5SXjd4ET5dYijLaf0O3aOenC0Z4ZafIWSpjUzsQaNho=
|
||||
@@ -675,10 +682,12 @@ modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
|
||||
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
|
||||
tailscale.com v1.94.0 h1:5oW3SF35aU9ekHDhP2J4CHewnA2NxE7SRilDB2pVjaA=
|
||||
tailscale.com v1.94.0/go.mod h1:gLnVrEOP32GWvroaAHHGhjSGMPJ1i4DvqNwEg+Yuov4=
|
||||
tailscale.com v1.94.1 h1:0dAst/ozTuFkgmxZULc3oNwR9+qPIt5ucvzH7kaM0Jw=
|
||||
tailscale.com v1.94.1/go.mod h1:gLnVrEOP32GWvroaAHHGhjSGMPJ1i4DvqNwEg+Yuov4=
|
||||
zgo.at/zcache/v2 v2.4.1 h1:Dfjoi8yI0Uq7NCc4lo2kaQJJmp9Mijo21gef+oJstbY=
|
||||
zgo.at/zcache/v2 v2.4.1/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
|
||||
zombiezen.com/go/postgrestest v1.0.1 h1:aXoADQAJmZDU3+xilYVut0pHhgc0sF8ZspPW9gFNwP4=
|
||||
|
||||
137 hscontrol/app.go
@@ -115,13 +115,14 @@ var (

func NewHeadscale(cfg *types.Config) (*Headscale, error) {
var err error

if profilingEnabled {
runtime.SetBlockProfileRate(1)
}

noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
return nil, fmt.Errorf("reading or creating Noise protocol private key: %w", err)
}

s, err := state.NewState(cfg)
@@ -140,27 +141,30 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
ephemeralGC := db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
node, ok := app.state.GetNodeByID(ni)
if !ok {
log.Error().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed")
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed because node not found in NodeStore")
log.Error().Uint64("node.id", ni.Uint64()).Msg("ephemeral node deletion failed")
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Msg("ephemeral node deletion failed because node not found in NodeStore")

return
}

policyChanged, err := app.state.DeleteNode(node)
if err != nil {
log.Error().Err(err).Uint64("node.id", ni.Uint64()).Str("node.name", node.Hostname()).Msg("Ephemeral node deletion failed")
log.Error().Err(err).EmbedObject(node).Msg("ephemeral node deletion failed")
return
}

app.Change(policyChanged)
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Str("node.name", node.Hostname()).Msg("Ephemeral node deleted because garbage collection timeout reached")
log.Debug().Caller().EmbedObject(node).Msg("ephemeral node deleted because garbage collection timeout reached")
})
app.ephemeralGC = ephemeralGC

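Editor's note: this hunk (and several later ones) collapses repeated `Uint64("node.id", …)`/`Str("node.name", …)` field pairs into a single `EmbedObject(node)` call. In zerolog, `EmbedObject` flattens the fields of any value implementing `zerolog.LogObjectMarshaler` into the current event, so the node type decides once how it is logged. A minimal sketch of the pattern, with a simplified `Node` standing in for headscale's node view (the real marshaller lives elsewhere in the codebase and may emit different fields):

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

// Node is an illustrative stand-in for headscale's node view.
type Node struct {
	ID       uint64
	Hostname string
}

// MarshalZerologObject makes Node satisfy zerolog.LogObjectMarshaler,
// so every call site gets the same "node.id"/"node.name" fields for free.
func (n Node) MarshalZerologObject(e *zerolog.Event) {
	e.Uint64("node.id", n.ID).Str("node.name", n.Hostname)
}

func main() {
	logger := zerolog.New(os.Stdout)
	n := Node{ID: 42, Hostname: "birdie"}
	// Fields from MarshalZerologObject are embedded directly into the event.
	logger.Info().EmbedObject(n).Msg("ephemeral node deleted")
}
```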
var authProvider AuthProvider

authProvider = NewAuthProviderWeb(cfg.ServerURL)
if cfg.OIDC.Issuer != "" {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

oidcProvider, err := NewAuthProviderOIDC(
ctx,
&app,
@@ -177,17 +181,18 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
authProvider = oidcProvider
}
}

app.authProvider = authProvider

if app.cfg.TailcfgDNSConfig != nil && app.cfg.TailcfgDNSConfig.Proxied { // if MagicDNS
// TODO(kradalby): revisit why this takes a list.

var magicDNSDomains []dnsname.FQDN
if cfg.PrefixV4 != nil {
magicDNSDomains = append(
magicDNSDomains,
util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
}

if cfg.PrefixV6 != nil {
magicDNSDomains = append(
magicDNSDomains,
@@ -198,6 +203,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
if app.cfg.TailcfgDNSConfig.Routes == nil {
app.cfg.TailcfgDNSConfig.Routes = make(map[string][]*dnstype.Resolver)
}

for _, d := range magicDNSDomains {
app.cfg.TailcfgDNSConfig.Routes[d.WithoutTrailingDot()] = nil
}
@@ -206,7 +212,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
if cfg.DERP.ServerEnabled {
derpServerKey, err := readOrCreatePrivateKey(cfg.DERP.ServerPrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read or create DERP server private key: %w", err)
return nil, fmt.Errorf("reading or creating DERP server private key: %w", err)
}

if derpServerKey.Equal(*noisePrivateKey) {
@@ -232,6 +238,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
if err != nil {
return nil, err
}

app.DERPServer = embeddedDERPServer
}

@@ -251,9 +258,11 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
lastExpiryCheck := time.Unix(0, 0)

derpTickerChan := make(<-chan time.Time)

if h.cfg.DERP.AutoUpdate && h.cfg.DERP.UpdateFrequency != 0 {
derpTicker := time.NewTicker(h.cfg.DERP.UpdateFrequency)
defer derpTicker.Stop()

derpTickerChan = derpTicker.C
}

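Editor's note: `derpTickerChan` starts as a fresh `make(<-chan time.Time)` on which nothing ever sends, and is only swapped for a real ticker channel when DERP auto-update is enabled. A receive on a channel with no sender blocks forever, so the corresponding `select` case is simply dead when the feature is off. A self-contained sketch of the idea (durations are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	autoUpdate := false // flip to true to enable the periodic branch

	// A channel nothing ever sends on: its select case never fires.
	tickerChan := make(<-chan time.Time)
	if autoUpdate {
		t := time.NewTicker(100 * time.Millisecond)
		defer t.Stop()
		tickerChan = t.C
	}

	timeout := time.After(350 * time.Millisecond)
	for {
		select {
		case <-tickerChan:
			fmt.Println("tick: would refresh the DERPMap here")
		case <-timeout:
			fmt.Println("done")
			return
		}
	}
}
```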
@@ -271,8 +280,10 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
return

case <-expireTicker.C:
var expiredNodeChanges []change.Change
var changed bool
var (
expiredNodeChanges []change.Change
changed bool
)

lastExpiryCheck, expiredNodeChanges, changed = h.state.ExpireExpiredNodes(lastExpiryCheck)

@@ -286,12 +297,14 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
}

case <-derpTickerChan:
log.Info().Msg("Fetching DERPMap updates")
derpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) {
log.Info().Msg("fetching DERPMap updates")

derpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) { //nolint:contextcheck
derpMap, err := derp.GetDERPMap(h.cfg.DERP)
if err != nil {
return nil, err
}

if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
region, _ := h.DERPServer.GenerateRegion()
derpMap.Regions[region.RegionID] = &region
@@ -303,6 +316,7 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
log.Error().Err(err).Msg("failed to build new DERPMap, retrying later")
continue
}

h.state.SetDERPMap(derpMap)

h.Change(change.DERPMap())
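Editor's note: the retry loop above matches the call shape of cenkalti/backoff v5, where `Retry` is generic over the operation's result type and is driven by the passed context. A minimal sketch under the assumption that the v5 API is in use; the `fetch` function and its return value are illustrative stand-ins for `derp.GetDERPMap`:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// The operation returns its value directly; Retry is generic over it.
	fetch := func() (string, error) {
		return "derpmap", nil // stand-in for a real fetch
	}

	// Retry keeps invoking fetch with exponential backoff until it
	// succeeds or the context is cancelled.
	m, err := backoff.Retry(ctx, fetch, backoff.WithBackOff(backoff.NewExponentialBackOff()))
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Println("got:", m)
}
```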
@@ -311,6 +325,7 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
if !ok {
continue
}

h.cfg.TailcfgDNSConfig.ExtraRecords = records

h.Change(change.ExtraRecords())
@@ -339,7 +354,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
if !ok {
return ctx, status.Errorf(
codes.InvalidArgument,
"Retrieving metadata is failed",
"retrieving metadata",
)
}

@@ -347,7 +362,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
if !ok {
return ctx, status.Errorf(
codes.Unauthenticated,
"Authorization token is not supplied",
"authorization token not supplied",
)
}

@@ -362,7 +377,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,

valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
if err != nil {
return ctx, status.Error(codes.Internal, "failed to validate token")
return ctx, status.Error(codes.Internal, "validating token")
}

if !valid {
@@ -390,7 +405,8 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler

writeUnauthorized := func(statusCode int) {
writer.WriteHeader(statusCode)
if _, err := writer.Write([]byte("Unauthorized")); err != nil {

if _, err := writer.Write([]byte("Unauthorized")); err != nil { //nolint:noinlineerr
log.Error().Err(err).Msg("writing HTTP response failed")
}
}
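Editor's note: many hunks in this change are the same mechanical refactor: inline error checks (`if err := f(); err != nil`) are either annotated with `//nolint:noinlineerr` or unfolded into a separate assignment plus check, evidently to satisfy the `noinlineerr` linter named in the annotations. The two forms are semantically identical apart from the scope of `err`; a side-by-side sketch:

```go
package demo

import "os"

// Inline form: err is scoped to the if statement only.
func removeInline(path string) error {
	if err := os.Remove(path); err != nil {
		return err
	}
	return nil
}

// Unfolded form preferred by the noinlineerr style: assignment and
// check on separate lines, err visible to the rest of the function.
func removeUnfolded(path string) error {
	err := os.Remove(path)
	if err != nil {
		return err
	}
	return nil
}
```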
@@ -401,6 +417,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
Str("client_address", req.RemoteAddr).
Msg(`missing "Bearer " prefix in "Authorization" header`)
writeUnauthorized(http.StatusUnauthorized)

return
}

@@ -412,6 +429,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
Str("client_address", req.RemoteAddr).
Msg("failed to validate token")
writeUnauthorized(http.StatusUnauthorized)

return
}

@@ -420,6 +438,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
Str("client_address", req.RemoteAddr).
Msg("invalid token")
writeUnauthorized(http.StatusUnauthorized)

return
}

@@ -431,7 +450,7 @@
// and will remove it if it is not.
func (h *Headscale) ensureUnixSocketIsAbsent() error {
// File does not exist, all fine
if _, err := os.Stat(h.cfg.UnixSocket); errors.Is(err, os.ErrNotExist) {
if _, err := os.Stat(h.cfg.UnixSocket); errors.Is(err, os.ErrNotExist) { //nolint:noinlineerr
return nil
}

@@ -455,6 +474,7 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
if provider, ok := h.authProvider.(*AuthProviderOIDC); ok {
router.HandleFunc("/oidc/callback", provider.OIDCCallbackHandler).Methods(http.MethodGet)
}

router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet)
router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).
Methods(http.MethodGet)
@@ -484,8 +504,11 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
}

// Serve launches the HTTP and gRPC server service Headscale and the API.
//
//nolint:gocyclo // complex server startup function
func (h *Headscale) Serve() error {
var err error

capver.CanOldCodeBeCleanedUp()

if profilingEnabled {
@@ -506,12 +529,13 @@ func (h *Headscale) Serve() error {
}

versionInfo := types.GetVersionInfo()
log.Info().Str("version", versionInfo.Version).Str("commit", versionInfo.Commit).Msg("Starting Headscale")
log.Info().Str("version", versionInfo.Version).Str("commit", versionInfo.Commit).Msg("starting headscale")
log.Info().
Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).
Msg("Clients with a lower minimum version will be rejected")

h.mapBatcher = mapper.NewBatcherAndMapper(h.cfg, h.state)

h.mapBatcher.Start()
defer h.mapBatcher.Close()

@@ -526,7 +550,7 @@ func (h *Headscale) Serve() error {

derpMap, err := derp.GetDERPMap(h.cfg.DERP)
if err != nil {
return fmt.Errorf("failed to get DERPMap: %w", err)
return fmt.Errorf("getting DERPMap: %w", err)
}

if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
@@ -545,6 +569,7 @@ func (h *Headscale) Serve() error {
// around between restarts, they will reconnect and the GC will
// be cancelled.
go h.ephemeralGC.Start()

ephmNodes := h.state.ListEphemeralNodes()
for _, node := range ephmNodes.All() {
h.ephemeralGC.Schedule(node.ID(), h.cfg.EphemeralNodeInactivityTimeout)
@@ -555,7 +580,9 @@ func (h *Headscale) Serve() error {
if err != nil {
return fmt.Errorf("setting up extrarecord manager: %w", err)
}

h.cfg.TailcfgDNSConfig.ExtraRecords = h.extraRecordMan.Records()

go h.extraRecordMan.Run()
defer h.extraRecordMan.Close()
}
@@ -564,6 +591,7 @@ func (h *Headscale) Serve() error {
// records updates
scheduleCtx, scheduleCancel := context.WithCancel(context.Background())
defer scheduleCancel()

go h.scheduledTasks(scheduleCtx)

if zl.GlobalLevel() == zl.TraceLevel {
@@ -576,6 +604,7 @@ func (h *Headscale) Serve() error {
errorGroup := new(errgroup.Group)

ctx := context.Background()

ctx, cancel := context.WithCancel(ctx)
defer cancel()

@@ -586,29 +615,30 @@ func (h *Headscale) Serve() error {

err = h.ensureUnixSocketIsAbsent()
if err != nil {
return fmt.Errorf("unable to remove old socket file: %w", err)
return fmt.Errorf("removing old socket file: %w", err)
}

socketDir := filepath.Dir(h.cfg.UnixSocket)

err = util.EnsureDir(socketDir)
if err != nil {
return fmt.Errorf("setting up unix socket: %w", err)
}

socketListener, err := net.Listen("unix", h.cfg.UnixSocket)
socketListener, err := new(net.ListenConfig).Listen(context.Background(), "unix", h.cfg.UnixSocket)
if err != nil {
return fmt.Errorf("failed to set up gRPC socket: %w", err)
return fmt.Errorf("setting up gRPC socket: %w", err)
}

// Change socket permissions
if err := os.Chmod(h.cfg.UnixSocket, h.cfg.UnixSocketPermission); err != nil {
return fmt.Errorf("failed change permission of gRPC socket: %w", err)
if err := os.Chmod(h.cfg.UnixSocket, h.cfg.UnixSocketPermission); err != nil { //nolint:noinlineerr
return fmt.Errorf("changing gRPC socket permission: %w", err)
}

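Editor's note: another recurring substitution in this diff replaces plain `net.Listen(...)` calls with `new(net.ListenConfig).Listen(ctx, ...)`, the context-aware equivalent from the standard library (plausibly prompted by a `noctx`-style linter; that motivation is an inference, not stated in the commit). A minimal sketch of the pattern:

```go
package main

import (
	"context"
	"fmt"
	"net"
)

func main() {
	ctx := context.Background()

	// Equivalent to net.Listen("tcp", addr), but listener setup
	// respects any cancellation/deadline carried by ctx.
	ln, err := new(net.ListenConfig).Listen(ctx, "tcp", "127.0.0.1:0")
	if err != nil {
		fmt.Println("listen failed:", err)
		return
	}
	defer ln.Close()

	fmt.Println("listening on", ln.Addr())
}
```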
grpcGatewayMux := grpcRuntime.NewServeMux()

// Make the grpc-gateway connect to grpc over socket
grpcGatewayConn, err := grpc.Dial(
grpcGatewayConn, err := grpc.Dial( //nolint:staticcheck // SA1019: deprecated but supported in 1.x
h.cfg.UnixSocket,
[]grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
@@ -659,10 +689,13 @@ func (h *Headscale) Serve() error {
// https://github.com/soheilhy/cmux/issues/68
// https://github.com/soheilhy/cmux/issues/91

var grpcServer *grpc.Server
var grpcListener net.Listener
var (
grpcServer *grpc.Server
grpcListener net.Listener
)

if tlsConfig != nil || h.cfg.GRPCAllowInsecure {
log.Info().Msgf("Enabling remote gRPC at %s", h.cfg.GRPCAddr)
log.Info().Msgf("enabling remote gRPC at %s", h.cfg.GRPCAddr)

grpcOptions := []grpc.ServerOption{
grpc.ChainUnaryInterceptor(
@@ -685,9 +718,9 @@ func (h *Headscale) Serve() error {
v1.RegisterHeadscaleServiceServer(grpcServer, newHeadscaleV1APIServer(h))
reflection.Register(grpcServer)

grpcListener, err = net.Listen("tcp", h.cfg.GRPCAddr)
grpcListener, err = new(net.ListenConfig).Listen(context.Background(), "tcp", h.cfg.GRPCAddr)
if err != nil {
return fmt.Errorf("failed to bind to TCP address: %w", err)
return fmt.Errorf("binding to TCP address: %w", err)
}

errorGroup.Go(func() error { return grpcServer.Serve(grpcListener) })
@@ -715,14 +748,16 @@ func (h *Headscale) Serve() error {
}

var httpListener net.Listener

if tlsConfig != nil {
httpServer.TLSConfig = tlsConfig
httpListener, err = tls.Listen("tcp", h.cfg.Addr, tlsConfig)
} else {
httpListener, err = net.Listen("tcp", h.cfg.Addr)
httpListener, err = new(net.ListenConfig).Listen(context.Background(), "tcp", h.cfg.Addr)
}

if err != nil {
return fmt.Errorf("failed to bind to TCP address: %w", err)
return fmt.Errorf("binding to TCP address: %w", err)
}

errorGroup.Go(func() error { return httpServer.Serve(httpListener) })
@@ -738,7 +773,7 @@ func (h *Headscale) Serve() error {
if h.cfg.MetricsAddr != "" {
debugHTTPListener, err = (&net.ListenConfig{}).Listen(ctx, "tcp", h.cfg.MetricsAddr)
if err != nil {
return fmt.Errorf("failed to bind to TCP address: %w", err)
return fmt.Errorf("binding to TCP address: %w", err)
}

debugHTTPServer = h.debugHTTPServer()
@@ -751,19 +786,24 @@ func (h *Headscale) Serve() error {
log.Info().Msg("metrics server disabled (metrics_listen_addr is empty)")
}

var tailsqlContext context.Context

if tailsqlEnabled {
if h.cfg.Database.Type != types.DatabaseSqlite {
//nolint:gocritic // exitAfterDefer: Fatal exits during initialization before servers start
log.Fatal().
Str("type", h.cfg.Database.Type).
Msgf("tailsql only support %q", types.DatabaseSqlite)
}

if tailsqlTSKey == "" {
//nolint:gocritic // exitAfterDefer: Fatal exits during initialization before servers start
log.Fatal().Msg("tailsql requires TS_AUTHKEY to be set")
}

tailsqlContext = context.Background()
go runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.Database.Sqlite.Path)

go runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.Database.Sqlite.Path) //nolint:errcheck
}

// Handle common process-killing signals so we can gracefully shut down:
@@ -774,6 +814,7 @@ func (h *Headscale) Serve() error {
syscall.SIGTERM,
syscall.SIGQUIT,
syscall.SIGHUP)

sigFunc := func(c chan os.Signal) {
// Wait for a SIGINT or SIGKILL:
for {
@@ -798,6 +839,7 @@ func (h *Headscale) Serve() error {

default:
info := func(msg string) { log.Info().Msg(msg) }

log.Info().
Str("signal", sig.String()).
Msg("Received signal to stop, shutting down gracefully")
@@ -854,6 +896,7 @@ func (h *Headscale) Serve() error {
if debugHTTPListener != nil {
debugHTTPListener.Close()
}

httpListener.Close()
grpcGatewayConn.Close()

@@ -863,6 +906,7 @@ func (h *Headscale) Serve() error {

// Close state connections
info("closing state and database")

err = h.state.Close()
if err != nil {
log.Error().Err(err).Msg("failed to close state")
@@ -875,6 +919,7 @@ func (h *Headscale) Serve() error {
}
}
}

errorGroup.Go(func() error {
sigFunc(sigc)

@@ -886,6 +931,7 @@ func (h *Headscale) Serve() error {

func (h *Headscale) getTLSSettings() (*tls.Config, error) {
var err error

if h.cfg.TLS.LetsEncrypt.Hostname != "" {
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
log.Warn().
@@ -918,7 +964,6 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
// Configuration via autocert with HTTP-01. This requires listening on
// port 80 for the certificate validation in addition to the headscale
// service, which can be configured to run on any other port.

server := &http.Server{
Addr: h.cfg.TLS.LetsEncrypt.Listen,
Handler: certManager.HTTPHandler(http.HandlerFunc(h.redirect)),
@@ -940,13 +985,13 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
}
} else if h.cfg.TLS.CertPath == "" {
if !strings.HasPrefix(h.cfg.ServerURL, "http://") {
log.Warn().Msg("Listening without TLS but ServerURL does not start with http://")
log.Warn().Msg("listening without TLS but ServerURL does not start with http://")
}

return nil, err
} else {
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
log.Warn().Msg("listening with TLS but ServerURL does not start with https://")
}

tlsConfig := &tls.Config{
@@ -963,6 +1008,7 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {

func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
dir := filepath.Dir(path)

err := util.EnsureDir(dir)
if err != nil {
return nil, fmt.Errorf("ensuring private key directory: %w", err)
@@ -970,21 +1016,22 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {

privateKey, err := os.ReadFile(path)
if errors.Is(err, os.ErrNotExist) {
log.Info().Str("path", path).Msg("No private key file at path, creating...")
log.Info().Str("path", path).Msg("no private key file at path, creating...")

machineKey := key.NewMachine()

machineKeyStr, err := machineKey.MarshalText()
if err != nil {
return nil, fmt.Errorf(
"failed to convert private key to string for saving: %w",
"converting private key to string for saving: %w",
err,
)
}

err = os.WriteFile(path, machineKeyStr, privateKeyFileMode)
if err != nil {
return nil, fmt.Errorf(
"failed to save private key to disk at path %q: %w",
"saving private key to disk at path %q: %w",
path,
err,
)
@@ -992,14 +1039,14 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {

return &machineKey, nil
} else if err != nil {
return nil, fmt.Errorf("failed to read private key file: %w", err)
return nil, fmt.Errorf("reading private key file: %w", err)
}

trimmedPrivateKey := strings.TrimSpace(string(privateKey))

var machineKey key.MachinePrivate
if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil {
return nil, fmt.Errorf("failed to parse private key: %w", err)
if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil { //nolint:noinlineerr
return nil, fmt.Errorf("parsing private key: %w", err)
}

return &machineKey, nil
@@ -1023,7 +1070,7 @@ type acmeLogger struct {
func (l *acmeLogger) RoundTrip(req *http.Request) (*http.Response, error) {
resp, err := l.rt.RoundTrip(req)
if err != nil {
log.Error().Err(err).Str("url", req.URL.String()).Msg("ACME request failed")
log.Error().Err(err).Str("url", req.URL.String()).Msg("acme request failed")
return nil, err
}

@@ -1031,7 +1078,7 @@ func (l *acmeLogger) RoundTrip(req *http.Request) (*http.Response, error) {
defer resp.Body.Close()

body, _ := io.ReadAll(resp.Body)
log.Error().Int("status_code", resp.StatusCode).Str("url", req.URL.String()).Bytes("body", body).Msg("ACME request returned error")
log.Error().Int("status_code", resp.StatusCode).Str("url", req.URL.String()).Bytes("body", body).Msg("acme request returned error")
}

return resp, nil

@@ -16,12 +16,11 @@ import (
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/ptr"
)

type AuthProvider interface {
RegisterHandler(http.ResponseWriter, *http.Request)
AuthURL(types.RegistrationID) string
RegisterHandler(w http.ResponseWriter, r *http.Request)
AuthURL(regID types.RegistrationID) string
}

func (h *Headscale) handleRegister(
@@ -42,8 +41,7 @@ func (h *Headscale) handleRegister(
// This is a logout attempt (expiry in the past)
if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
log.Debug().
Uint64("node.id", node.ID().Uint64()).
Str("node.name", node.Hostname()).
EmbedObject(node).
Bool("is_ephemeral", node.IsEphemeral()).
Bool("has_authkey", node.AuthKey().Valid()).
Msg("Found existing node for logout, calling handleLogout")
@@ -52,6 +50,7 @@ func (h *Headscale) handleRegister(
if err != nil {
return nil, fmt.Errorf("handling logout: %w", err)
}

if resp != nil {
return resp, nil
}
@@ -113,8 +112,7 @@ func (h *Headscale) handleRegister(
resp, err := h.handleRegisterWithAuthKey(req, machineKey)
if err != nil {
// Preserve HTTPError types so they can be handled properly by the HTTP layer
var httpErr HTTPError
if errors.As(err, &httpErr) {
if httpErr, ok := errors.AsType[HTTPError](err); ok {
return nil, httpErr
}

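Editor's note: the `errors.As` call with a pre-declared target variable is replaced here by the generic `errors.AsType[T]`, which returns the matched value and an ok-boolean in one expression. This requires a Go release that ships `errors.AsType` (a recent standard-library addition; the exact minimum version is an assumption here). A sketch of the two equivalent forms, using a hypothetical error type:

```go
package main

import (
	"errors"
	"fmt"
)

// PathError is a hypothetical wrapped error type for the example.
type PathError struct{ Path string }

func (e PathError) Error() string { return "bad path: " + e.Path }

func main() {
	err := fmt.Errorf("lookup: %w", PathError{Path: "/tmp/x"})

	// Classic form: declare the target, then pass a pointer to it.
	var pe PathError
	if errors.As(err, &pe) {
		fmt.Println("as:", pe.Path)
	}

	// Generic form: value and ok in a single expression.
	if pe, ok := errors.AsType[PathError](err); ok {
		fmt.Println("astype:", pe.Path)
	}
}
```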
@@ -133,7 +131,7 @@ func (h *Headscale) handleRegister(
}

// handleLogout checks if the [tailcfg.RegisterRequest] is a
// logout attempt from a node. If the node is not attempting to
// logout attempt from a node. If the node is not attempting to.
func (h *Headscale) handleLogout(
node types.NodeView,
req tailcfg.RegisterRequest,
@@ -155,11 +153,12 @@ func (h *Headscale) handleLogout(
// force the client to re-authenticate.
// TODO(kradalby): I wonder if this is a path we ever hit?
if node.IsExpired() {
log.Trace().Str("node.name", node.Hostname()).
Uint64("node.id", node.ID().Uint64()).
log.Trace().
EmbedObject(node).
Interface("reg.req", req).
Bool("unexpected", true).
Msg("Node key expired, forcing re-authentication")

return &tailcfg.RegisterResponse{
NodeKeyExpired: true,
MachineAuthorized: false,
@@ -182,8 +181,7 @@ func (h *Headscale) handleLogout(
// Zero expiry is handled in handleRegister() before calling this function.
if req.Expiry.Before(time.Now()) {
log.Debug().
Uint64("node.id", node.ID().Uint64()).
Str("node.name", node.Hostname()).
EmbedObject(node).
Bool("is_ephemeral", node.IsEphemeral()).
Bool("has_authkey", node.AuthKey().Valid()).
Time("req.expiry", req.Expiry).
@@ -191,8 +189,7 @@ func (h *Headscale) handleLogout(

if node.IsEphemeral() {
log.Info().
Uint64("node.id", node.ID().Uint64()).
Str("node.name", node.Hostname()).
EmbedObject(node).
Msg("Deleting ephemeral node during logout")

c, err := h.state.DeleteNode(node)
@@ -209,8 +206,7 @@ func (h *Headscale) handleLogout(
}

log.Debug().
Uint64("node.id", node.ID().Uint64()).
Str("node.name", node.Hostname()).
EmbedObject(node).
Msg("Node is not ephemeral, setting expiry instead of deleting")
}

@@ -279,6 +275,7 @@ func (h *Headscale) waitForFollowup(
// registration is expired in the cache, instruct the client to try a new registration
return h.reqToNewRegisterResponse(req, machineKey)
}

return nodeToRegisterResponse(node.View()), nil
}
}
@@ -316,7 +313,7 @@ func (h *Headscale) reqToNewRegisterResponse(
MachineKey: machineKey,
NodeKey: req.NodeKey,
Hostinfo: hostinfo,
LastSeen: ptr.To(time.Now()),
LastSeen: new(time.Now()),
},
)

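Editor's note: `ptr.To(time.Now())` becoming `new(time.Now())` (together with the removal of the `tailscale.com/types/ptr` import earlier in this file's diff) relies on the extension of the built-in `new` to accept an expression, returning a pointer to a copy of its value. This is a recent language change, so the minimum Go version required is an assumption here. A tiny sketch:

```go
package main

import (
	"fmt"
	"time"
)

type Record struct {
	LastSeen *time.Time
}

func main() {
	// Old style: a generic helper such as ptr.To[T](v T) *T.
	// New style: new(expr) allocates and initializes in one step.
	r := Record{LastSeen: new(time.Now())}
	fmt.Println(r.LastSeen.IsZero()) // false
}
```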
@@ -324,7 +321,7 @@ func (h *Headscale) reqToNewRegisterResponse(
nodeToRegister.Node.Expiry = &req.Expiry
}

log.Info().Msgf("New followup node registration using key: %s", newRegID)
log.Info().Msgf("new followup node registration using key: %s", newRegID)
h.state.SetRegistrationCacheEntry(newRegID, nodeToRegister)

return &tailcfg.RegisterResponse{
@@ -344,8 +341,8 @@ func (h *Headscale) handleRegisterWithAuthKey(
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, NewHTTPError(http.StatusUnauthorized, "invalid pre auth key", nil)
}
var perr types.PAKError
if errors.As(err, &perr) {

if perr, ok := errors.AsType[types.PAKError](err); ok {
return nil, NewHTTPError(http.StatusUnauthorized, perr.Error(), nil)
}

@@ -355,7 +352,7 @@ func (h *Headscale) handleRegisterWithAuthKey(
// If node is not valid, it means an ephemeral node was deleted during logout
if !node.Valid() {
h.Change(changed)
return nil, nil
return nil, nil //nolint:nilnil // intentional: no node to return when ephemeral deleted
}

// This is a bit of a back and forth, but we have a bit of a chicken and egg
@@ -397,8 +394,7 @@ func (h *Headscale) handleRegisterWithAuthKey(
Caller().
Interface("reg.resp", resp).
Interface("reg.req", req).
Str("node.name", node.Hostname()).
Uint64("node.id", node.ID().Uint64()).
EmbedObject(node).
Msg("RegisterResponse")

return resp, nil
@@ -435,6 +431,7 @@ func (h *Headscale) handleRegisterInteractive(
Str("generated.hostname", hostname).
Msg("Received registration request with empty hostname, generated default")
}

hostinfo.Hostname = hostname

nodeToRegister := types.NewRegisterNode(
@@ -443,7 +440,7 @@ func (h *Headscale) handleRegisterInteractive(
MachineKey: machineKey,
NodeKey: req.NodeKey,
Hostinfo: hostinfo,
LastSeen: ptr.To(time.Now()),
LastSeen: new(time.Now()),
},
)

@@ -456,7 +453,7 @@ func (h *Headscale) handleRegisterInteractive(
nodeToRegister,
)

log.Info().Msgf("Starting node registration using key: %s", registrationId)
log.Info().Msgf("starting node registration using key: %s", registrationId)

return &tailcfg.RegisterResponse{
AuthURL: h.authProvider.AuthURL(registrationId),

@@ -625,6 +625,152 @@ func TestTaggedNodeReauthPreservesDisabledExpiry(t *testing.T) {
"Tagged node should have expiry PRESERVED as disabled after re-auth")
}

// TestExpiryDuringPersonalToTaggedConversion tests that when a personal node
// is converted to tagged via reauth with RequestTags, the expiry is cleared to nil.
// BUG #3048: Previously expiry was NOT cleared because expiry handling ran
// BEFORE processReauthTags.
func TestExpiryDuringPersonalToTaggedConversion(t *testing.T) {
app := createTestApp(t)
user := app.state.CreateUserForTest("expiry-test-user")

// Update policy to allow user to own tags
err := app.state.UpdatePolicyManagerUsersForTest()
require.NoError(t, err)

policy := `{
"tagOwners": {
"tag:server": ["expiry-test-user@"]
},
"acls": [{"action": "accept", "src": ["*"], "dst": ["*:*"]}]
}`
_, err = app.state.SetPolicy([]byte(policy))
require.NoError(t, err)

machineKey := key.NewMachine()
nodeKey1 := key.NewNode()

// Step 1: Create user-owned node WITH expiry set
clientExpiry := time.Now().Add(24 * time.Hour)
registrationID1 := types.MustRegistrationID()
regEntry1 := types.NewRegisterNode(types.Node{
MachineKey: machineKey.Public(),
NodeKey: nodeKey1.Public(),
Hostname: "personal-to-tagged",
Hostinfo: &tailcfg.Hostinfo{
Hostname: "personal-to-tagged",
RequestTags: []string{}, // No tags - user-owned
},
Expiry: &clientExpiry,
})
app.state.SetRegistrationCacheEntry(registrationID1, regEntry1)

node, _, err := app.state.HandleNodeFromAuthPath(
registrationID1, types.UserID(user.ID), nil, "webauth",
)
require.NoError(t, err)
require.False(t, node.IsTagged(), "Node should be user-owned initially")
require.True(t, node.Expiry().Valid(), "User-owned node should have expiry set")

// Step 2: Re-auth with tags (Personal → Tagged conversion)
nodeKey2 := key.NewNode()
registrationID2 := types.MustRegistrationID()
regEntry2 := types.NewRegisterNode(types.Node{
MachineKey: machineKey.Public(),
NodeKey: nodeKey2.Public(),
Hostname: "personal-to-tagged",
Hostinfo: &tailcfg.Hostinfo{
Hostname: "personal-to-tagged",
RequestTags: []string{"tag:server"}, // Adding tags
},
Expiry: &clientExpiry, // Client still sends expiry
})
app.state.SetRegistrationCacheEntry(registrationID2, regEntry2)

nodeAfter, _, err := app.state.HandleNodeFromAuthPath(
registrationID2, types.UserID(user.ID), nil, "webauth",
)
require.NoError(t, err)
require.True(t, nodeAfter.IsTagged(), "Node should be tagged after conversion")

// CRITICAL ASSERTION: Tagged nodes should NOT have expiry
assert.False(t, nodeAfter.Expiry().Valid(),
"Tagged node should have expiry cleared to nil")
}

// TestExpiryDuringTaggedToPersonalConversion tests that when a tagged node
// is converted to personal via reauth with empty RequestTags, expiry is set
// from the client request.
// BUG #3048: Previously expiry was NOT set because expiry handling ran
// BEFORE processReauthTags (node was still tagged at check time).
func TestExpiryDuringTaggedToPersonalConversion(t *testing.T) {
app := createTestApp(t)
user := app.state.CreateUserForTest("expiry-test-user2")

// Update policy to allow user to own tags
err := app.state.UpdatePolicyManagerUsersForTest()
require.NoError(t, err)

policy := `{
"tagOwners": {
"tag:server": ["expiry-test-user2@"]
},
"acls": [{"action": "accept", "src": ["*"], "dst": ["*:*"]}]
}`
_, err = app.state.SetPolicy([]byte(policy))
require.NoError(t, err)

machineKey := key.NewMachine()
nodeKey1 := key.NewNode()

// Step 1: Create tagged node (expiry should be nil)
registrationID1 := types.MustRegistrationID()
regEntry1 := types.NewRegisterNode(types.Node{
MachineKey: machineKey.Public(),
NodeKey: nodeKey1.Public(),
Hostname: "tagged-to-personal",
Hostinfo: &tailcfg.Hostinfo{
Hostname: "tagged-to-personal",
RequestTags: []string{"tag:server"}, // Tagged node
},
})
app.state.SetRegistrationCacheEntry(registrationID1, regEntry1)

node, _, err := app.state.HandleNodeFromAuthPath(
registrationID1, types.UserID(user.ID), nil, "webauth",
)
require.NoError(t, err)
require.True(t, node.IsTagged(), "Node should be tagged initially")
require.False(t, node.Expiry().Valid(), "Tagged node should have nil expiry")

// Step 2: Re-auth with empty tags (Tagged → Personal conversion)
nodeKey2 := key.NewNode()
clientExpiry := time.Now().Add(48 * time.Hour)
registrationID2 := types.MustRegistrationID()
regEntry2 := types.NewRegisterNode(types.Node{
MachineKey: machineKey.Public(),
NodeKey: nodeKey2.Public(),
Hostname: "tagged-to-personal",
Hostinfo: &tailcfg.Hostinfo{
Hostname: "tagged-to-personal",
RequestTags: []string{}, // Empty tags - convert to user-owned
},
Expiry: &clientExpiry, // Client requests expiry
})
app.state.SetRegistrationCacheEntry(registrationID2, regEntry2)

nodeAfter, _, err := app.state.HandleNodeFromAuthPath(
registrationID2, types.UserID(user.ID), nil, "webauth",
)
require.NoError(t, err)
require.False(t, nodeAfter.IsTagged(), "Node should be user-owned after conversion")

// CRITICAL ASSERTION: User-owned nodes should have expiry from client
assert.True(t, nodeAfter.Expiry().Valid(),
"User-owned node should have expiry set")
assert.WithinDuration(t, clientExpiry, nodeAfter.Expiry().Get(), 5*time.Second,
"Expiry should match client request")
}

// TestReAuthWithDifferentMachineKey tests the edge case where a node attempts
// to re-authenticate with the same NodeKey but a DIFFERENT MachineKey.
// This scenario should be handled gracefully (currently creates a new node).

File diff suppressed because it is too large
@@ -40,6 +40,7 @@ var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{
"v1.88": 125,
"v1.90": 130,
"v1.92": 131,
"v1.94": 131,
}

var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{

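Editor's note: these maps drive client gating; headscale records the capability version each Tailscale minor release speaks and rejects clients below `MinSupportedCapabilityVersion` (logged at startup earlier in this diff). A hedged sketch of how such a lookup can be used, with simplified map contents and a hypothetical minimum (headscale's real gating logic lives in the capver package and may differ):

```go
package main

import "fmt"

type CapabilityVersion int

// Illustrative excerpt of the release-to-capability-version table.
var tailscaleToCapVer = map[string]CapabilityVersion{
	"v1.90": 130,
	"v1.92": 131,
	"v1.94": 131,
}

// minSupported is a hypothetical cutoff for the example.
const minSupported CapabilityVersion = 130

func supported(release string) bool {
	capVer, ok := tailscaleToCapVer[release]
	return ok && capVer >= minSupported
}

func main() {
	for _, r := range []string{"v1.88", "v1.92", "v1.94"} {
		fmt.Println(r, "supported:", supported(r))
	}
}
```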
@@ -9,10 +9,9 @@ var tailscaleLatestMajorMinorTests = []struct {
stripV bool
expected []string
}{
{3, false, []string{"v1.88", "v1.90", "v1.92"}},
{2, true, []string{"1.90", "1.92"}},
{3, false, []string{"v1.90", "v1.92", "v1.94"}},
{2, true, []string{"1.92", "1.94"}},
{10, true, []string{
"1.74",
"1.76",
"1.78",
"1.80",
@@ -22,6 +21,7 @@ var tailscaleLatestMajorMinorTests = []struct {
"1.88",
"1.90",
"1.92",
"1.94",
}},
{0, false, nil},
}

@@ -77,8 +77,8 @@ func (hsdb *HSDatabase) CreateAPIKey(
Expiration: expiration,
}

if err := hsdb.DB.Save(&key).Error; err != nil {
return "", nil, fmt.Errorf("failed to save API key to database: %w", err)
if err := hsdb.DB.Save(&key).Error; err != nil { //nolint:noinlineerr
return "", nil, fmt.Errorf("saving API key to database: %w", err)
}

return keyStr, &key, nil
@@ -87,7 +87,9 @@
// ListAPIKeys returns the list of ApiKeys for a user.
func (hsdb *HSDatabase) ListAPIKeys() ([]types.APIKey, error) {
keys := []types.APIKey{}
if err := hsdb.DB.Find(&keys).Error; err != nil {

err := hsdb.DB.Find(&keys).Error
if err != nil {
return nil, err
}

@@ -126,7 +128,8 @@ func (hsdb *HSDatabase) DestroyAPIKey(key types.APIKey) error {

// ExpireAPIKey marks a ApiKey as expired.
func (hsdb *HSDatabase) ExpireAPIKey(key *types.APIKey) error {
if err := hsdb.DB.Model(&key).Update("Expiration", time.Now()).Error; err != nil {
err := hsdb.DB.Model(&key).Update("Expiration", time.Now()).Error
if err != nil {
return err
}

@@ -24,7 +24,6 @@ import (
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
"gorm.io/gorm/schema"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"zgo.at/zcache/v2"
|
||||
)
|
||||
|
||||
@@ -53,6 +52,8 @@ type HSDatabase struct {
|
||||
|
||||
// NewHeadscaleDatabase creates a new database connection and runs migrations.
|
||||
// It accepts the full configuration to allow migrations access to policy settings.
|
||||
//
|
||||
//nolint:gocyclo // complex database initialization with many migrations
|
||||
func NewHeadscaleDatabase(
|
||||
cfg *types.Config,
|
||||
regCache *zcache.Cache[types.RegistrationID, types.RegisterNode],
|
||||
@@ -76,7 +77,7 @@ func NewHeadscaleDatabase(
|
||||
ID: "202501221827",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
// Remove any invalid routes associated with a node that does not exist.
|
||||
if tx.Migrator().HasTable(&types.Route{}) && tx.Migrator().HasTable(&types.Node{}) {
|
||||
if tx.Migrator().HasTable(&types.Route{}) && tx.Migrator().HasTable(&types.Node{}) { //nolint:staticcheck // SA1019: Route kept for migrations
|
||||
err := tx.Exec("delete from routes where node_id not in (select id from nodes)").Error
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -84,14 +85,14 @@ func NewHeadscaleDatabase(
|
||||
}
|
||||
|
||||
// Remove any invalid routes without a node_id.
|
||||
if tx.Migrator().HasTable(&types.Route{}) {
|
||||
if tx.Migrator().HasTable(&types.Route{}) { //nolint:staticcheck // SA1019: Route kept for migrations
|
||||
err := tx.Exec("delete from routes where node_id is null").Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := tx.AutoMigrate(&types.Route{})
|
||||
err := tx.AutoMigrate(&types.Route{}) //nolint:staticcheck // SA1019: Route kept for migrations
|
||||
if err != nil {
|
||||
return fmt.Errorf("automigrating types.Route: %w", err)
|
||||
}
|
||||
@@ -109,6 +110,7 @@ func NewHeadscaleDatabase(
|
||||
if err != nil {
|
||||
return fmt.Errorf("automigrating types.PreAuthKey: %w", err)
|
||||
}
|
||||
|
||||
err = tx.AutoMigrate(&types.Node{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("automigrating types.Node: %w", err)
|
||||
@@ -155,7 +157,8 @@ AND auth_key_id NOT IN (
|
||||
|
||||
nodeRoutes := map[uint64][]netip.Prefix{}
|
||||
|
||||
var routes []types.Route
|
||||
var routes []types.Route //nolint:staticcheck // SA1019: Route kept for migrations
|
||||
|
||||
err = tx.Find(&routes).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching routes: %w", err)
|
||||
@@ -168,10 +171,10 @@ AND auth_key_id NOT IN (
|
||||
}
|
||||
|
||||
for nodeID, routes := range nodeRoutes {
|
||||
tsaddr.SortPrefixes(routes)
|
||||
slices.SortFunc(routes, netip.Prefix.Compare)
|
||||
routes = slices.Compact(routes)
|
||||
|
||||
data, err := json.Marshal(routes)
|
||||
data, _ := json.Marshal(routes)
|
||||
|
||||
err = tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", data).Error
|
||||
if err != nil {
|
||||
@@ -180,7 +183,7 @@ AND auth_key_id NOT IN (
|
||||
}
|
||||
|
||||
// Drop the old table.
|
||||
_ = tx.Migrator().DropTable(&types.Route{})
|
||||
_ = tx.Migrator().DropTable(&types.Route{}) //nolint:staticcheck // SA1019: Route kept for migrations
|
||||
|
||||
return nil
|
||||
},
|
||||
@@ -245,21 +248,24 @@ AND auth_key_id NOT IN (
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
// Only run on SQLite
|
||||
if cfg.Database.Type != types.DatabaseSqlite {
|
||||
log.Info().Msg("Skipping schema migration on non-SQLite database")
|
||||
log.Info().Msg("skipping schema migration on non-SQLite database")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Info().Msg("Starting schema recreation with table renaming")
|
||||
log.Info().Msg("starting schema recreation with table renaming")
|
||||
|
||||
// Rename existing tables to _old versions
|
||||
tablesToRename := []string{"users", "pre_auth_keys", "api_keys", "nodes", "policies"}
|
||||
|
||||
// Check if routes table exists and drop it (should have been migrated already)
|
||||
var routesExists bool
|
||||
|
||||
err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='routes'").Row().Scan(&routesExists)
|
||||
if err == nil && routesExists {
|
||||
log.Info().Msg("Dropping leftover routes table")
|
||||
if err := tx.Exec("DROP TABLE routes").Error; err != nil {
|
||||
log.Info().Msg("dropping leftover routes table")
|
||||
|
||||
err := tx.Exec("DROP TABLE routes").Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("dropping routes table: %w", err)
|
||||
}
|
||||
}
|
||||
@@ -281,6 +287,7 @@ AND auth_key_id NOT IN (
for _, table := range tablesToRename {
// Check if table exists before renaming
var exists bool

err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=?", table).Row().Scan(&exists)
if err != nil {
return fmt.Errorf("checking if table %s exists: %w", table, err)
@@ -291,7 +298,8 @@ AND auth_key_id NOT IN (
_ = tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error

// Rename current table to _old
if err := tx.Exec("ALTER TABLE " + table + " RENAME TO " + table + "_old").Error; err != nil {
err := tx.Exec("ALTER TABLE " + table + " RENAME TO " + table + "_old").Error
if err != nil {
return fmt.Errorf("renaming table %s to %s_old: %w", table, table, err)
}
}
@@ -365,7 +373,8 @@ AND auth_key_id NOT IN (
}

for _, createSQL := range tableCreationSQL {
if err := tx.Exec(createSQL).Error; err != nil {
err := tx.Exec(createSQL).Error
if err != nil {
return fmt.Errorf("creating new table: %w", err)
}
}
@@ -394,7 +403,8 @@ AND auth_key_id NOT IN (
}

for _, copySQL := range dataCopySQL {
if err := tx.Exec(copySQL).Error; err != nil {
err := tx.Exec(copySQL).Error
if err != nil {
return fmt.Errorf("copying data: %w", err)
}
}
@@ -417,19 +427,21 @@ AND auth_key_id NOT IN (
}

for _, indexSQL := range indexes {
if err := tx.Exec(indexSQL).Error; err != nil {
err := tx.Exec(indexSQL).Error
if err != nil {
return fmt.Errorf("creating index: %w", err)
}
}

// Drop old tables only after everything succeeds
for _, table := range tablesToRename {
if err := tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error; err != nil {
log.Warn().Str("table", table+"_old").Err(err).Msg("Failed to drop old table, but migration succeeded")
err := tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error
if err != nil {
log.Warn().Str("table", table+"_old").Err(err).Msg("failed to drop old table, but migration succeeded")
}
}

log.Info().Msg("Schema recreation completed successfully")
log.Info().Msg("schema recreation completed successfully")

return nil
},
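The hunks above are style cleanups inside headscale's SQLite schema-recreation migration; the overall shape of that migration — rename to `_old`, create fresh tables, copy data, rebuild indexes, drop the old copies only after everything succeeds — is worth seeing in one piece. A minimal sketch of the pattern; the statements here are illustrative placeholders, not headscale's real schema:

```go
package sketch

import (
	"database/sql"
	"fmt"
)

// recreateTable shows the rename -> create -> copy -> index -> drop pattern
// used by the migration above. SQLite cannot alter columns in place, so the
// table is rebuilt and data copied across inside one transaction.
func recreateTable(tx *sql.Tx, table string) error {
	steps := []string{
		"DROP TABLE IF EXISTS " + table + "_old",
		"ALTER TABLE " + table + " RENAME TO " + table + "_old",
		"CREATE TABLE " + table + " (id INTEGER PRIMARY KEY, name TEXT NOT NULL)",
		"INSERT INTO " + table + " (id, name) SELECT id, name FROM " + table + "_old",
		"CREATE INDEX idx_" + table + "_name ON " + table + " (name)",
		"DROP TABLE IF EXISTS " + table + "_old",
	}

	for _, stmt := range steps {
		_, err := tx.Exec(stmt)
		if err != nil {
			return fmt.Errorf("running %q: %w", stmt, err)
		}
	}

	return nil
}
```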
@@ -595,12 +607,12 @@ AND auth_key_id NOT IN (
// 1. Load policy from file or database based on configuration
policyData, err := PolicyBytes(tx, cfg)
if err != nil {
log.Warn().Err(err).Msg("Failed to load policy, skipping RequestTags migration (tags will be validated on node reconnect)")
log.Warn().Err(err).Msg("failed to load policy, skipping RequestTags migration (tags will be validated on node reconnect)")
return nil
}

if len(policyData) == 0 {
log.Info().Msg("No policy found, skipping RequestTags migration (tags will be validated on node reconnect)")
log.Info().Msg("no policy found, skipping RequestTags migration (tags will be validated on node reconnect)")
return nil
}

@@ -618,7 +630,7 @@ AND auth_key_id NOT IN (
// 3. Create PolicyManager (handles HuJSON parsing, groups, nested tags, etc.)
polMan, err := policy.NewPolicyManager(policyData, users, nodes.ViewSlice())
if err != nil {
log.Warn().Err(err).Msg("Failed to parse policy, skipping RequestTags migration (tags will be validated on node reconnect)")
log.Warn().Err(err).Msg("failed to parse policy, skipping RequestTags migration (tags will be validated on node reconnect)")
return nil
}

@@ -652,8 +664,7 @@ AND auth_key_id NOT IN (
if len(validatedTags) == 0 {
if len(rejectedTags) > 0 {
log.Debug().
Uint64("node.id", uint64(node.ID)).
Str("node.name", node.Hostname).
EmbedObject(node).
Strs("rejected_tags", rejectedTags).
Msg("RequestTags rejected during migration (not authorized)")
}
@@ -676,8 +687,7 @@ AND auth_key_id NOT IN (
}

log.Info().
Uint64("node.id", uint64(node.ID)).
Str("node.name", node.Hostname).
EmbedObject(node).
Strs("validated_tags", validatedTags).
Strs("rejected_tags", rejectedTags).
Strs("existing_tags", existingTags).
@@ -762,6 +772,7 @@ AND auth_key_id NOT IN (

// or else it blocks...
sqlConn.SetMaxIdleConns(maxIdleConns)

sqlConn.SetMaxOpenConns(maxOpenConns)
defer sqlConn.SetMaxIdleConns(1)
defer sqlConn.SetMaxOpenConns(1)
@@ -779,7 +790,7 @@ AND auth_key_id NOT IN (
},
}

if err := squibble.Validate(ctx, sqlConn, dbSchema, &opts); err != nil {
if err := squibble.Validate(ctx, sqlConn, dbSchema, &opts); err != nil { //nolint:noinlineerr
return nil, fmt.Errorf("validating schema: %w", err)
}
}
@@ -805,6 +816,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
switch cfg.Type {
case types.DatabaseSqlite:
dir := filepath.Dir(cfg.Sqlite.Path)

err := util.EnsureDir(dir)
if err != nil {
return nil, fmt.Errorf("creating directory for sqlite: %w", err)
@@ -858,7 +870,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
Str("path", dbString).
Msg("Opening database")

if sslEnabled, err := strconv.ParseBool(cfg.Postgres.Ssl); err == nil {
if sslEnabled, err := strconv.ParseBool(cfg.Postgres.Ssl); err == nil { //nolint:noinlineerr
if !sslEnabled {
dbString += " sslmode=disable"
}
@@ -913,7 +925,7 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig

// Get the current foreign key status
var fkOriginallyEnabled int
if err := dbConn.Raw("PRAGMA foreign_keys").Scan(&fkOriginallyEnabled).Error; err != nil {
if err := dbConn.Raw("PRAGMA foreign_keys").Scan(&fkOriginallyEnabled).Error; err != nil { //nolint:noinlineerr
return fmt.Errorf("checking foreign key status: %w", err)
}

@@ -937,33 +949,36 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
}

for _, migrationID := range migrationIDs {
log.Trace().Caller().Str("migration_id", migrationID).Msg("Running migration")
log.Trace().Caller().Str("migration_id", migrationID).Msg("running migration")
needsFKDisabled := migrationsRequiringFKDisabled[migrationID]

if needsFKDisabled {
// Disable foreign keys for this migration
if err := dbConn.Exec("PRAGMA foreign_keys = OFF").Error; err != nil {
err := dbConn.Exec("PRAGMA foreign_keys = OFF").Error
if err != nil {
return fmt.Errorf("disabling foreign keys for migration %s: %w", migrationID, err)
}
} else {
// Ensure foreign keys are enabled for this migration
if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil {
err := dbConn.Exec("PRAGMA foreign_keys = ON").Error
if err != nil {
return fmt.Errorf("enabling foreign keys for migration %s: %w", migrationID, err)
}
}

// Run up to this specific migration (will only run the next pending migration)
if err := migrations.MigrateTo(migrationID); err != nil {
err := migrations.MigrateTo(migrationID)
if err != nil {
return fmt.Errorf("running migration %s: %w", migrationID, err)
}
}

if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil {
if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil { //nolint:noinlineerr
return fmt.Errorf("restoring foreign keys: %w", err)
}

// Run the rest of the migrations
if err := migrations.Migrate(); err != nil {
if err := migrations.Migrate(); err != nil { //nolint:noinlineerr
return err
}

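SQLite cannot rebuild tables cleanly while foreign-key enforcement is on, so the migration runner above toggles `PRAGMA foreign_keys` per migration and restores it afterwards. A self-contained sketch of that control flow; `applyMigration` is a hypothetical stand-in for gormigrate's `MigrateTo`, and the map name mirrors the diff:

```go
package sketch

import (
	"database/sql"
	"fmt"
)

// runWithFKControl mirrors the loop above: disable foreign_keys only for the
// migrations that rebuild tables, keep it on for everything else, and always
// restore enforcement at the end.
func runWithFKControl(db *sql.DB, ids []string, migrationsRequiringFKDisabled map[string]bool,
	applyMigration func(id string) error,
) error {
	for _, id := range ids {
		pragma := "PRAGMA foreign_keys = ON"
		if migrationsRequiringFKDisabled[id] {
			pragma = "PRAGMA foreign_keys = OFF"
		}

		if _, err := db.Exec(pragma); err != nil {
			return fmt.Errorf("setting foreign_keys for %s: %w", id, err)
		}

		if err := applyMigration(id); err != nil {
			return fmt.Errorf("running migration %s: %w", id, err)
		}
	}

	// Restore enforcement once the special-cased migrations are done.
	_, err := db.Exec("PRAGMA foreign_keys = ON")

	return err
}
```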
@@ -981,16 +996,22 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
if err != nil {
return err
}
defer rows.Close()

for rows.Next() {
var violation constraintViolation
if err := rows.Scan(&violation.Table, &violation.RowID, &violation.Parent, &violation.ConstraintIndex); err != nil {

err := rows.Scan(&violation.Table, &violation.RowID, &violation.Parent, &violation.ConstraintIndex)
if err != nil {
return err
}

violatedConstraints = append(violatedConstraints, violation)
}
_ = rows.Close()

if err := rows.Err(); err != nil { //nolint:noinlineerr
return err
}

if len(violatedConstraints) > 0 {
for _, violation := range violatedConstraints {
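Beyond the error-style change, this hunk swaps `defer rows.Close()` for an explicit `rows.Close()` plus a `rows.Err()` check after the loop, which is the pattern database/sql documents for surfacing iteration errors. A sketch of collecting SQLite's `PRAGMA foreign_key_check` output that way; the struct shape follows the Scan call above, with `RowID` assumed nullable since WITHOUT ROWID tables report NULL there:

```go
package sketch

import "database/sql"

type constraintViolation struct {
	Table           string
	RowID           sql.NullInt64
	Parent          string
	ConstraintIndex int
}

// checkForeignKeys collects violations reported by SQLite. Closing the rows
// explicitly and checking rows.Err() after the loop surfaces errors that a
// bare defer would silently drop.
func checkForeignKeys(db *sql.DB) ([]constraintViolation, error) {
	rows, err := db.Query("PRAGMA foreign_key_check")
	if err != nil {
		return nil, err
	}

	var violations []constraintViolation

	for rows.Next() {
		var v constraintViolation

		err := rows.Scan(&v.Table, &v.RowID, &v.Parent, &v.ConstraintIndex)
		if err != nil {
			_ = rows.Close()

			return nil, err
		}

		violations = append(violations, v)
	}
	_ = rows.Close()

	if err := rows.Err(); err != nil {
		return nil, err
	}

	return violations, nil
}
```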
@@ -1005,7 +1026,8 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
}
} else {
// PostgreSQL can run all migrations in one block - no foreign key issues
if err := migrations.Migrate(); err != nil {
err := migrations.Migrate()
if err != nil {
return err
}
}
@@ -1016,6 +1038,7 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
func (hsdb *HSDatabase) PingDB(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()

sqlDB, err := hsdb.DB.DB()
if err != nil {
return err
@@ -1031,7 +1054,7 @@ func (hsdb *HSDatabase) Close() error {
}

if hsdb.cfg.Database.Type == types.DatabaseSqlite && hsdb.cfg.Database.Sqlite.WriteAheadLog {
db.Exec("VACUUM")
db.Exec("VACUUM") //nolint:errcheck,noctx
}

return db.Close()
@@ -1040,12 +1063,14 @@ func (hsdb *HSDatabase) Close() error {
func (hsdb *HSDatabase) Read(fn func(rx *gorm.DB) error) error {
rx := hsdb.DB.Begin()
defer rx.Rollback()

return fn(rx)
}

func Read[T any](db *gorm.DB, fn func(rx *gorm.DB) (T, error)) (T, error) {
rx := db.Begin()
defer rx.Rollback()

ret, err := fn(rx)
if err != nil {
var no T
@@ -1058,7 +1083,9 @@ func Read[T any](db *gorm.DB, fn func(rx *gorm.DB) (T, error)) (T, error) {
func (hsdb *HSDatabase) Write(fn func(tx *gorm.DB) error) error {
tx := hsdb.DB.Begin()
defer tx.Rollback()
if err := fn(tx); err != nil {

err := fn(tx)
if err != nil {
return err
}

@@ -1068,6 +1095,7 @@ func (hsdb *HSDatabase) Write(fn func(tx *gorm.DB) error) error {
func Write[T any](db *gorm.DB, fn func(tx *gorm.DB) (T, error)) (T, error) {
tx := db.Begin()
defer tx.Rollback()

ret, err := fn(tx)
if err != nil {
var no T

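The generic `Read[T]`/`Write[T]` helpers touched here wrap every database access in a gorm transaction guarded by a deferred `Rollback`. A sketch of the same pattern; the commit step is elided from the hunks above, so placing it after `fn` succeeds is an assumption about the surrounding code:

```go
package sketch

import "gorm.io/gorm"

// readTx runs fn inside a transaction that is always rolled back, matching
// the Read[T] helper above: reads get a consistent snapshot without ever
// committing anything.
func readTx[T any](db *gorm.DB, fn func(rx *gorm.DB) (T, error)) (T, error) {
	rx := db.Begin()
	defer rx.Rollback()

	return fn(rx)
}

// writeTx commits only when fn succeeds; the deferred Rollback becomes a
// no-op after a successful Commit. The commit is assumed, not shown in the
// hunk above.
func writeTx[T any](db *gorm.DB, fn func(tx *gorm.DB) (T, error)) (T, error) {
	tx := db.Begin()
	defer tx.Rollback()

	ret, err := fn(tx)
	if err != nil {
		var zero T

		return zero, err
	}

	return ret, tx.Commit().Error
}
```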
@@ -1,6 +1,7 @@
package db

import (
"context"
"database/sql"
"os"
"os/exec"
@@ -44,6 +45,7 @@ func TestSQLiteMigrationAndDataValidation(t *testing.T) {

// Verify api_keys data preservation
var apiKeyCount int

err = hsdb.DB.Raw("SELECT COUNT(*) FROM api_keys").Scan(&apiKeyCount).Error
require.NoError(t, err)
assert.Equal(t, 2, apiKeyCount, "should preserve all 2 api_keys from original schema")
@@ -176,7 +178,7 @@ func createSQLiteFromSQLFile(sqlFilePath, dbPath string) error {
return err
}

_, err = db.Exec(string(schemaContent))
_, err = db.ExecContext(context.Background(), string(schemaContent))

return err
}
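`db.Exec` becomes `db.ExecContext` here, and `Ping`/`QueryRow` get the same treatment further down, satisfying the `noctx` linter by threading a context through every database/sql call. A minimal sketch of the substitution, assuming the repo's modernc.org/sqlite driver (registered under the name "sqlite"):

```go
package main

import (
	"context"
	"database/sql"

	_ "modernc.org/sqlite" // assumed driver, matching the repo's SQLite driver
)

func main() {
	db, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	ctx := context.Background()

	// Context-aware variants allow cancellation and satisfy noctx.
	if err := db.PingContext(ctx); err != nil {
		panic(err)
	}

	_, err = db.ExecContext(ctx, "CREATE TABLE t (id INTEGER PRIMARY KEY)")
	if err != nil {
		panic(err)
	}
}
```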
@@ -186,6 +188,7 @@ func createSQLiteFromSQLFile(sqlFilePath, dbPath string) error {
func requireConstraintFailed(t *testing.T, err error) {
t.Helper()
require.Error(t, err)

if !strings.Contains(err.Error(), "UNIQUE constraint failed:") && !strings.Contains(err.Error(), "violates unique constraint") {
require.Failf(t, "expected error to contain a constraint failure, got: %s", err.Error())
}
@@ -198,7 +201,7 @@ func TestConstraints(t *testing.T) {
}{
{
name: "no-duplicate-username-if-no-oidc",
run: func(t *testing.T, db *gorm.DB) {
run: func(t *testing.T, db *gorm.DB) { //nolint:thelper
_, err := CreateUser(db, types.User{Name: "user1"})
require.NoError(t, err)
_, err = CreateUser(db, types.User{Name: "user1"})
@@ -207,7 +210,7 @@
},
{
name: "no-oidc-duplicate-username-and-id",
run: func(t *testing.T, db *gorm.DB) {
run: func(t *testing.T, db *gorm.DB) { //nolint:thelper
user := types.User{
Model: gorm.Model{ID: 1},
Name: "user1",
@@ -229,7 +232,7 @@
},
{
name: "no-oidc-duplicate-id",
run: func(t *testing.T, db *gorm.DB) {
run: func(t *testing.T, db *gorm.DB) { //nolint:thelper
user := types.User{
Model: gorm.Model{ID: 1},
Name: "user1",
@@ -251,7 +254,7 @@
},
{
name: "allow-duplicate-username-cli-then-oidc",
run: func(t *testing.T, db *gorm.DB) {
run: func(t *testing.T, db *gorm.DB) { //nolint:thelper
_, err := CreateUser(db, types.User{Name: "user1"}) // Create CLI username
require.NoError(t, err)

@@ -266,7 +269,7 @@
},
{
name: "allow-duplicate-username-oidc-then-cli",
run: func(t *testing.T, db *gorm.DB) {
run: func(t *testing.T, db *gorm.DB) { //nolint:thelper
user := types.User{
Name: "user1",
ProviderIdentifier: sql.NullString{String: "http://test.com/user1", Valid: true},
@@ -320,7 +323,7 @@ func TestPostgresMigrationAndDataValidation(t *testing.T) {
}

// Construct the pg_restore command
cmd := exec.Command(pgRestorePath, "--verbose", "--if-exists", "--clean", "--no-owner", "--dbname", u.String(), tt.dbPath)
cmd := exec.CommandContext(context.Background(), pgRestorePath, "--verbose", "--if-exists", "--clean", "--no-owner", "--dbname", u.String(), tt.dbPath)

// Set the output streams
cmd.Stdout = os.Stdout
@@ -401,6 +404,7 @@ func dbForTestWithPath(t *testing.T, sqlFilePath string) *HSDatabase {
// skip already-applied migrations and only run new ones.
func TestSQLiteAllTestdataMigrations(t *testing.T) {
t.Parallel()

schemas, err := os.ReadDir("testdata/sqlite")
require.NoError(t, err)

@@ -27,13 +27,17 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
t.Logf("Initial number of goroutines: %d", initialGoroutines)

// Basic deletion tracking mechanism
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
var deletionWg sync.WaitGroup
var (
deletedIDs []types.NodeID
deleteMutex sync.Mutex
deletionWg sync.WaitGroup
)

deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()

deletedIDs = append(deletedIDs, nodeID)

deleteMutex.Unlock()
deletionWg.Done()
}
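These test hunks consolidate adjacent `var` declarations into a single `var (...)` block, a gofumpt-style grouping that recurs throughout the compare. The tracking pattern itself — a mutex-guarded slice plus a WaitGroup — is how the tests observe the garbage collector's callbacks; a standalone sketch:

```go
package main

import (
	"fmt"
	"sync"
)

type NodeID uint64 // stand-in for types.NodeID

func main() {
	var (
		deletedIDs  []NodeID
		deleteMutex sync.Mutex
		deletionWg  sync.WaitGroup
	)

	// deleteFunc records each deletion; the WaitGroup lets the test block
	// until every expected callback has fired.
	deleteFunc := func(id NodeID) {
		deleteMutex.Lock()
		deletedIDs = append(deletedIDs, id)
		deleteMutex.Unlock()
		deletionWg.Done()
	}

	const numNodes = 10

	deletionWg.Add(numNodes)

	for i := 1; i <= numNodes; i++ {
		go deleteFunc(NodeID(i))
	}

	deletionWg.Wait()
	fmt.Println("deleted:", len(deletedIDs))
}
```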
@@ -43,14 +47,17 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
go gc.Start()

// Schedule several nodes for deletion with short expiry
const expiry = fifty
const numNodes = 100
const (
expiry = fifty
numNodes = 100
)

// Set up wait group for expected deletions

deletionWg.Add(numNodes)

for i := 1; i <= numNodes; i++ {
gc.Schedule(types.NodeID(i), expiry)
gc.Schedule(types.NodeID(i), expiry) //nolint:gosec // safe conversion in test
}

// Wait for all scheduled deletions to complete
@@ -63,7 +70,7 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {

// Schedule and immediately cancel to test that part of the code
for i := numNodes + 1; i <= numNodes*2; i++ {
nodeID := types.NodeID(i)
nodeID := types.NodeID(i) //nolint:gosec // safe conversion in test
gc.Schedule(nodeID, time.Hour)
gc.Cancel(nodeID)
}
@@ -87,14 +94,18 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
// and then reschedules it with a shorter expiry, and verifies that the node is deleted only once.
func TestEphemeralGarbageCollectorReschedule(t *testing.T) {
// Deletion tracking mechanism
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
var (
deletedIDs []types.NodeID
deleteMutex sync.Mutex
)

deletionNotifier := make(chan types.NodeID, 1)

deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()

deletedIDs = append(deletedIDs, nodeID)

deleteMutex.Unlock()

deletionNotifier <- nodeID
@@ -102,11 +113,14 @@ func TestEphemeralGarbageCollectorReschedule(t *testing.T) {

// Start GC
gc := NewEphemeralGarbageCollector(deleteFunc)

go gc.Start()
defer gc.Close()

const shortExpiry = fifty
const longExpiry = 1 * time.Hour
const (
shortExpiry = fifty
longExpiry = 1 * time.Hour
)

nodeID := types.NodeID(1)

@@ -136,23 +150,31 @@ func TestEphemeralGarbageCollectorReschedule(t *testing.T) {
// and verifies that the node is deleted only once.
func TestEphemeralGarbageCollectorCancelAndReschedule(t *testing.T) {
// Deletion tracking mechanism
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
var (
deletedIDs []types.NodeID
deleteMutex sync.Mutex
)

deletionNotifier := make(chan types.NodeID, 1)

deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()

deletedIDs = append(deletedIDs, nodeID)

deleteMutex.Unlock()

deletionNotifier <- nodeID
}

// Start the GC
gc := NewEphemeralGarbageCollector(deleteFunc)

go gc.Start()
defer gc.Close()

nodeID := types.NodeID(1)

const expiry = fifty

// Schedule node for deletion
@@ -196,14 +218,18 @@ func TestEphemeralGarbageCollectorCancelAndReschedule(t *testing.T) {
// It creates a new EphemeralGarbageCollector, schedules a node for deletion, closes the GC, and verifies that the node is not deleted.
func TestEphemeralGarbageCollectorCloseBeforeTimerFires(t *testing.T) {
// Deletion tracking
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
var (
deletedIDs []types.NodeID
deleteMutex sync.Mutex
)

deletionNotifier := make(chan types.NodeID, 1)

deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()

deletedIDs = append(deletedIDs, nodeID)

deleteMutex.Unlock()

deletionNotifier <- nodeID
@@ -246,13 +272,18 @@ func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {
t.Logf("Initial number of goroutines: %d", initialGoroutines)

// Deletion tracking
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
var (
deletedIDs []types.NodeID
deleteMutex sync.Mutex
)

nodeDeleted := make(chan struct{})

deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()

deletedIDs = append(deletedIDs, nodeID)

deleteMutex.Unlock()
close(nodeDeleted) // Signal that deletion happened
}
@@ -263,10 +294,12 @@ func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {
// Use a WaitGroup to ensure the GC has started
var startWg sync.WaitGroup
startWg.Add(1)

go func() {
startWg.Done() // Signal that the goroutine has started
gc.Start()
}()

startWg.Wait() // Wait for the GC to start

// Close GC right away
@@ -288,7 +321,9 @@ func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {

// Check no node was deleted
deleteMutex.Lock()

nodesDeleted := len(deletedIDs)

deleteMutex.Unlock()
assert.Equal(t, 0, nodesDeleted, "No nodes should be deleted when Schedule is called after Close")

@@ -311,12 +346,16 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
t.Logf("Initial number of goroutines: %d", initialGoroutines)

// Deletion tracking mechanism
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
var (
deletedIDs []types.NodeID
deleteMutex sync.Mutex
)

deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()

deletedIDs = append(deletedIDs, nodeID)

deleteMutex.Unlock()
}

@@ -325,8 +364,10 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
go gc.Start()

// Number of concurrent scheduling goroutines
const numSchedulers = 10
const nodesPerScheduler = 50
const (
numSchedulers = 10
nodesPerScheduler = 50
)

const closeAfterNodes = 25 // Close GC after this many nodes per scheduler

@@ -353,8 +394,8 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
case <-stopScheduling:
return
default:
nodeID := types.NodeID(baseNodeID + j + 1)
gc.Schedule(nodeID, 1*time.Hour) // Long expiry to ensure it doesn't trigger during test
nodeID := types.NodeID(baseNodeID + j + 1) //nolint:gosec // safe conversion in test
gc.Schedule(nodeID, 1*time.Hour) // Long expiry to ensure it doesn't trigger during test
atomic.AddInt64(&scheduledCount, 1)

// Yield to other goroutines to introduce variability

@@ -17,7 +17,11 @@ import (
"tailscale.com/net/tsaddr"
)

var errGeneratedIPBytesInvalid = errors.New("generated ip bytes are invalid ip")
var (
errGeneratedIPBytesInvalid = errors.New("generated ip bytes are invalid ip")
errGeneratedIPNotInPrefix = errors.New("generated ip not in prefix")
errIPAllocatorNil = errors.New("ip allocator was nil")
)

// IPAllocator is a singleton responsible for allocating
// IP addresses for nodes and making sure the same
@@ -62,8 +66,10 @@ func NewIPAllocator(
strategy: strategy,
}

var v4s []sql.NullString
var v6s []sql.NullString
var (
v4s []sql.NullString
v6s []sql.NullString
)

if db != nil {
err := db.Read(func(rx *gorm.DB) error {
@@ -135,15 +141,18 @@ func (i *IPAllocator) Next() (*netip.Addr, *netip.Addr, error) {
i.mu.Lock()
defer i.mu.Unlock()

var err error
var ret4 *netip.Addr
var ret6 *netip.Addr
var (
err error
ret4 *netip.Addr
ret6 *netip.Addr
)

if i.prefix4 != nil {
ret4, err = i.next(i.prev4, i.prefix4)
if err != nil {
return nil, nil, fmt.Errorf("allocating IPv4 address: %w", err)
}

i.prev4 = *ret4
}

@@ -152,6 +161,7 @@ func (i *IPAllocator) Next() (*netip.Addr, *netip.Addr, error) {
if err != nil {
return nil, nil, fmt.Errorf("allocating IPv6 address: %w", err)
}

i.prev6 = *ret6
}

@@ -168,8 +178,10 @@ func (i *IPAllocator) nextLocked(prev netip.Addr, prefix *netip.Prefix) (*netip.
}

func (i *IPAllocator) next(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) {
var err error
var ip netip.Addr
var (
err error
ip netip.Addr
)

switch i.strategy {
case types.IPAllocationStrategySequential:
@@ -243,7 +255,8 @@ func randomNext(pfx netip.Prefix) (netip.Addr, error) {

if !pfx.Contains(ip) {
return netip.Addr{}, fmt.Errorf(
"generated ip(%s) not in prefix(%s)",
"%w: ip(%s) not in prefix(%s)",
errGeneratedIPNotInPrefix,
ip.String(),
pfx.String(),
)
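This hunk replaces an ad-hoc `fmt.Errorf` message with a package-level sentinel (`errGeneratedIPNotInPrefix`) wrapped via `%w`, so callers can match the failure with `errors.Is` instead of string comparison. A compact sketch of the pattern:

```go
package main

import (
	"errors"
	"fmt"
	"net/netip"
)

var errGeneratedIPNotInPrefix = errors.New("generated ip not in prefix")

// checkInPrefix wraps the sentinel with %w, keeping it matchable by
// errors.Is while still carrying the specific addresses in the message.
func checkInPrefix(ip netip.Addr, pfx netip.Prefix) error {
	if !pfx.Contains(ip) {
		return fmt.Errorf("%w: ip(%s) not in prefix(%s)", errGeneratedIPNotInPrefix, ip, pfx)
	}

	return nil
}

func main() {
	err := checkInPrefix(netip.MustParseAddr("10.0.0.1"), netip.MustParsePrefix("100.64.0.0/10"))
	fmt.Println(errors.Is(err, errGeneratedIPNotInPrefix)) // true
}
```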
@@ -268,11 +281,14 @@ func isTailscaleReservedIP(ip netip.Addr) bool {
// If a prefix type has been removed (IPv4 or IPv6), it
// will remove the IPs in that family from the node.
func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
var err error
var ret []string
var (
err error
ret []string
)

err = db.Write(func(tx *gorm.DB) error {
if i == nil {
return errors.New("backfilling IPs: ip allocator was nil")
return fmt.Errorf("backfilling IPs: %w", errIPAllocatorNil)
}

log.Trace().Caller().Msgf("starting to backfill IPs")
@@ -283,18 +299,19 @@ func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
}

for _, node := range nodes {
log.Trace().Caller().Uint64("node.id", node.ID.Uint64()).Str("node.name", node.Hostname).Msg("IP backfill check started because node found in database")
log.Trace().Caller().EmbedObject(node).Msg("ip backfill check started because node found in database")

changed := false
// IPv4 prefix is set, but node ip is missing, alloc
if i.prefix4 != nil && node.IPv4 == nil {
ret4, err := i.nextLocked(i.prev4, i.prefix4)
if err != nil {
return fmt.Errorf("failed to allocate ipv4 for node(%d): %w", node.ID, err)
return fmt.Errorf("allocating IPv4 for node(%d): %w", node.ID, err)
}

node.IPv4 = ret4
changed = true

ret = append(ret, fmt.Sprintf("assigned IPv4 %q to Node(%d) %q", ret4.String(), node.ID, node.Hostname))
}

@@ -302,11 +319,12 @@ func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
if i.prefix6 != nil && node.IPv6 == nil {
ret6, err := i.nextLocked(i.prev6, i.prefix6)
if err != nil {
return fmt.Errorf("failed to allocate ipv6 for node(%d): %w", node.ID, err)
return fmt.Errorf("allocating IPv6 for node(%d): %w", node.ID, err)
}

node.IPv6 = ret6
changed = true

ret = append(ret, fmt.Sprintf("assigned IPv6 %q to Node(%d) %q", ret6.String(), node.ID, node.Hostname))
}

@@ -13,7 +13,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/net/tsaddr"
"tailscale.com/types/ptr"
)

var mpp = func(pref string) *netip.Prefix {
@@ -21,9 +20,7 @@ var mpp = func(pref string) *netip.Prefix {
return &p
}

var na = func(pref string) netip.Addr {
return netip.MustParseAddr(pref)
}
var na = netip.MustParseAddr

var nap = func(pref string) *netip.Addr {
n := na(pref)
@@ -158,8 +155,10 @@ func TestIPAllocatorSequential(t *testing.T) {
types.IPAllocationStrategySequential,
)

var got4s []netip.Addr
var got6s []netip.Addr
var (
got4s []netip.Addr
got6s []netip.Addr
)

for range tt.getCount {
got4, got6, err := alloc.Next()
@@ -175,6 +174,7 @@ func TestIPAllocatorSequential(t *testing.T) {
got6s = append(got6s, *got6)
}
}

if diff := cmp.Diff(tt.want4, got4s, util.Comparers...); diff != "" {
t.Errorf("IPAllocator 4s unexpected result (-want +got):\n%s", diff)
}
@@ -288,6 +288,7 @@ func TestBackfillIPAddresses(t *testing.T) {
fullNodeP := func(i int) *types.Node {
v4 := fmt.Sprintf("100.64.0.%d", i)
v6 := fmt.Sprintf("fd7a:115c:a1e0::%d", i)

return &types.Node{
IPv4: nap(v4),
IPv6: nap(v6),
@@ -484,12 +485,13 @@ func TestBackfillIPAddresses(t *testing.T) {
func TestIPAllocatorNextNoReservedIPs(t *testing.T) {
db, err := newSQLiteTestDB()
require.NoError(t, err)

defer db.Close()

alloc, err := NewIPAllocator(
db,
ptr.To(tsaddr.CGNATRange()),
ptr.To(tsaddr.TailscaleULARange()),
new(tsaddr.CGNATRange()),
new(tsaddr.TailscaleULARange()),
types.IPAllocationStrategySequential,
)
if err != nil {
@@ -497,17 +499,17 @@ func TestIPAllocatorNextNoReservedIPs(t *testing.T) {
}

// Validate that we do not give out 100.100.100.100
nextQuad100, err := alloc.next(na("100.100.100.99"), ptr.To(tsaddr.CGNATRange()))
nextQuad100, err := alloc.next(na("100.100.100.99"), new(tsaddr.CGNATRange()))
require.NoError(t, err)
assert.Equal(t, na("100.100.100.101"), *nextQuad100)

// Validate that we do not give out fd7a:115c:a1e0::53
nextQuad100v6, err := alloc.next(na("fd7a:115c:a1e0::52"), ptr.To(tsaddr.TailscaleULARange()))
nextQuad100v6, err := alloc.next(na("fd7a:115c:a1e0::52"), new(tsaddr.TailscaleULARange()))
require.NoError(t, err)
assert.Equal(t, na("fd7a:115c:a1e0::54"), *nextQuad100v6)

// Validate that we do not give out fd7a:115c:a1e0::53
nextChrome, err := alloc.next(na("100.115.91.255"), ptr.To(tsaddr.CGNATRange()))
nextChrome, err := alloc.next(na("100.115.91.255"), new(tsaddr.CGNATRange()))
t.Logf("chrome: %s", nextChrome.String())
require.NoError(t, err)
assert.Equal(t, na("100.115.94.0"), *nextChrome)

@@ -16,18 +16,24 @@ import (

"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/hscontrol/util/zlog/zf"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/types/key"
"tailscale.com/types/ptr"
)

const (
NodeGivenNameHashLength = 8
NodeGivenNameTrimSize = 2

// defaultTestNodePrefix is the default hostname prefix for nodes created in tests.
defaultTestNodePrefix = "testnode"
)

// ErrNodeNameNotUnique is returned when a node name is not unique.
var ErrNodeNameNotUnique = errors.New("node name is not unique")

var invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")

var (
@@ -51,12 +57,14 @@ func (hsdb *HSDatabase) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID)
// If at least one peer ID is given, only these peer nodes will be returned.
func ListPeers(tx *gorm.DB, nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) {
nodes := types.Nodes{}
if err := tx.

err := tx.
Preload("AuthKey").
Preload("AuthKey.User").
Preload("User").
Where("id <> ?", nodeID).
Where(peerIDs).Find(&nodes).Error; err != nil {
Where(peerIDs).Find(&nodes).Error
if err != nil {
return types.Nodes{}, err
}

@@ -75,11 +83,13 @@ func (hsdb *HSDatabase) ListNodes(nodeIDs ...types.NodeID) (types.Nodes, error)
// or for the given nodes if at least one node ID is given as parameter.
func ListNodes(tx *gorm.DB, nodeIDs ...types.NodeID) (types.Nodes, error) {
nodes := types.Nodes{}
if err := tx.

err := tx.
Preload("AuthKey").
Preload("AuthKey.User").
Preload("User").
Where(nodeIDs).Find(&nodes).Error; err != nil {
Where(nodeIDs).Find(&nodes).Error
if err != nil {
return nil, err
}

@@ -89,7 +99,9 @@ func ListNodes(tx *gorm.DB, nodeIDs ...types.NodeID) (types.Nodes, error) {
func (hsdb *HSDatabase) ListEphemeralNodes() (types.Nodes, error) {
return Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) {
nodes := types.Nodes{}
if err := rx.Joins("AuthKey").Where(`"AuthKey"."ephemeral" = true`).Find(&nodes).Error; err != nil {

err := rx.Joins("AuthKey").Where(`"AuthKey"."ephemeral" = true`).Find(&nodes).Error
if err != nil {
return nil, err
}

@@ -207,6 +219,7 @@ func SetTags(

slices.Sort(tags)
tags = slices.Compact(tags)

b, err := json.Marshal(tags)
if err != nil {
return err
@@ -220,7 +233,7 @@ func SetTags(
return nil
}

// SetTags takes a Node struct pointer and update the forced tags.
// SetApprovedRoutes takes a Node struct pointer and updates the approved routes.
func SetApprovedRoutes(
tx *gorm.DB,
nodeID types.NodeID,
@@ -228,7 +241,8 @@ func SetApprovedRoutes(
) error {
if len(routes) == 0 {
// if no routes are provided, we remove all
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", "[]").Error; err != nil {
err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", "[]").Error
if err != nil {
return fmt.Errorf("removing approved routes: %w", err)
}

@@ -251,7 +265,7 @@ func SetApprovedRoutes(
return err
}

if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", string(b)).Error; err != nil {
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", string(b)).Error; err != nil { //nolint:noinlineerr
return fmt.Errorf("updating approved routes: %w", err)
}

@@ -277,22 +291,25 @@ func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error {
func RenameNode(tx *gorm.DB,
nodeID types.NodeID, newName string,
) error {
if err := util.ValidateHostname(newName); err != nil {
err := util.ValidateHostname(newName)
if err != nil {
return fmt.Errorf("renaming node: %w", err)
}

// Check if the new name is unique
var count int64
if err := tx.Model(&types.Node{}).Where("given_name = ? AND id != ?", newName, nodeID).Count(&count).Error; err != nil {
return fmt.Errorf("failed to check name uniqueness: %w", err)

err = tx.Model(&types.Node{}).Where("given_name = ? AND id != ?", newName, nodeID).Count(&count).Error
if err != nil {
return fmt.Errorf("checking name uniqueness: %w", err)
}

if count > 0 {
return errors.New("name is not unique")
return ErrNodeNameNotUnique
}

if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("given_name", newName).Error; err != nil {
return fmt.Errorf("failed to rename node in the database: %w", err)
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("given_name", newName).Error; err != nil { //nolint:noinlineerr
return fmt.Errorf("renaming node in database: %w", err)
}

return nil
@@ -323,7 +340,8 @@ func DeleteNode(tx *gorm.DB,
node *types.Node,
) error {
// Unscoped causes the node to be fully removed from the database.
if err := tx.Unscoped().Delete(&types.Node{}, node.ID).Error; err != nil {
err := tx.Unscoped().Delete(&types.Node{}, node.ID).Error
if err != nil {
return err
}

@@ -337,9 +355,11 @@ func (hsdb *HSDatabase) DeleteEphemeralNode(
nodeID types.NodeID,
) error {
return hsdb.Write(func(tx *gorm.DB) error {
if err := tx.Unscoped().Delete(&types.Node{}, nodeID).Error; err != nil {
err := tx.Unscoped().Delete(&types.Node{}, nodeID).Error
if err != nil {
return err
}

return nil
})
}
@@ -352,19 +372,19 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
}

logEvent := log.Debug().
Str("node", node.Hostname).
Str("machine_key", node.MachineKey.ShortString()).
Str("node_key", node.NodeKey.ShortString())
Str(zf.NodeHostname, node.Hostname).
Str(zf.MachineKey, node.MachineKey.ShortString()).
Str(zf.NodeKey, node.NodeKey.ShortString())

if node.User != nil {
logEvent = logEvent.Str("user", node.User.Username())
logEvent = logEvent.Str(zf.UserName, node.User.Username())
} else if node.UserID != nil {
logEvent = logEvent.Uint("user_id", *node.UserID)
logEvent = logEvent.Uint(zf.UserID, *node.UserID)
} else {
logEvent = logEvent.Str("user", "none")
logEvent = logEvent.Str(zf.UserName, "none")
}

logEvent.Msg("Registering test node")
logEvent.Msg("registering test node")

// If the a new node is registered with the same machine key, to the same user,
// update the existing node.
@@ -379,6 +399,7 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
if ipv4 == nil {
ipv4 = oldNode.IPv4
}

if ipv6 == nil {
ipv6 = oldNode.IPv6
}
@@ -388,16 +409,17 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
// so we store the node.Expire and node.Nodekey that has been set when
// adding it to the registrationCache
if node.IPv4 != nil || node.IPv6 != nil {
if err := tx.Save(&node).Error; err != nil {
return nil, fmt.Errorf("failed register existing node in the database: %w", err)
err := tx.Save(&node).Error
if err != nil {
return nil, fmt.Errorf("registering existing node in database: %w", err)
}

log.Trace().
Caller().
Str("node", node.Hostname).
Str("machine_key", node.MachineKey.ShortString()).
Str("node_key", node.NodeKey.ShortString()).
Str("user", node.User.Username()).
Str(zf.NodeHostname, node.Hostname).
Str(zf.MachineKey, node.MachineKey.ShortString()).
Str(zf.NodeKey, node.NodeKey.ShortString()).
Str(zf.UserName, node.User.Username()).
Msg("Test node authorized again")

return &node, nil
@@ -407,29 +429,30 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
node.IPv6 = ipv6

var err error

node.Hostname, err = util.NormaliseHostname(node.Hostname)
if err != nil {
newHostname := util.InvalidString()
log.Info().Err(err).Str("invalid-hostname", node.Hostname).Str("new-hostname", newHostname).Msgf("Invalid hostname, replacing")
log.Info().Err(err).Str(zf.InvalidHostname, node.Hostname).Str(zf.NewHostname, newHostname).Msgf("invalid hostname, replacing")
node.Hostname = newHostname
}

if node.GivenName == "" {
givenName, err := EnsureUniqueGivenName(tx, node.Hostname)
if err != nil {
return nil, fmt.Errorf("failed to ensure unique given name: %w", err)
return nil, fmt.Errorf("ensuring unique given name: %w", err)
}

node.GivenName = givenName
}

if err := tx.Save(&node).Error; err != nil {
return nil, fmt.Errorf("failed register(save) node in the database: %w", err)
if err := tx.Save(&node).Error; err != nil { //nolint:noinlineerr
return nil, fmt.Errorf("saving node to database: %w", err)
}

log.Trace().
Caller().
Str("node", node.Hostname).
Str(zf.NodeHostname, node.Hostname).
Msg("Test node registered with the database")

return &node, nil
@@ -491,8 +514,10 @@ func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {

func isUniqueName(tx *gorm.DB, name string) (bool, error) {
nodes := types.Nodes{}
if err := tx.
Where("given_name = ?", name).Find(&nodes).Error; err != nil {

err := tx.
Where("given_name = ?", name).Find(&nodes).Error
if err != nil {
return false, err
}

@@ -646,7 +671,7 @@ func (hsdb *HSDatabase) CreateNodeForTest(user *types.User, hostname ...string)
panic("CreateNodeForTest requires a valid user")
}

nodeName := "testnode"
nodeName := defaultTestNodePrefix
if len(hostname) > 0 && hostname[0] != "" {
nodeName = hostname[0]
}
@@ -668,7 +693,7 @@ func (hsdb *HSDatabase) CreateNodeForTest(user *types.User, hostname ...string)
Hostname: nodeName,
UserID: &user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: ptr.To(pak.ID),
AuthKeyID: new(pak.ID),
}

err = hsdb.DB.Save(node).Error
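Throughout these hunks `ptr.To(x)` is replaced by `new(x)`. Since the builtin `new` traditionally only accepts a type, this suggests the repo targets a Go toolchain where the accepted `new(expr)` spec extension is available, making the `tailscale.com/types/ptr` helper redundant — an inference from the substitutions, not something the diff states. On toolchains without `new(expr)`, the same effect comes from a one-line generic helper:

```go
package main

import "fmt"

// ptrTo mirrors tailscale.com/types/ptr.To: it returns a pointer to a copy
// of v. This is what new(expr) provides as a builtin on toolchains that
// support it (an assumption based on the substitutions in the diff).
func ptrTo[T any](v T) *T {
	return &v
}

func main() {
	id := uint64(42)
	p := ptrTo(id) // equivalent to new(id) where new(expr) is supported

	fmt.Println(*p)
}
```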
@@ -694,9 +719,12 @@ func (hsdb *HSDatabase) CreateRegisteredNodeForTest(user *types.User, hostname .
}

var registeredNode *types.Node

err = hsdb.DB.Transaction(func(tx *gorm.DB) error {
var err error

registeredNode, err = RegisterNodeForTest(tx, *node, ipv4, ipv6)

return err
})
if err != nil {
@@ -715,7 +743,7 @@ func (hsdb *HSDatabase) CreateNodesForTest(user *types.User, count int, hostname
panic("CreateNodesForTest requires a valid user")
}

prefix := "testnode"
prefix := defaultTestNodePrefix
if len(hostnamePrefix) > 0 && hostnamePrefix[0] != "" {
prefix = hostnamePrefix[0]
}
@@ -738,7 +766,7 @@ func (hsdb *HSDatabase) CreateRegisteredNodesForTest(user *types.User, count int
panic("CreateRegisteredNodesForTest requires a valid user")
}

prefix := "testnode"
prefix := defaultTestNodePrefix
if len(hostnamePrefix) > 0 && hostnamePrefix[0] != "" {
prefix = hostnamePrefix[0]
}

@@ -22,7 +22,6 @@ import (
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/ptr"
)

func TestGetNode(t *testing.T) {
@@ -115,7 +114,7 @@ func TestExpireNode(t *testing.T) {
Hostname: "testnode",
UserID: &user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: ptr.To(pak.ID),
AuthKeyID: new(pak.ID),
Expiry: &time.Time{},
}
db.DB.Save(node)
@@ -159,7 +158,7 @@ func TestSetTags(t *testing.T) {
Hostname: "testnode",
UserID: &user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: ptr.To(pak.ID),
AuthKeyID: new(pak.ID),
}

trx := db.DB.Save(node)
@@ -187,6 +186,7 @@ func TestHeadscale_generateGivenName(t *testing.T) {
suppliedName string
randomSuffix bool
}

tests := []struct {
name string
args args
@@ -443,7 +443,7 @@ func TestAutoApproveRoutes(t *testing.T) {
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tt.routes,
},
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
IPv4: new(netip.MustParseAddr("100.64.0.1")),
}

err = adb.DB.Save(&node).Error
@@ -460,17 +460,17 @@ func TestAutoApproveRoutes(t *testing.T) {
RoutableIPs: tt.routes,
},
Tags: []string{"tag:exit"},
IPv4: ptr.To(netip.MustParseAddr("100.64.0.2")),
IPv4: new(netip.MustParseAddr("100.64.0.2")),
}

err = adb.DB.Save(&nodeTagged).Error
require.NoError(t, err)

users, err := adb.ListUsers()
assert.NoError(t, err)
require.NoError(t, err)

nodes, err := adb.ListNodes()
assert.NoError(t, err)
require.NoError(t, err)

pm, err := pmf(users, nodes.ViewSlice())
require.NoError(t, err)
@@ -498,6 +498,7 @@ func TestAutoApproveRoutes(t *testing.T) {
if len(expectedRoutes1) == 0 {
expectedRoutes1 = nil
}

if diff := cmp.Diff(expectedRoutes1, node1ByID.AllApprovedRoutes(), util.Comparers...); diff != "" {
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
}
@@ -509,6 +510,7 @@ func TestAutoApproveRoutes(t *testing.T) {
if len(expectedRoutes2) == 0 {
expectedRoutes2 = nil
}

if diff := cmp.Diff(expectedRoutes2, node2ByID.AllApprovedRoutes(), util.Comparers...); diff != "" {
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
}
@@ -520,6 +522,7 @@ func TestAutoApproveRoutes(t *testing.T) {
func TestEphemeralGarbageCollectorOrder(t *testing.T) {
want := []types.NodeID{1, 3}
got := []types.NodeID{}

var mu sync.Mutex

deletionCount := make(chan struct{}, 10)
@@ -527,6 +530,7 @@ func TestEphemeralGarbageCollectorOrder(t *testing.T) {
e := NewEphemeralGarbageCollector(func(ni types.NodeID) {
mu.Lock()
defer mu.Unlock()

got = append(got, ni)

deletionCount <- struct{}{}
@@ -576,8 +580,10 @@ func TestEphemeralGarbageCollectorOrder(t *testing.T) {
}

func TestEphemeralGarbageCollectorLoads(t *testing.T) {
var got []types.NodeID
var mu sync.Mutex
var (
got []types.NodeID
mu sync.Mutex
)

want := 1000

@@ -589,6 +595,7 @@ func TestEphemeralGarbageCollectorLoads(t *testing.T) {

// Yield to other goroutines to introduce variability
runtime.Gosched()

got = append(got, ni)

atomic.AddInt64(&deletedCount, 1)
@@ -616,9 +623,12 @@ func TestEphemeralGarbageCollectorLoads(t *testing.T) {
}
}

func generateRandomNumber(t *testing.T, max int64) int64 {
//nolint:unused
func generateRandomNumber(t *testing.T, maxVal int64) int64 {
t.Helper()
maxB := big.NewInt(max)

maxB := big.NewInt(maxVal)

n, err := rand.Int(rand.Reader, maxB)
if err != nil {
t.Fatalf("getting random number: %s", err)
@@ -649,7 +659,7 @@ func TestListEphemeralNodes(t *testing.T) {
Hostname: "test",
UserID: &user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: ptr.To(pak.ID),
AuthKeyID: new(pak.ID),
}

nodeEph := types.Node{
@@ -659,7 +669,7 @@ func TestListEphemeralNodes(t *testing.T) {
Hostname: "ephemeral",
UserID: &user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: ptr.To(pakEph.ID),
AuthKeyID: new(pakEph.ID),
}

err = db.DB.Save(&node).Error
@@ -722,7 +732,7 @@ func TestNodeNaming(t *testing.T) {
nodeInvalidHostname := types.Node{
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "我的电脑",
Hostname: "我的电脑", //nolint:gosmopolitan // intentional i18n test data
UserID: &user2.ID,
RegisterMethod: util.RegisterMethodAuthKey,
}
@@ -746,12 +756,15 @@ func TestNodeNaming(t *testing.T) {
if err != nil {
return err
}

_, err = RegisterNodeForTest(tx, node2, nil, nil)
if err != nil {
return err
}
_, err = RegisterNodeForTest(tx, nodeInvalidHostname, ptr.To(mpp("100.64.0.66/32").Addr()), nil)
_, err = RegisterNodeForTest(tx, nodeShortHostname, ptr.To(mpp("100.64.0.67/32").Addr()), nil)

_, _ = RegisterNodeForTest(tx, nodeInvalidHostname, new(mpp("100.64.0.66/32").Addr()), nil)
_, err = RegisterNodeForTest(tx, nodeShortHostname, new(mpp("100.64.0.67/32").Addr()), nil)

return err
})
require.NoError(t, err)
@@ -810,25 +823,25 @@ func TestNodeNaming(t *testing.T) {
err = db.Write(func(tx *gorm.DB) error {
return RenameNode(tx, nodes[0].ID, "test")
})
assert.ErrorContains(t, err, "name is not unique")
require.ErrorContains(t, err, "name is not unique")

// Rename invalid chars
err = db.Write(func(tx *gorm.DB) error {
return RenameNode(tx, nodes[2].ID, "我的电脑")
return RenameNode(tx, nodes[2].ID, "我的电脑") //nolint:gosmopolitan // intentional i18n test data
})
assert.ErrorContains(t, err, "invalid characters")
require.ErrorContains(t, err, "invalid characters")

// Rename too short
err = db.Write(func(tx *gorm.DB) error {
return RenameNode(tx, nodes[3].ID, "a")
})
assert.ErrorContains(t, err, "at least 2 characters")
require.ErrorContains(t, err, "at least 2 characters")

// Rename with emoji
err = db.Write(func(tx *gorm.DB) error {
return RenameNode(tx, nodes[0].ID, "hostname-with-💩")
})
assert.ErrorContains(t, err, "invalid characters")
require.ErrorContains(t, err, "invalid characters")

// Rename with only emoji
err = db.Write(func(tx *gorm.DB) error {
@@ -896,12 +909,12 @@ func TestRenameNodeComprehensive(t *testing.T) {
},
{
name: "chinese_chars_with_dash_rejected",
newName: "server-北京-01",
newName: "server-北京-01", //nolint:gosmopolitan // intentional i18n test data
wantErr: "invalid characters",
},
{
name: "chinese_only_rejected",
newName: "我的电脑",
newName: "我的电脑", //nolint:gosmopolitan // intentional i18n test data
wantErr: "invalid characters",
},
{
@@ -911,7 +924,7 @@ func TestRenameNodeComprehensive(t *testing.T) {
},
{
name: "mixed_chinese_emoji_rejected",
newName: "测试💻机器",
newName: "测试💻机器", //nolint:gosmopolitan // intentional i18n test data
wantErr: "invalid characters",
},
{
@@ -1000,6 +1013,7 @@ func TestListPeers(t *testing.T) {
if err != nil {
return err
}

_, err = RegisterNodeForTest(tx, node2, nil, nil)

return err
@@ -1085,6 +1099,7 @@ func TestListNodes(t *testing.T) {
if err != nil {
return err
}

_, err = RegisterNodeForTest(tx, node2, nil, nil)

return err

@@ -17,7 +17,8 @@ func (hsdb *HSDatabase) SetPolicy(policy string) (*types.Policy, error) {
Data: policy,
}

if err := hsdb.DB.Clauses(clause.Returning{}).Create(&p).Error; err != nil {
err := hsdb.DB.Clauses(clause.Returning{}).Create(&p).Error
if err != nil {
return nil, err
}

@@ -138,8 +138,8 @@ func CreatePreAuthKey(
Hash: hash, // Store hash
}

if err := tx.Save(&key).Error; err != nil {
return nil, fmt.Errorf("failed to create key in the database: %w", err)
if err := tx.Save(&key).Error; err != nil { //nolint:noinlineerr
return nil, fmt.Errorf("creating key in database: %w", err)
}

return &types.PreAuthKeyNew{
@@ -155,9 +155,7 @@ func CreatePreAuthKey(
}

func (hsdb *HSDatabase) ListPreAuthKeys() ([]types.PreAuthKey, error) {
return Read(hsdb.DB, func(rx *gorm.DB) ([]types.PreAuthKey, error) {
return ListPreAuthKeys(rx)
})
return Read(hsdb.DB, ListPreAuthKeys)
}

// ListPreAuthKeys returns all PreAuthKeys in the database.
@@ -296,7 +294,7 @@ func DestroyPreAuthKey(tx *gorm.DB, id uint64) error {
Where("auth_key_id = ?", id).
Update("auth_key_id", nil).Error
if err != nil {
return fmt.Errorf("failed to clear auth_key_id on nodes: %w", err)
return fmt.Errorf("clearing auth_key_id on nodes: %w", err)
}

// Then delete the pre-auth key
@@ -325,14 +323,15 @@ func (hsdb *HSDatabase) DeletePreAuthKey(id uint64) error {
func UsePreAuthKey(tx *gorm.DB, k *types.PreAuthKey) error {
err := tx.Model(k).Update("used", true).Error
if err != nil {
return fmt.Errorf("failed to update key used status in the database: %w", err)
return fmt.Errorf("updating key used status in database: %w", err)
}

k.Used = true

return nil
}

// MarkExpirePreAuthKey marks a PreAuthKey as expired.
// ExpirePreAuthKey marks a PreAuthKey as expired.
func ExpirePreAuthKey(tx *gorm.DB, id uint64) error {
now := time.Now()
return tx.Model(&types.PreAuthKey{}).Where("id = ?", id).Update("expiration", now).Error

@@ -11,7 +11,6 @@ import (
"github.com/juanfont/headscale/hscontrol/util"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/types/ptr"
)

func TestCreatePreAuthKey(t *testing.T) {
@@ -24,7 +23,7 @@ func TestCreatePreAuthKey(t *testing.T) {
test: func(t *testing.T, db *HSDatabase) {
t.Helper()

_, err := db.CreatePreAuthKey(ptr.To(types.UserID(12345)), true, false, nil, nil)
_, err := db.CreatePreAuthKey(new(types.UserID(12345)), true, false, nil, nil)
assert.Error(t, err)
},
},
@@ -127,7 +126,7 @@ func TestCannotDeleteAssignedPreAuthKey(t *testing.T) {
Hostname: "testest",
UserID: &user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: ptr.To(key.ID),
AuthKeyID: new(key.ID),
}
db.DB.Save(&node)

@@ -362,7 +362,8 @@ func (c *Config) Validate() error {
// ToURL builds a properly encoded SQLite connection string using _pragma parameters
// compatible with modernc.org/sqlite driver.
func (c *Config) ToURL() (string, error) {
if err := c.Validate(); err != nil {
err := c.Validate()
if err != nil {
return "", fmt.Errorf("invalid config: %w", err)
}

@@ -372,18 +373,23 @@ func (c *Config) ToURL() (string, error) {
if c.BusyTimeout > 0 {
pragmas = append(pragmas, fmt.Sprintf("busy_timeout=%d", c.BusyTimeout))
}

if c.JournalMode != "" {
pragmas = append(pragmas, fmt.Sprintf("journal_mode=%s", c.JournalMode))
}

if c.AutoVacuum != "" {
pragmas = append(pragmas, fmt.Sprintf("auto_vacuum=%s", c.AutoVacuum))
}

if c.WALAutocheckpoint >= 0 {
pragmas = append(pragmas, fmt.Sprintf("wal_autocheckpoint=%d", c.WALAutocheckpoint))
}

if c.Synchronous != "" {
pragmas = append(pragmas, fmt.Sprintf("synchronous=%s", c.Synchronous))
}

if c.ForeignKeys {
pragmas = append(pragmas, "foreign_keys=ON")
}
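The `ToURL` changes above only add blank lines between the pragma checks; the method builds a list of pragma strings that end up as `_pragma` query parameters in a modernc.org/sqlite connection string. The final URL assembly is elided from the hunk, so the join shown in this sketch is an assumption; the pragma strings themselves follow the format strings in the diff:

```go
package main

import (
	"fmt"
	"net/url"
)

// buildSQLiteURL assembles a modernc.org/sqlite style DSN. The pragma set
// mirrors the options checked in ToURL above; values are illustrative
// defaults, not headscale's, and the exact encoding of each _pragma value
// should be checked against the driver's documentation.
func buildSQLiteURL(path string) string {
	pragmas := []string{
		"busy_timeout=10000",
		"journal_mode=WAL",
		"foreign_keys=ON",
	}

	q := url.Values{}
	for _, p := range pragmas {
		q.Add("_pragma", p)
	}

	return "file:" + path + "?" + q.Encode()
}

func main() {
	fmt.Println(buildSQLiteURL("/var/lib/headscale/db.sqlite"))
}
```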
@@ -294,6 +294,7 @@ func TestConfigToURL(t *testing.T) {
|
||||
t.Errorf("Config.ToURL() error = %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if got != tt.want {
|
||||
t.Errorf("Config.ToURL() = %q, want %q", got, tt.want)
|
||||
}
|
||||
@@ -306,6 +307,7 @@ func TestConfigToURLInvalid(t *testing.T) {
|
||||
Path: "",
|
||||
BusyTimeout: -1,
|
||||
}
|
||||
|
||||
_, err := config.ToURL()
|
||||
if err == nil {
|
||||
t.Error("Config.ToURL() with invalid config should return error")
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package sqliteconfig
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -101,7 +102,10 @@ func TestSQLiteDriverPragmaIntegration(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
// Test connection
|
||||
if err := db.Ping(); err != nil {
|
||||
ctx := context.Background()
|
||||
|
||||
err = db.PingContext(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to ping database: %v", err)
|
||||
}
|
||||
|
||||
@@ -109,8 +113,10 @@ func TestSQLiteDriverPragmaIntegration(t *testing.T) {
|
||||
for pragma, expectedValue := range tt.expected {
|
||||
t.Run("pragma_"+pragma, func(t *testing.T) {
|
||||
var actualValue any
|
||||
|
||||
query := "PRAGMA " + pragma
|
||||
err := db.QueryRow(query).Scan(&actualValue)
|
||||
|
||||
err := db.QueryRowContext(ctx, query).Scan(&actualValue)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query %s: %v", query, err)
|
||||
}
|
||||
@@ -163,6 +169,8 @@ func TestForeignKeyConstraintEnforcement(t *testing.T) {
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create test tables with foreign key relationship
|
||||
schema := `
|
||||
CREATE TABLE parent (
|
||||
@@ -178,23 +186,25 @@ func TestForeignKeyConstraintEnforcement(t *testing.T) {
|
||||
);
|
||||
`
|
||||
|
||||
if _, err := db.Exec(schema); err != nil {
|
||||
_, err = db.ExecContext(ctx, schema)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create schema: %v", err)
|
||||
}
|
||||
|
||||
// Insert parent record
|
||||
if _, err := db.Exec("INSERT INTO parent (id, name) VALUES (1, 'Parent 1')"); err != nil {
|
||||
_, err = db.ExecContext(ctx, "INSERT INTO parent (id, name) VALUES (1, 'Parent 1')")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to insert parent: %v", err)
|
||||
}
|
||||
|
||||
// Test 1: Valid foreign key should work
|
||||
_, err = db.Exec("INSERT INTO child (id, parent_id, name) VALUES (1, 1, 'Child 1')")
|
||||
_, err = db.ExecContext(ctx, "INSERT INTO child (id, parent_id, name) VALUES (1, 1, 'Child 1')")
|
||||
if err != nil {
|
||||
t.Fatalf("Valid foreign key insert failed: %v", err)
|
||||
}
|
||||
|
||||
// Test 2: Invalid foreign key should fail
|
||||
_, err = db.Exec("INSERT INTO child (id, parent_id, name) VALUES (2, 999, 'Child 2')")
|
||||
_, err = db.ExecContext(ctx, "INSERT INTO child (id, parent_id, name) VALUES (2, 999, 'Child 2')")
|
||||
if err == nil {
|
||||
t.Error("Expected foreign key constraint violation, but insert succeeded")
|
||||
} else if !contains(err.Error(), "FOREIGN KEY constraint failed") {
|
||||
@@ -204,7 +214,7 @@ func TestForeignKeyConstraintEnforcement(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test 3: Deleting referenced parent should fail
|
||||
_, err = db.Exec("DELETE FROM parent WHERE id = 1")
|
||||
_, err = db.ExecContext(ctx, "DELETE FROM parent WHERE id = 1")
|
||||
if err == nil {
|
||||
t.Error("Expected foreign key constraint violation when deleting referenced parent")
|
||||
} else if !contains(err.Error(), "FOREIGN KEY constraint failed") {
|
||||
@@ -249,7 +259,8 @@ func TestJournalModeValidation(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
var actualMode string
|
||||
err = db.QueryRow("PRAGMA journal_mode").Scan(&actualMode)
|
||||
|
||||
err = db.QueryRowContext(context.Background(), "PRAGMA journal_mode").Scan(&actualMode)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query journal_mode: %v", err)
|
||||
}
|
||||
|
||||
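The test changes above consistently swap database/sql's plain calls (Ping, Exec, QueryRow) for their context-aware variants. A minimal sketch of the pattern, assuming nothing beyond database/sql and a registered driver:

package main

import (
	"context"
	"database/sql"
)

// pingAndQuery mirrors the refactor above: every call threads a context
// so cancellation and deadlines propagate into the driver.
func pingAndQuery(ctx context.Context, db *sql.DB) (string, error) {
	if err := db.PingContext(ctx); err != nil {
		return "", err
	}

	var mode string
	err := db.QueryRowContext(ctx, "PRAGMA journal_mode").Scan(&mode)

	return mode, err
}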
@@ -53,16 +53,19 @@ func newPostgresDBForTest(t *testing.T) *url.URL {
t.Helper()

ctx := t.Context()

srv, err := postgrestest.Start(ctx)
if err != nil {
t.Fatal(err)
}

t.Cleanup(srv.Cleanup)

u, err := srv.CreateDatabase(ctx)
if err != nil {
t.Fatal(err)
}

t.Logf("created local postgres: %s", u)
pu, _ := url.Parse(u)
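The ctx := t.Context() line above uses the testing API added in Go 1.24: the returned context is cancelled automatically just before the test's cleanup functions run, so anything started with it cannot outlive the test. A minimal illustration of that lifetime, separate from the helper above:

func TestContextLifetime(t *testing.T) {
	ctx := t.Context()

	go func() {
		<-ctx.Done() // fires automatically as the test finishes
	}()
}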
@@ -3,12 +3,19 @@ package db
import (
"context"
"encoding"
"errors"
"fmt"
"reflect"

"gorm.io/gorm/schema"
)

var (
errUnmarshalTextValue = errors.New("unmarshalling text value")
errUnsupportedType = errors.New("unsupported type")
errTextMarshalerOnly = errors.New("only encoding.TextMarshaler is supported")
)

// Got from https://github.com/xdg-go/strum/blob/main/types.go
var textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]()

@@ -24,7 +31,7 @@ func maybeInstantiatePtr(rv reflect.Value) {
}

func decodingError(name string, err error) error {
return fmt.Errorf("error decoding to %s: %w", name, err)
return fmt.Errorf("decoding to %s: %w", name, err)
}

// TextSerialiser implements the Serialiser interface for fields that
@@ -42,22 +49,26 @@ func (TextSerialiser) Scan(ctx context.Context, field *schema.Field, dst reflect

if dbValue != nil {
var bytes []byte

switch v := dbValue.(type) {
case []byte:
bytes = v
case string:
bytes = []byte(v)
default:
return fmt.Errorf("failed to unmarshal text value: %#v", dbValue)
return fmt.Errorf("%w: %#v", errUnmarshalTextValue, dbValue)
}

if isTextUnmarshaler(fieldValue) {
maybeInstantiatePtr(fieldValue)
f := fieldValue.MethodByName("UnmarshalText")
args := []reflect.Value{reflect.ValueOf(bytes)}

ret := f.Call(args)
if !ret[0].IsNil() {
return decodingError(field.Name, ret[0].Interface().(error))
if err, ok := ret[0].Interface().(error); ok {
return decodingError(field.Name, err)
}
}

// If the underlying field is to a pointer type, we need to
@@ -73,7 +84,7 @@ func (TextSerialiser) Scan(ctx context.Context, field *schema.Field, dst reflect

return nil
} else {
return fmt.Errorf("unsupported type: %T", fieldValue.Interface())
return fmt.Errorf("%w: %T", errUnsupportedType, fieldValue.Interface())
}
}

@@ -87,8 +98,9 @@ func (TextSerialiser) Value(ctx context.Context, field *schema.Field, dst reflec
// always comparable, particularly when reflection is involved:
// https://dev.to/arxeiss/in-go-nil-is-not-equal-to-nil-sometimes-jn8
if v == nil || (reflect.ValueOf(v).Kind() == reflect.Ptr && reflect.ValueOf(v).IsNil()) {
return nil, nil
return nil, nil //nolint:nilnil // intentional: nil value for GORM serializer
}

b, err := v.MarshalText()
if err != nil {
return nil, err
@@ -96,6 +108,6 @@ func (TextSerialiser) Value(ctx context.Context, field *schema.Field, dst reflec

return string(b), nil
default:
return nil, fmt.Errorf("only encoding.TextMarshaler is supported, got %t", v)
return nil, fmt.Errorf("%w, got %T", errTextMarshalerOnly, v)
}
}
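A pattern repeated throughout these commits: one-off fmt.Errorf strings become package-level sentinel errors wrapped with %w, so callers can match them with errors.Is instead of comparing strings. A self-contained sketch of the same shape (names here are illustrative, not the repo's):

package example

import (
	"errors"
	"fmt"
)

var ErrUnsupportedType = errors.New("unsupported type")

func checkSupported(v any) error {
	if _, ok := v.(string); ok {
		return nil
	}

	// %w keeps the sentinel matchable while adding the dynamic detail.
	return fmt.Errorf("%w: %T", ErrUnsupportedType, v)
}

// Callers can now branch reliably:
//   if errors.Is(err, example.ErrUnsupportedType) { ... }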
@@ -12,9 +12,11 @@ import (
)

var (
ErrUserExists = errors.New("user already exists")
ErrUserNotFound = errors.New("user not found")
ErrUserStillHasNodes = errors.New("user not empty: node(s) found")
ErrUserExists = errors.New("user already exists")
ErrUserNotFound = errors.New("user not found")
ErrUserStillHasNodes = errors.New("user not empty: node(s) found")
ErrUserWhereInvalidCount = errors.New("expect 0 or 1 where User structs")
ErrUserNotUnique = errors.New("expected exactly one user")
)

func (hsdb *HSDatabase) CreateUser(user types.User) (*types.User, error) {
@@ -26,10 +28,13 @@ func (hsdb *HSDatabase) CreateUser(user types.User) (*types.User, error) {
// CreateUser creates a new User. Returns error if could not be created
// or another user already exists.
func CreateUser(tx *gorm.DB, user types.User) (*types.User, error) {
if err := util.ValidateHostname(user.Name); err != nil {
err := util.ValidateHostname(user.Name)
if err != nil {
return nil, err
}
if err := tx.Create(&user).Error; err != nil {

err = tx.Create(&user).Error
if err != nil {
return nil, fmt.Errorf("creating user: %w", err)
}

@@ -54,6 +59,7 @@ func DestroyUser(tx *gorm.DB, uid types.UserID) error {
if err != nil {
return err
}

if len(nodes) > 0 {
return ErrUserStillHasNodes
}
@@ -62,6 +68,7 @@ func DestroyUser(tx *gorm.DB, uid types.UserID) error {
if err != nil {
return err
}

for _, key := range keys {
err = DestroyPreAuthKey(tx, key.ID)
if err != nil {
@@ -88,11 +95,13 @@ var ErrCannotChangeOIDCUser = errors.New("cannot edit OIDC user")
// not exist or if another User exists with the new name.
func RenameUser(tx *gorm.DB, uid types.UserID, newName string) error {
var err error

oldUser, err := GetUserByID(tx, uid)
if err != nil {
return err
}
if err = util.ValidateHostname(newName); err != nil {

if err = util.ValidateHostname(newName); err != nil { //nolint:noinlineerr
return err
}

@@ -151,7 +160,7 @@ func (hsdb *HSDatabase) ListUsers(where ...*types.User) ([]types.User, error) {
// ListUsers gets all the existing users.
func ListUsers(tx *gorm.DB, where ...*types.User) ([]types.User, error) {
if len(where) > 1 {
return nil, fmt.Errorf("expect 0 or 1 where User structs, got %d", len(where))
return nil, fmt.Errorf("%w, got %d", ErrUserWhereInvalidCount, len(where))
}

var user *types.User
@@ -160,7 +169,9 @@ func ListUsers(tx *gorm.DB, where ...*types.User) ([]types.User, error) {
}

users := []types.User{}
if err := tx.Where(user).Find(&users).Error; err != nil {

err := tx.Where(user).Find(&users).Error
if err != nil {
return nil, err
}

@@ -180,7 +191,7 @@ func (hsdb *HSDatabase) GetUserByName(name string) (*types.User, error) {
}

if len(users) != 1 {
return nil, fmt.Errorf("expected exactly one user, found %d", len(users))
return nil, fmt.Errorf("%w, found %d", ErrUserNotUnique, len(users))
}

return &users[0], nil

@@ -8,7 +8,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/types/ptr"
)

func TestCreateAndDestroyUser(t *testing.T) {
@@ -79,7 +78,7 @@ func TestDestroyUserErrors(t *testing.T) {
Hostname: "testnode",
UserID: &user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: ptr.To(pak.ID),
AuthKeyID: new(pak.ID),
}
trx := db.DB.Save(&node)
require.NoError(t, trx.Error)
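If I read the diff right, ptr.To(pak.ID) becoming new(pak.ID) here (and ptr.To(time.Now()) becoming new(time.Now()) in the batcher further down) relies on the extended built-in new that accepts an expression and returns a pointer to a copy of its value; this was accepted for Go 1.26, so treat the version as an assumption. A sketch of the two equivalent spellings:

import "tailscale.com/types/ptr"

func pointers() {
	n := uint64(42)

	p := ptr.To(n) // helper from tailscale.com/types/ptr: returns *uint64
	q := new(n)    // built-in new(expr), same result on new-enough Go

	_, _ = p, q
}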
@@ -25,34 +25,39 @@ func (h *Headscale) debugHTTPServer() *http.Server {

if wantsJSON {
overview := h.state.DebugOverviewJSON()

overviewJSON, err := json.MarshalIndent(overview, "", " ")
if err != nil {
httpError(w, err)
return
}

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(overviewJSON)
_, _ = w.Write(overviewJSON)
} else {
// Default to text/plain for backward compatibility
overview := h.state.DebugOverview()

w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte(overview))
_, _ = w.Write([]byte(overview))
}
}))

// Configuration endpoint
debug.Handle("config", "Current configuration", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
config := h.state.DebugConfig()

configJSON, err := json.MarshalIndent(config, "", " ")
if err != nil {
httpError(w, err)
return
}

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(configJSON)
_, _ = w.Write(configJSON)
}))

// Policy endpoint
@@ -70,8 +75,9 @@ func (h *Headscale) debugHTTPServer() *http.Server {
} else {
w.Header().Set("Content-Type", "text/plain")
}

w.WriteHeader(http.StatusOK)
w.Write([]byte(policy))
_, _ = w.Write([]byte(policy))
}))

// Filter rules endpoint
@@ -81,27 +87,31 @@ func (h *Headscale) debugHTTPServer() *http.Server {
httpError(w, err)
return
}

filterJSON, err := json.MarshalIndent(filter, "", " ")
if err != nil {
httpError(w, err)
return
}

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(filterJSON)
_, _ = w.Write(filterJSON)
}))

// SSH policies endpoint
debug.Handle("ssh", "SSH policies per node", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
sshPolicies := h.state.DebugSSHPolicies()

sshJSON, err := json.MarshalIndent(sshPolicies, "", " ")
if err != nil {
httpError(w, err)
return
}

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(sshJSON)
_, _ = w.Write(sshJSON)
}))

// DERP map endpoint
@@ -112,20 +122,23 @@ func (h *Headscale) debugHTTPServer() *http.Server {

if wantsJSON {
derpInfo := h.state.DebugDERPJSON()

derpJSON, err := json.MarshalIndent(derpInfo, "", " ")
if err != nil {
httpError(w, err)
return
}

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(derpJSON)
_, _ = w.Write(derpJSON)
} else {
// Default to text/plain for backward compatibility
derpInfo := h.state.DebugDERPMap()

w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte(derpInfo))
_, _ = w.Write([]byte(derpInfo))
}
}))

@@ -137,34 +150,39 @@ func (h *Headscale) debugHTTPServer() *http.Server {

if wantsJSON {
nodeStoreNodes := h.state.DebugNodeStoreJSON()

nodeStoreJSON, err := json.MarshalIndent(nodeStoreNodes, "", " ")
if err != nil {
httpError(w, err)
return
}

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(nodeStoreJSON)
_, _ = w.Write(nodeStoreJSON)
} else {
// Default to text/plain for backward compatibility
nodeStoreInfo := h.state.DebugNodeStore()

w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte(nodeStoreInfo))
_, _ = w.Write([]byte(nodeStoreInfo))
}
}))

// Registration cache endpoint
debug.Handle("registration-cache", "Registration cache information", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
cacheInfo := h.state.DebugRegistrationCache()

cacheJSON, err := json.MarshalIndent(cacheInfo, "", " ")
if err != nil {
httpError(w, err)
return
}

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(cacheJSON)
_, _ = w.Write(cacheJSON)
}))

// Routes endpoint
@@ -175,20 +193,23 @@ func (h *Headscale) debugHTTPServer() *http.Server {

if wantsJSON {
routes := h.state.DebugRoutes()

routesJSON, err := json.MarshalIndent(routes, "", " ")
if err != nil {
httpError(w, err)
return
}

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(routesJSON)
_, _ = w.Write(routesJSON)
} else {
// Default to text/plain for backward compatibility
routes := h.state.DebugRoutesString()

w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte(routes))
_, _ = w.Write([]byte(routes))
}
}))

@@ -200,20 +221,23 @@ func (h *Headscale) debugHTTPServer() *http.Server {

if wantsJSON {
policyManagerInfo := h.state.DebugPolicyManagerJSON()

policyManagerJSON, err := json.MarshalIndent(policyManagerInfo, "", " ")
if err != nil {
httpError(w, err)
return
}

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(policyManagerJSON)
_, _ = w.Write(policyManagerJSON)
} else {
// Default to text/plain for backward compatibility
policyManagerInfo := h.state.DebugPolicyManager()

w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte(policyManagerInfo))
_, _ = w.Write([]byte(policyManagerInfo))
}
}))

@@ -226,7 +250,8 @@ func (h *Headscale) debugHTTPServer() *http.Server {

if res == nil {
w.WriteHeader(http.StatusOK)
w.Write([]byte("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH not set"))
_, _ = w.Write([]byte("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH not set"))

return
}

@@ -235,9 +260,10 @@ func (h *Headscale) debugHTTPServer() *http.Server {
httpError(w, err)
return
}

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(resJSON)
_, _ = w.Write(resJSON)
}))

// Batcher endpoint
@@ -257,14 +283,14 @@ func (h *Headscale) debugHTTPServer() *http.Server {

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(batcherJSON)
_, _ = w.Write(batcherJSON)
} else {
// Default to text/plain for backward compatibility
batcherInfo := h.debugBatcher()

w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte(batcherInfo))
_, _ = w.Write([]byte(batcherInfo))
}
}))

@@ -313,6 +339,7 @@ func (h *Headscale) debugBatcher() string {
activeConnections: info.ActiveConnections,
})
totalNodes++

if info.Connected {
connectedCount++
}
@@ -327,9 +354,11 @@ func (h *Headscale) debugBatcher() string {
activeConnections: 0,
})
totalNodes++

if connected {
connectedCount++
}

return true
})
}
@@ -400,6 +429,7 @@ func (h *Headscale) debugBatcherJSON() DebugBatcherInfo {
ActiveConnections: 0,
}
info.TotalNodes++

return true
})
}
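Every debug endpoint above changes w.Write(b) to _, _ = w.Write(b): the bytes-written count and error are explicitly discarded, which satisfies errcheck-style linters while documenting that a failed response write is not actionable server-side. The shared shape, as a sketch:

import (
	"encoding/json"
	"net/http"
)

func writeJSON(w http.ResponseWriter, v any) {
	b, err := json.MarshalIndent(v, "", " ")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(b) // the client may be gone; nothing useful to do with the error
}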
@@ -28,11 +28,14 @@ func loadDERPMapFromPath(path string) (*tailcfg.DERPMap, error) {
return nil, err
}
defer derpFile.Close()

var derpMap tailcfg.DERPMap

b, err := io.ReadAll(derpFile)
if err != nil {
return nil, err
}

err = yaml.Unmarshal(b, &derpMap)

return &derpMap, err
@@ -57,12 +60,14 @@ func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
}

defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}

var derpMap tailcfg.DERPMap

err = json.Unmarshal(body, &derpMap)

return &derpMap, err
@@ -134,6 +139,7 @@ func shuffleDERPMap(dm *tailcfg.DERPMap) {
for id := range dm.Regions {
ids = append(ids, id)
}

slices.Sort(ids)

for _, id := range ids {
@@ -160,16 +166,18 @@ func derpRandom() *rand.Rand {

derpRandomOnce.Do(func() {
seed := cmp.Or(viper.GetString("dns.base_domain"), time.Now().String())
rnd := rand.New(rand.NewSource(0))
rnd.Seed(int64(crc64.Checksum([]byte(seed), crc64Table)))
rnd := rand.New(rand.NewSource(0)) //nolint:gosec // weak random is fine for DERP scrambling
rnd.Seed(int64(crc64.Checksum([]byte(seed), crc64Table))) //nolint:gosec // safe conversion
derpRandomInst = rnd
})

return derpRandomInst
}

func resetDerpRandomForTesting() {
derpRandomMu.Lock()
defer derpRandomMu.Unlock()

derpRandomOnce = sync.Once{}
derpRandomInst = nil
}

@@ -242,7 +242,9 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
viper.Set("dns.base_domain", tt.baseDomain)

defer viper.Reset()

resetDerpRandomForTesting()

testMap := tt.derpMap.View().AsStruct()
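derpRandom seeds math/rand with a CRC-64 checksum of dns.base_domain, so the DERP map shuffle is deterministic for a given deployment yet differs between deployments; the cmp.Or fallback to time.Now() keeps it random when no base domain is set. A minimal sketch of the seeding, assuming crc64.ISO for the table (the repo's crc64Table may use a different polynomial):

import (
	"hash/crc64"
	"math/rand"
)

// seededRand derives a stable pseudo-random source from a string seed.
func seededRand(seed string) *rand.Rand {
	table := crc64.MakeTable(crc64.ISO)
	sum := crc64.Checksum([]byte(seed), table)

	return rand.New(rand.NewSource(int64(sum))) //nolint:gosec // not cryptographic
}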
@@ -54,7 +54,7 @@ func NewDERPServer(
derpKey key.NodePrivate,
cfg *types.DERPConfig,
) (*DERPServer, error) {
log.Trace().Caller().Msg("Creating new embedded DERP server")
log.Trace().Caller().Msg("creating new embedded DERP server")
server := derpserver.New(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains

if cfg.ServerVerifyClients {
@@ -75,9 +75,12 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
if err != nil {
return tailcfg.DERPRegion{}, err
}
var host string
var port int
var portStr string

var (
host string
port int
portStr string
)

// Extract hostname and port from URL
host, portStr, err = net.SplitHostPort(serverURL.Host)
@@ -98,13 +101,13 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {

// If debug flag is set, resolve hostname to IP address
if debugUseDERPIP {
ips, err := net.LookupIP(host)
ips, err := new(net.Resolver).LookupIPAddr(context.Background(), host)
if err != nil {
log.Error().Caller().Err(err).Msgf("Failed to resolve DERP hostname %s to IP, using hostname", host)
log.Error().Caller().Err(err).Msgf("failed to resolve DERP hostname %s to IP, using hostname", host)
} else if len(ips) > 0 {
// Use the first IP address
ipStr := ips[0].String()
log.Info().Caller().Msgf("HEADSCALE_DEBUG_DERP_USE_IP: Resolved %s to %s", host, ipStr)
ipStr := ips[0].IP.String()
log.Info().Caller().Msgf("HEADSCALE_DEBUG_DERP_USE_IP: resolved %s to %s", host, ipStr)
host = ipStr
}
}
@@ -130,14 +133,16 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
if err != nil {
return tailcfg.DERPRegion{}, err
}

portSTUN, err := strconv.Atoi(portSTUNStr)
if err != nil {
return tailcfg.DERPRegion{}, err
}

localDERPregion.Nodes[0].STUNPort = portSTUN

log.Info().Caller().Msgf("DERP region: %+v", localDERPregion)
log.Info().Caller().Msgf("DERP Nodes[0]: %+v", localDERPregion.Nodes[0])
log.Info().Caller().Msgf("derp region: %+v", localDERPregion)
log.Info().Caller().Msgf("derp nodes[0]: %+v", localDERPregion.Nodes[0])

return localDERPregion, nil
}
@@ -155,8 +160,10 @@ func (d *DERPServer) DERPHandler(
Caller().
Msg("No Upgrade header in DERP server request. If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.")
}

writer.Header().Set("Content-Type", "text/plain")
writer.WriteHeader(http.StatusUpgradeRequired)

_, err := writer.Write([]byte("DERP requires connection upgrade"))
if err != nil {
log.Error().
@@ -206,6 +213,7 @@ func (d *DERPServer) serveWebsocket(writer http.ResponseWriter, req *http.Reques
return
}
defer websocketConn.Close(websocket.StatusInternalError, "closing")

if websocketConn.Subprotocol() != "derp" {
websocketConn.Close(websocket.StatusPolicyViolation, "client must speak the derp subprotocol")

@@ -222,9 +230,10 @@ func (d *DERPServer) servePlain(writer http.ResponseWriter, req *http.Request) {

hijacker, ok := writer.(http.Hijacker)
if !ok {
log.Error().Caller().Msg("DERP requires Hijacker interface from Gin")
log.Error().Caller().Msg("derp requires Hijacker interface from Gin")
writer.Header().Set("Content-Type", "text/plain")
writer.WriteHeader(http.StatusInternalServerError)

_, err := writer.Write([]byte("HTTP does not support general TCP support"))
if err != nil {
log.Error().
@@ -238,9 +247,10 @@ func (d *DERPServer) servePlain(writer http.ResponseWriter, req *http.Request) {

netConn, conn, err := hijacker.Hijack()
if err != nil {
log.Error().Caller().Err(err).Msgf("Hijack failed")
log.Error().Caller().Err(err).Msgf("hijack failed")
writer.Header().Set("Content-Type", "text/plain")
writer.WriteHeader(http.StatusInternalServerError)

_, err = writer.Write([]byte("HTTP does not support general TCP support"))
if err != nil {
log.Error().
@@ -251,7 +261,8 @@ func (d *DERPServer) servePlain(writer http.ResponseWriter, req *http.Request) {

return
}
log.Trace().Caller().Msgf("Hijacked connection from %v", req.RemoteAddr)

log.Trace().Caller().Msgf("hijacked connection from %v", req.RemoteAddr)

if !fastStart {
pubKey := d.key.Public()
@@ -280,6 +291,7 @@ func DERPProbeHandler(
writer.WriteHeader(http.StatusOK)
default:
writer.WriteHeader(http.StatusMethodNotAllowed)

_, err := writer.Write([]byte("bogus probe method"))
if err != nil {
log.Error().
@@ -309,9 +321,11 @@ func DERPBootstrapDNSHandler(

resolvCtx, cancel := context.WithTimeout(req.Context(), time.Minute)
defer cancel()

var resolver net.Resolver
for _, region := range derpMap.Regions().All() {
for _, node := range region.Nodes().All() { // we don't care if we override some nodes

for _, region := range derpMap.Regions().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator
for _, node := range region.Nodes().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator
addrs, err := resolver.LookupIP(resolvCtx, "ip", node.HostName())
if err != nil {
log.Trace().
@@ -321,11 +335,14 @@ func DERPBootstrapDNSHandler(

continue
}

dnsEntries[node.HostName()] = addrs
}
}

writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)

err := json.NewEncoder(writer).Encode(dnsEntries)
if err != nil {
log.Error().
@@ -338,33 +355,37 @@ func DERPBootstrapDNSHandler(

// ServeSTUN starts a STUN server on the configured addr.
func (d *DERPServer) ServeSTUN() {
packetConn, err := net.ListenPacket("udp", d.cfg.STUNAddr)
packetConn, err := new(net.ListenConfig).ListenPacket(context.Background(), "udp", d.cfg.STUNAddr)
if err != nil {
log.Fatal().Msgf("failed to open STUN listener: %v", err)
}
log.Info().Msgf("STUN server started at %s", packetConn.LocalAddr())

log.Info().Msgf("stun server started at %s", packetConn.LocalAddr())

udpConn, ok := packetConn.(*net.UDPConn)
if !ok {
log.Fatal().Msg("STUN listener is not a UDP listener")
log.Fatal().Msg("stun listener is not a UDP listener")
}

serverSTUNListener(context.Background(), udpConn)
}

func serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) {
var buf [64 << 10]byte
var (
buf [64 << 10]byte
bytesRead int
udpAddr *net.UDPAddr
err error
)

for {
bytesRead, udpAddr, err = packetConn.ReadFromUDP(buf[:])
if err != nil {
if ctx.Err() != nil {
return
}
log.Error().Caller().Err(err).Msgf("STUN ReadFrom")

log.Error().Caller().Err(err).Msgf("stun ReadFrom")

// Rate limit error logging - wait before retrying, but respect context cancellation
select {
@@ -375,25 +396,29 @@ func serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) {

continue
}
log.Trace().Caller().Msgf("STUN request from %v", udpAddr)

log.Trace().Caller().Msgf("stun request from %v", udpAddr)

pkt := buf[:bytesRead]
if !stun.Is(pkt) {
log.Trace().Caller().Msgf("UDP packet is not STUN")
log.Trace().Caller().Msgf("udp packet is not stun")

continue
}

txid, err := stun.ParseBindingRequest(pkt)
if err != nil {
log.Trace().Caller().Err(err).Msgf("STUN parse error")
log.Trace().Caller().Err(err).Msgf("stun parse error")

continue
}

addr, _ := netip.AddrFromSlice(udpAddr.IP)
res := stun.Response(txid, netip.AddrPortFrom(addr, uint16(udpAddr.Port)))
res := stun.Response(txid, netip.AddrPortFrom(addr, uint16(udpAddr.Port))) //nolint:gosec // port is always <=65535

_, err = packetConn.WriteTo(res, udpAddr)
if err != nil {
log.Trace().Caller().Err(err).Msgf("Issue writing to UDP")
log.Trace().Caller().Err(err).Msgf("issue writing to UDP")

continue
}
@@ -412,8 +437,10 @@ type DERPVerifyTransport struct {

func (t *DERPVerifyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
buf := new(bytes.Buffer)
if err := t.handleVerifyRequest(req, buf); err != nil {
log.Error().Caller().Err(err).Msg("Failed to handle client verify request: ")

err := t.handleVerifyRequest(req, buf)
if err != nil {
log.Error().Caller().Err(err).Msg("failed to handle client verify request")

return nil, err
}
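Two more context-aware swaps recur in the DERP server above: the package-level net.ListenPacket and net.LookupIP helpers give way to their struct-based equivalents, which accept a context. A sketch of both (address and hostname are placeholders):

import (
	"context"
	"net"
)

func contextAwareNet(ctx context.Context) error {
	// net.ListenPacket("udp", addr) → (&net.ListenConfig{}).ListenPacket(ctx, ...)
	pc, err := new(net.ListenConfig).ListenPacket(ctx, "udp", ":3478")
	if err != nil {
		return err
	}
	defer pc.Close()

	// net.LookupIP(host) → (&net.Resolver{}).LookupIPAddr(ctx, host)
	ips, err := new(net.Resolver).LookupIPAddr(ctx, "example.com")
	if err != nil {
		return err
	}

	if len(ips) > 0 {
		_ = ips[0].IP // LookupIPAddr returns []net.IPAddr, hence the .IP field in the diff
	}

	return nil
}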
@@ -4,6 +4,7 @@ import (
"context"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"os"
"sync"
@@ -15,6 +16,9 @@ import (
"tailscale.com/util/set"
)

// ErrPathIsDirectory is returned when a directory path is provided where a file is expected.
var ErrPathIsDirectory = errors.New("path is a directory, only file is supported")

type ExtraRecordsMan struct {
mu sync.RWMutex
records set.Set[tailcfg.DNSRecord]
@@ -39,7 +43,7 @@ func NewExtraRecordsManager(path string) (*ExtraRecordsMan, error) {
}

if fi.IsDir() {
return nil, fmt.Errorf("path is a directory, only file is supported: %s", path)
return nil, fmt.Errorf("%w: %s", ErrPathIsDirectory, path)
}

records, hash, err := readExtraRecordsFromPath(path)
@@ -85,19 +89,22 @@ func (e *ExtraRecordsMan) Run() {
log.Error().Caller().Msgf("file watcher event channel closing")
return
}

switch event.Op {
case fsnotify.Create, fsnotify.Write, fsnotify.Chmod:
log.Trace().Caller().Str("path", event.Name).Str("op", event.Op.String()).Msg("extra records received filewatch event")

if event.Name != e.path {
continue
}

e.updateRecords()

// If a file is removed or renamed, fsnotify will loose track of it
// and not watch it. We will therefore attempt to re-add it with a backoff.
case fsnotify.Remove, fsnotify.Rename:
_, err := backoff.Retry(context.Background(), func() (struct{}, error) {
if _, err := os.Stat(e.path); err != nil {
if _, err := os.Stat(e.path); err != nil { //nolint:noinlineerr
return struct{}{}, err
}

@@ -123,6 +130,7 @@ func (e *ExtraRecordsMan) Run() {
log.Error().Caller().Msgf("file watcher error channel closing")
return
}

log.Error().Caller().Err(err).Msgf("extra records filewatcher returned error: %q", err)
}
}
@@ -165,6 +173,7 @@ func (e *ExtraRecordsMan) updateRecords() {
e.hashes[e.path] = newHash

log.Trace().Caller().Interface("records", e.records).Msgf("extra records updated from path, count old: %d, new: %d", oldCount, e.records.Len())

e.updateCh <- e.records.Slice()
}

@@ -183,6 +192,7 @@ func readExtraRecordsFromPath(path string) ([]tailcfg.DNSRecord, [32]byte, error
}

var records []tailcfg.DNSRecord

err = json.Unmarshal(b, &records)
if err != nil {
return nil, [32]byte{}, fmt.Errorf("unmarshalling records, content: %q: %w", string(b), err)
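When a watched file is removed or renamed, fsnotify stops tracking it, so the manager retries os.Stat with backoff until the file reappears and can be re-watched. A sketch of that recovery loop, assuming the generic backoff.Retry signature visible above (github.com/cenkalti/backoff/v5); retry options such as maximum elapsed time are omitted:

import (
	"context"
	"os"

	"github.com/cenkalti/backoff/v5"
	"github.com/fsnotify/fsnotify"
)

func rewatch(watcher *fsnotify.Watcher, path string) error {
	_, err := backoff.Retry(context.Background(), func() (struct{}, error) {
		if _, statErr := os.Stat(path); statErr != nil {
			return struct{}{}, statErr // file still gone; retry with backoff
		}

		return struct{}{}, watcher.Add(path) // re-register the watch once it is back
	})

	return err
}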
@@ -29,6 +29,7 @@ import (
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/hscontrol/util/zlog/zf"
)

type headscaleV1APIServer struct { // v1.HeadscaleServiceServer
@@ -54,7 +55,7 @@ func (api headscaleV1APIServer) CreateUser(
}
user, policyChanged, err := api.h.state.CreateUser(newUser)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to create user: %s", err)
return nil, status.Errorf(codes.Internal, "creating user: %s", err)
}

// CreateUser returns a policy change response if the user creation affected policy.
@@ -235,16 +236,16 @@ func (api headscaleV1APIServer) RegisterNode(
// Generate ephemeral registration key for tracking this registration flow in logs
registrationKey, err := util.GenerateRegistrationKey()
if err != nil {
log.Warn().Err(err).Msg("Failed to generate registration key")
log.Warn().Err(err).Msg("failed to generate registration key")
registrationKey = "" // Continue without key if generation fails
}

log.Trace().
Caller().
Str("user", request.GetUser()).
Str("registration_id", request.GetKey()).
Str("registration_key", registrationKey).
Msg("Registering node")
Str(zf.UserName, request.GetUser()).
Str(zf.RegistrationID, request.GetKey()).
Str(zf.RegistrationKey, registrationKey).
Msg("registering node")

registrationId, err := types.RegistrationIDFromString(request.GetKey())
if err != nil {
@@ -264,17 +265,16 @@ func (api headscaleV1APIServer) RegisterNode(
)
if err != nil {
log.Error().
Str("registration_key", registrationKey).
Str(zf.RegistrationKey, registrationKey).
Err(err).
Msg("Failed to register node")
Msg("failed to register node")
return nil, err
}

log.Info().
Str("registration_key", registrationKey).
Str("node_id", fmt.Sprintf("%d", node.ID())).
Str("hostname", node.Hostname()).
Msg("Node registered successfully")
Str(zf.RegistrationKey, registrationKey).
EmbedObject(node).
Msg("node registered successfully")

// This is a bit of a back and forth, but we have a bit of a chicken and egg
// dependency here.
@@ -355,9 +355,9 @@ func (api headscaleV1APIServer) SetTags(

log.Trace().
Caller().
Str("node", node.Hostname()).
EmbedObject(node).
Strs("tags", request.GetTags()).
Msg("Changing tags of node")
Msg("changing tags of node")

return &v1.SetTagsResponse{Node: node.Proto()}, nil
}
@@ -368,7 +368,7 @@ func (api headscaleV1APIServer) SetApprovedRoutes(
) (*v1.SetApprovedRoutesResponse, error) {
log.Debug().
Caller().
Uint64("node.id", request.GetNodeId()).
Uint64(zf.NodeID, request.GetNodeId()).
Strs("requestedRoutes", request.GetRoutes()).
Msg("gRPC SetApprovedRoutes called")

@@ -387,7 +387,7 @@ func (api headscaleV1APIServer) SetApprovedRoutes(
newApproved = append(newApproved, prefix)
}
}
tsaddr.SortPrefixes(newApproved)
slices.SortFunc(newApproved, netip.Prefix.Compare)
newApproved = slices.Compact(newApproved)

node, nodeChange, err := api.h.state.SetApprovedRoutes(types.NodeID(request.GetNodeId()), newApproved)
@@ -406,7 +406,7 @@ func (api headscaleV1APIServer) SetApprovedRoutes(

log.Debug().
Caller().
Uint64("node.id", node.ID().Uint64()).
EmbedObject(node).
Strs("approvedRoutes", util.PrefixesToString(node.ApprovedRoutes().AsSlice())).
Strs("primaryRoutes", util.PrefixesToString(primaryRoutes)).
Strs("finalSubnetRoutes", proto.SubnetRoutes).
@@ -423,7 +423,7 @@ func validateTag(tag string) error {
return errors.New("tag should be lowercase")
}
if len(strings.Fields(tag)) > 1 {
return errors.New("tag should not contains space")
return errors.New("tags must not contain spaces")
}
return nil
}
@@ -466,8 +466,8 @@ func (api headscaleV1APIServer) ExpireNode(

log.Trace().
Caller().
Str("node", node.Hostname()).
Time("expiry", *node.AsStruct().Expiry).
EmbedObject(node).
Time(zf.ExpiresAt, *node.AsStruct().Expiry).
Msg("node expired")

return &v1.ExpireNodeResponse{Node: node.Proto()}, nil
@@ -487,8 +487,8 @@ func (api headscaleV1APIServer) RenameNode(

log.Trace().
Caller().
Str("node", node.Hostname()).
Str("new_name", request.GetNewName()).
EmbedObject(node).
Str(zf.NewName, request.GetNewName()).
Msg("node renamed")

return &v1.RenameNodeResponse{Node: node.Proto()}, nil
@@ -546,7 +546,7 @@ func (api headscaleV1APIServer) BackfillNodeIPs(
ctx context.Context,
request *v1.BackfillNodeIPsRequest,
) (*v1.BackfillNodeIPsResponse, error) {
log.Trace().Caller().Msg("Backfill called")
log.Trace().Caller().Msg("backfill called")

if !request.Confirmed {
return nil, errors.New("not confirmed, aborting")
@@ -817,13 +817,13 @@ func (api headscaleV1APIServer) Health(
response := &v1.HealthResponse{}

if err := api.h.state.PingDB(ctx); err != nil {
healthErr = fmt.Errorf("database ping failed: %w", err)
healthErr = fmt.Errorf("pinging database: %w", err)
} else {
response.DatabaseConnectivity = true
}

if healthErr != nil {
log.Error().Err(healthErr).Msg("Health check failed")
log.Error().Err(healthErr).Msg("health check failed")
}

return response, healthErr

@@ -17,6 +17,7 @@ func Test_validateTag(t *testing.T) {
type args struct {
tag string
}

tests := []struct {
name string
args args
@@ -45,7 +46,8 @@ func Test_validateTag(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := validateTag(tt.args.tag); (err != nil) != tt.wantErr {
err := validateTag(tt.args.tag)
if (err != nil) != tt.wantErr {
t.Errorf("validateTag() error = %v, wantErr %v", err, tt.wantErr)
}
})
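The logging edits above replace free-form field names ("node.id", "registration_key", "reason") with constants from the new hscontrol/util/zlog/zf package, so every log line uses identical keys and stays machine-queryable. The constant names appear in the diff; the string values below are assumptions about what they resolve to:

// Hypothetical shape of the zf package; actual values live in the repo.
package zf

const (
	NodeID          = "node.id"
	RegistrationKey = "registration_key"
	Reason          = "reason"
)

// Usage with zerolog:
//   log.Debug().Uint64(zf.NodeID, id.Uint64()).Msg("registering node")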
@@ -20,7 +20,7 @@ import (
)

const (
// The CapabilityVersion is used by Tailscale clients to indicate
// NoiseCapabilityVersion is used by Tailscale clients to indicate
// their codebase version. Tailscale clients can communicate over TS2021
// from CapabilityVersion 28, but we only have good support for it
// since https://github.com/tailscale/tailscale/pull/4323 (Noise in any HTTPS port).
@@ -36,8 +36,7 @@ const (

// httpError logs an error and sends an HTTP error response with the given.
func httpError(w http.ResponseWriter, err error) {
var herr HTTPError
if errors.As(err, &herr) {
if herr, ok := errors.AsType[HTTPError](err); ok {
http.Error(w, herr.Msg, herr.Code)
log.Error().Err(herr.Err).Int("code", herr.Code).Msgf("user msg: %s", herr.Msg)
} else {
@@ -56,7 +55,7 @@ type HTTPError struct {
func (e HTTPError) Error() string { return fmt.Sprintf("http error[%d]: %s, %s", e.Code, e.Msg, e.Err) }
func (e HTTPError) Unwrap() error { return e.Err }

// Error returns an HTTPError containing the given information.
// NewHTTPError returns an HTTPError containing the given information.
func NewHTTPError(code int, msg string, err error) HTTPError {
return HTTPError{Code: code, Msg: msg, Err: err}
}
@@ -64,7 +63,7 @@ func NewHTTPError(code int, msg string, err error) HTTPError {
var errMethodNotAllowed = NewHTTPError(http.StatusMethodNotAllowed, "method not allowed", nil)

var ErrRegisterMethodCLIDoesNotSupportExpire = errors.New(
"machines registered with CLI does not support expire",
"machines registered with CLI do not support expiry",
)

func parseCapabilityVersion(req *http.Request) (tailcfg.CapabilityVersion, error) {
@@ -76,7 +75,7 @@ func parseCapabilityVersion(req *http.Request) (tailcfg.CapabilityVersion, error

clientCapabilityVersion, err := strconv.Atoi(clientCapabilityStr)
if err != nil {
return 0, NewHTTPError(http.StatusBadRequest, "invalid capability version", fmt.Errorf("failed to parse capability version: %w", err))
return 0, NewHTTPError(http.StatusBadRequest, "invalid capability version", fmt.Errorf("parsing capability version: %w", err))
}

return tailcfg.CapabilityVersion(clientCapabilityVersion), nil
@@ -88,12 +87,12 @@ func (h *Headscale) handleVerifyRequest(
) error {
body, err := io.ReadAll(req.Body)
if err != nil {
return fmt.Errorf("cannot read request body: %w", err)
return fmt.Errorf("reading request body: %w", err)
}

var derpAdmitClientRequest tailcfg.DERPAdmitClientRequest
if err := json.Unmarshal(body, &derpAdmitClientRequest); err != nil {
return NewHTTPError(http.StatusBadRequest, "Bad Request: invalid JSON", fmt.Errorf("cannot parse derpAdmitClientRequest: %w", err))
if err := json.Unmarshal(body, &derpAdmitClientRequest); err != nil { //nolint:noinlineerr
return NewHTTPError(http.StatusBadRequest, "Bad Request: invalid JSON", fmt.Errorf("parsing DERP client request: %w", err))
}

nodes := h.state.ListNodes()
@@ -155,7 +154,11 @@ func (h *Headscale) KeyHandler(
}

writer.Header().Set("Content-Type", "application/json")
json.NewEncoder(writer).Encode(resp)

err := json.NewEncoder(writer).Encode(resp)
if err != nil {
log.Error().Err(err).Msg("failed to encode public key response")
}

return
}
@@ -180,8 +183,12 @@ func (h *Headscale) HealthHandler(
res.Status = "fail"
}

json.NewEncoder(writer).Encode(res)
encErr := json.NewEncoder(writer).Encode(res)
if encErr != nil {
log.Error().Err(encErr).Msg("failed to encode health response")
}
}

err := h.state.PingDB(req.Context())
if err != nil {
respond(err)
@@ -218,6 +225,7 @@ func (h *Headscale) VersionHandler(
writer.WriteHeader(http.StatusOK)

versionInfo := types.GetVersionInfo()

err := json.NewEncoder(writer).Encode(versionInfo)
if err != nil {
log.Error().
@@ -244,7 +252,7 @@ func (a *AuthProviderWeb) AuthURL(registrationId types.RegistrationID) string {
registrationId.String())
}

// RegisterWebAPI shows a simple message in the browser to point to the CLI
// RegisterHandler shows a simple message in the browser to point to the CLI
// Listens in /register/:registration_id.
//
// This is not part of the Tailscale control API, as we could send whatever URL
@@ -267,7 +275,11 @@ func (a *AuthProviderWeb) RegisterHandler(

writer.Header().Set("Content-Type", "text/html; charset=utf-8")
writer.WriteHeader(http.StatusOK)
writer.Write([]byte(templates.RegisterWeb(registrationId).Render()))

_, err = writer.Write([]byte(templates.RegisterWeb(registrationId).Render()))
if err != nil {
log.Error().Err(err).Msg("failed to write register response")
}
}

func FaviconHandler(writer http.ResponseWriter, req *http.Request) {
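httpError's errors.As call becomes errors.AsType, the generic variant that returns the typed value directly instead of filling a pointer; it landed in the standard library's errors package in Go 1.26 (version noted as an assumption). The two spellings side by side, as a sketch:

func classify(err error) (HTTPError, bool) {
	// Before: pointer-based errors.As.
	var herr HTTPError
	if errors.As(err, &herr) {
		return herr, true
	}

	// After (Go 1.26+): type parameter, no out-pointer needed.
	return errors.AsType[HTTPError](err)
}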
@@ -8,6 +8,7 @@ import (
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/juanfont/headscale/hscontrol/util/zlog/zf"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/puzpuzpuz/xsync/v4"
@@ -15,6 +16,14 @@ import (
"tailscale.com/tailcfg"
)

// Mapper errors.
var (
ErrInvalidNodeID = errors.New("invalid nodeID")
ErrMapperNil = errors.New("mapper is nil")
ErrNodeConnectionNil = errors.New("nodeConnection is nil")
ErrNodeNotFoundMapper = errors.New("node not found")
)

var mapResponseGenerated = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "headscale",
Name: "mapresponse_generated_total",
@@ -80,11 +89,11 @@ func generateMapResponse(nc nodeConnection, mapper *mapper, r change.Change) (*t
}

if nodeID == 0 {
return nil, fmt.Errorf("invalid nodeID: %d", nodeID)
return nil, fmt.Errorf("%w: %d", ErrInvalidNodeID, nodeID)
}

if mapper == nil {
return nil, fmt.Errorf("mapper is nil for nodeID %d", nodeID)
return nil, fmt.Errorf("%w for nodeID %d", ErrMapperNil, nodeID)
}

// Handle self-only responses
@@ -135,12 +144,12 @@ func generateMapResponse(nc nodeConnection, mapper *mapper, r change.Change) (*t
// handleNodeChange generates and sends a [tailcfg.MapResponse] for a given node and [change.Change].
func handleNodeChange(nc nodeConnection, mapper *mapper, r change.Change) error {
if nc == nil {
return errors.New("nodeConnection is nil")
return ErrNodeConnectionNil
}

nodeID := nc.nodeID()

log.Debug().Caller().Uint64("node.id", nodeID.Uint64()).Str("reason", r.Reason).Msg("Node change processing started because change notification received")
log.Debug().Caller().Uint64(zf.NodeID, nodeID.Uint64()).Str(zf.Reason, r.Reason).Msg("node change processing started")

data, err := generateMapResponse(nc, mapper, r)
if err != nil {
@@ -2,6 +2,7 @@ package mapper
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
@@ -10,13 +11,20 @@ import (
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/types/change"
|
||||
"github.com/juanfont/headscale/hscontrol/util/zlog/zf"
|
||||
"github.com/puzpuzpuz/xsync/v4"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
var errConnectionClosed = errors.New("connection channel already closed")
|
||||
// LockFreeBatcher errors.
|
||||
var (
|
||||
errConnectionClosed = errors.New("connection channel already closed")
|
||||
ErrInitialMapSendTimeout = errors.New("sending initial map: timeout")
|
||||
ErrBatcherShuttingDown = errors.New("batcher shutting down")
|
||||
ErrConnectionSendTimeout = errors.New("timeout sending to channel (likely stale connection)")
|
||||
)
|
||||
|
||||
// LockFreeBatcher uses atomic operations and concurrent maps to eliminate mutex contention.
|
||||
type LockFreeBatcher struct {
|
||||
@@ -48,6 +56,7 @@ type LockFreeBatcher struct {
|
||||
// and notifies other nodes that this node has come online.
|
||||
func (b *LockFreeBatcher) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse, version tailcfg.CapabilityVersion) error {
|
||||
addNodeStart := time.Now()
|
||||
nlog := log.With().Uint64(zf.NodeID, id.Uint64()).Logger()
|
||||
|
||||
// Generate connection ID
|
||||
connID := generateConnectionID()
|
||||
@@ -76,9 +85,10 @@ func (b *LockFreeBatcher) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse
|
||||
// Use the worker pool for controlled concurrency instead of direct generation
|
||||
initialMap, err := b.MapResponseFromChange(id, change.FullSelf(id))
|
||||
if err != nil {
|
||||
log.Error().Uint64("node.id", id.Uint64()).Err(err).Msg("Initial map generation failed")
|
||||
nlog.Error().Err(err).Msg("initial map generation failed")
|
||||
nodeConn.removeConnectionByChannel(c)
|
||||
return fmt.Errorf("failed to generate initial map for node %d: %w", id, err)
|
||||
|
||||
return fmt.Errorf("generating initial map for node %d: %w", id, err)
|
||||
}
|
||||
|
||||
// Use a blocking send with timeout for initial map since the channel should be ready
|
||||
@@ -86,12 +96,13 @@ func (b *LockFreeBatcher) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse
|
||||
select {
|
||||
case c <- initialMap:
|
||||
// Success
|
||||
case <-time.After(5 * time.Second):
|
||||
log.Error().Uint64("node.id", id.Uint64()).Err(fmt.Errorf("timeout")).Msg("Initial map send timeout")
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).Dur("timeout.duration", 5*time.Second).
|
||||
Msg("Initial map send timed out because channel was blocked or receiver not ready")
|
||||
case <-time.After(5 * time.Second): //nolint:mnd
|
||||
nlog.Error().Err(ErrInitialMapSendTimeout).Msg("initial map send timeout")
|
||||
nlog.Debug().Caller().Dur("timeout.duration", 5*time.Second). //nolint:mnd
|
||||
Msg("initial map send timed out because channel was blocked or receiver not ready")
|
||||
nodeConn.removeConnectionByChannel(c)
|
||||
return fmt.Errorf("failed to send initial map to node %d: timeout", id)
|
||||
|
||||
return fmt.Errorf("%w for node %d", ErrInitialMapSendTimeout, id)
|
||||
}
|
||||
|
||||
// Update connection status
|
||||
@@ -100,9 +111,9 @@ func (b *LockFreeBatcher) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse
|
||||
// Node will automatically receive updates through the normal flow
|
||||
// The initial full map already contains all current state
|
||||
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).Dur("total.duration", time.Since(addNodeStart)).
|
||||
nlog.Debug().Caller().Dur(zf.TotalDuration, time.Since(addNodeStart)).
|
||||
Int("active.connections", nodeConn.getActiveConnectionCount()).
|
||||
Msg("Node connection established in batcher because AddNode completed successfully")
|
||||
Msg("node connection established in batcher")
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -112,31 +123,34 @@ func (b *LockFreeBatcher) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse
|
||||
// and keeps the node entry alive for rapid reconnections instead of aggressive deletion.
|
||||
// Reports if the node still has active connections after removal.
|
||||
func (b *LockFreeBatcher) RemoveNode(id types.NodeID, c chan<- *tailcfg.MapResponse) bool {
|
||||
nlog := log.With().Uint64(zf.NodeID, id.Uint64()).Logger()
|
||||
|
||||
nodeConn, exists := b.nodes.Load(id)
|
||||
if !exists {
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).Msg("RemoveNode called for non-existent node because node not found in batcher")
|
||||
nlog.Debug().Caller().Msg("removeNode called for non-existent node")
|
||||
return false
|
||||
}
|
||||
|
||||
// Remove specific connection
|
||||
removed := nodeConn.removeConnectionByChannel(c)
|
||||
if !removed {
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).Msg("RemoveNode: channel not found because connection already removed or invalid")
|
||||
nlog.Debug().Caller().Msg("removeNode: channel not found, connection already removed or invalid")
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if node has any remaining active connections
|
||||
if nodeConn.hasActiveConnections() {
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).
|
||||
nlog.Debug().Caller().
|
||||
Int("active.connections", nodeConn.getActiveConnectionCount()).
|
||||
Msg("Node connection removed but keeping online because other connections remain")
|
||||
Msg("node connection removed but keeping online, other connections remain")
|
||||
|
||||
return true // Node still has active connections
|
||||
}
|
||||
|
||||
// No active connections - keep the node entry alive for rapid reconnections
|
||||
// The node will get a fresh full map when it reconnects
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).Msg("Node disconnected from batcher because all connections removed, keeping entry for rapid reconnection")
|
||||
b.connected.Store(id, ptr.To(time.Now()))
|
||||
nlog.Debug().Caller().Msg("node disconnected from batcher, keeping entry for rapid reconnection")
|
||||
b.connected.Store(id, new(time.Now()))
|
||||
|
||||
return false
|
||||
}
|
||||
@@ -196,11 +210,13 @@ func (b *LockFreeBatcher) doWork() {
|
||||
}
|
||||
|
||||
func (b *LockFreeBatcher) worker(workerID int) {
|
||||
wlog := log.With().Int(zf.WorkerID, workerID).Logger()
|
||||
|
||||
for {
|
||||
select {
|
||||
case w, ok := <-b.workCh:
|
||||
if !ok {
|
||||
log.Debug().Int("worker.id", workerID).Msgf("worker channel closing, shutting down worker %d", workerID)
|
||||
wlog.Debug().Msg("worker channel closing, shutting down")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -212,29 +228,29 @@ func (b *LockFreeBatcher) worker(workerID int) {
|
||||
// This is used for synchronous map generation.
|
||||
if w.resultCh != nil {
|
||||
var result workResult
|
||||
|
||||
if nc, exists := b.nodes.Load(w.nodeID); exists {
|
||||
var err error
|
||||
|
||||
result.mapResponse, err = generateMapResponse(nc, b.mapper, w.c)
|
||||
|
||||
result.err = err
|
||||
if result.err != nil {
|
||||
b.workErrors.Add(1)
|
||||
log.Error().Err(result.err).
|
||||
Int("worker.id", workerID).
|
||||
Uint64("node.id", w.nodeID.Uint64()).
|
||||
Str("reason", w.c.Reason).
|
||||
wlog.Error().Err(result.err).
|
||||
Uint64(zf.NodeID, w.nodeID.Uint64()).
|
||||
Str(zf.Reason, w.c.Reason).
|
||||
Msg("failed to generate map response for synchronous work")
|
||||
} else if result.mapResponse != nil {
|
||||
// Update peer tracking for synchronous responses too
|
||||
nc.updateSentPeers(result.mapResponse)
|
||||
}
|
||||
} else {
|
||||
result.err = fmt.Errorf("node %d not found", w.nodeID)
|
||||
result.err = fmt.Errorf("%w: %d", ErrNodeNotFoundMapper, w.nodeID)
|
||||
|
||||
b.workErrors.Add(1)
|
||||
log.Error().Err(result.err).
|
||||
Int("worker.id", workerID).
|
||||
Uint64("node.id", w.nodeID.Uint64()).
|
||||
wlog.Error().Err(result.err).
|
||||
Uint64(zf.NodeID, w.nodeID.Uint64()).
|
||||
Msg("node not found for synchronous work")
|
||||
}
|
||||
|
||||
@@ -257,15 +273,14 @@ func (b *LockFreeBatcher) worker(workerID int) {
|
||||
err := nc.change(w.c)
|
||||
if err != nil {
|
||||
b.workErrors.Add(1)
|
||||
log.Error().Err(err).
|
||||
Int("worker.id", workerID).
|
||||
Uint64("node.id", w.nodeID.Uint64()).
|
||||
Str("reason", w.c.Reason).
|
||||
wlog.Error().Err(err).
|
||||
Uint64(zf.NodeID, w.nodeID.Uint64()).
|
||||
Str(zf.Reason, w.c.Reason).
|
||||
Msg("failed to apply change")
|
||||
}
|
||||
}
|
||||
case <-b.done:
|
||||
log.Debug().Int("worker.id", workerID).Msg("batcher shutting down, exiting worker")
|
||||
wlog.Debug().Msg("batcher shutting down, exiting worker")
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -310,8 +325,8 @@ func (b *LockFreeBatcher) addToBatch(changes ...change.Change) {
if _, existed := b.nodes.LoadAndDelete(removedID); existed {
b.totalNodes.Add(-1)
log.Debug().
Uint64("node.id", removedID.Uint64()).
Msg("Removed deleted node from batcher")
Uint64(zf.NodeID, removedID.Uint64()).
Msg("removed deleted node from batcher")
}

b.connected.Delete(removedID)
@@ -398,14 +413,15 @@ func (b *LockFreeBatcher) cleanupOfflineNodes() {
}
}
}

return true
})

// Clean up the identified nodes
for _, nodeID := range nodesToCleanup {
log.Info().Uint64("node.id", nodeID.Uint64()).
log.Info().Uint64(zf.NodeID, nodeID.Uint64()).
Dur("offline_duration", cleanupThreshold).
Msg("Cleaning up node that has been offline for too long")
Msg("cleaning up node that has been offline for too long")

b.nodes.Delete(nodeID)
b.connected.Delete(nodeID)
@@ -413,8 +429,8 @@ func (b *LockFreeBatcher) cleanupOfflineNodes() {
}

if len(nodesToCleanup) > 0 {
log.Info().Int("cleaned_nodes", len(nodesToCleanup)).
Msg("Completed cleanup of long-offline nodes")
log.Info().Int(zf.CleanedNodes, len(nodesToCleanup)).
Msg("completed cleanup of long-offline nodes")
}
}

@@ -450,6 +466,7 @@ func (b *LockFreeBatcher) ConnectedMap() *xsync.Map[types.NodeID, bool] {
if nodeConn.hasActiveConnections() {
ret.Store(id, true)
}

return true
})

@@ -465,6 +482,7 @@ func (b *LockFreeBatcher) ConnectedMap() *xsync.Map[types.NodeID, bool] {
ret.Store(id, false)
}
}

return true
})

@@ -484,7 +502,7 @@ func (b *LockFreeBatcher) MapResponseFromChange(id types.NodeID, ch change.Chang
case result := <-resultCh:
return result.mapResponse, result.err
case <-b.done:
return nil, fmt.Errorf("batcher shutting down while generating map response for node %d", id)
return nil, fmt.Errorf("%w while generating map response for node %d", ErrBatcherShuttingDown, id)
}
}

@@ -502,6 +520,7 @@ type connectionEntry struct {
type multiChannelNodeConn struct {
id types.NodeID
mapper *mapper
log zerolog.Logger

mutex sync.RWMutex
connections []*connectionEntry
@@ -518,8 +537,9 @@ type multiChannelNodeConn struct {
// generateConnectionID generates a unique connection identifier.
func generateConnectionID() string {
bytes := make([]byte, 8)
rand.Read(bytes)
return fmt.Sprintf("%x", bytes)
_, _ = rand.Read(bytes)

return hex.EncodeToString(bytes)
}

// newMultiChannelNodeConn creates a new multi-channel node connection.
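The `generateConnectionID` change swaps `fmt.Sprintf("%x", ...)` for the cheaper, more direct `hex.EncodeToString`, and explicitly discards the error from `crypto/rand.Read`, which recent Go releases document as never failing. The same pattern in isolation:

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// newID returns 16 hex characters derived from 8 random bytes.
func newID() string {
	b := make([]byte, 8)
	_, _ = rand.Read(b) // crypto/rand.Read is documented not to fail
	return hex.EncodeToString(b)
}

func main() {
	fmt.Println(newID())
}
```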
@@ -528,6 +548,7 @@ func newMultiChannelNodeConn(id types.NodeID, mapper *mapper) *multiChannelNodeC
id: id,
mapper: mapper,
lastSentPeers: xsync.NewMap[tailcfg.NodeID, struct{}](),
log: log.With().Uint64(zf.NodeID, id.Uint64()).Logger(),
}
}

@@ -546,18 +567,21 @@ func (mc *multiChannelNodeConn) close() {
// addConnection adds a new connection.
func (mc *multiChannelNodeConn) addConnection(entry *connectionEntry) {
mutexWaitStart := time.Now()
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", entry.c)).Str("conn.id", entry.id).

mc.log.Debug().Caller().Str(zf.Chan, fmt.Sprintf("%p", entry.c)).Str(zf.ConnID, entry.id).
Msg("addConnection: waiting for mutex - POTENTIAL CONTENTION POINT")

mc.mutex.Lock()

mutexWaitDur := time.Since(mutexWaitStart)

defer mc.mutex.Unlock()

mc.connections = append(mc.connections, entry)
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", entry.c)).Str("conn.id", entry.id).
mc.log.Debug().Caller().Str(zf.Chan, fmt.Sprintf("%p", entry.c)).Str(zf.ConnID, entry.id).
Int("total_connections", len(mc.connections)).
Dur("mutex_wait_time", mutexWaitDur).
Msg("Successfully added connection after mutex wait")
Msg("successfully added connection after mutex wait")
}

// removeConnectionByChannel removes a connection by matching channel pointer.
@@ -569,12 +593,14 @@ func (mc *multiChannelNodeConn) removeConnectionByChannel(c chan<- *tailcfg.MapR
if entry.c == c {
// Remove this connection
mc.connections = append(mc.connections[:i], mc.connections[i+1:]...)
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", c)).
mc.log.Debug().Caller().Str(zf.Chan, fmt.Sprintf("%p", c)).
Int("remaining_connections", len(mc.connections)).
Msg("Successfully removed connection")
Msg("successfully removed connection")

return true
}
}

return false
}

@@ -606,36 +632,41 @@ func (mc *multiChannelNodeConn) send(data *tailcfg.MapResponse) error {
if len(mc.connections) == 0 {
// During rapid reconnection, nodes may temporarily have no active connections
// This is not an error - the node will receive a full map when it reconnects
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).
mc.log.Debug().Caller().
Msg("send: skipping send to node with no active connections (likely rapid reconnection)")

return nil // Return success instead of error
}

log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).
mc.log.Debug().Caller().
Int("total_connections", len(mc.connections)).
Msg("send: broadcasting to all connections")

var lastErr error

successCount := 0

var failedConnections []int // Track failed connections for removal

// Send to all connections
for i, conn := range mc.connections {
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", conn.c)).
Str("conn.id", conn.id).Int("connection_index", i).
mc.log.Debug().Caller().Str(zf.Chan, fmt.Sprintf("%p", conn.c)).
Str(zf.ConnID, conn.id).Int(zf.ConnectionIndex, i).
Msg("send: attempting to send to connection")

if err := conn.send(data); err != nil {
err := conn.send(data)
if err != nil {
lastErr = err

failedConnections = append(failedConnections, i)
log.Warn().Err(err).
Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", conn.c)).
Str("conn.id", conn.id).Int("connection_index", i).
mc.log.Warn().Err(err).Str(zf.Chan, fmt.Sprintf("%p", conn.c)).
Str(zf.ConnID, conn.id).Int(zf.ConnectionIndex, i).
Msg("send: connection send failed")
} else {
successCount++
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", conn.c)).
Str("conn.id", conn.id).Int("connection_index", i).

mc.log.Debug().Caller().Str(zf.Chan, fmt.Sprintf("%p", conn.c)).
Str(zf.ConnID, conn.id).Int(zf.ConnectionIndex, i).
Msg("send: successfully sent to connection")
}
}
@@ -643,15 +674,15 @@ func (mc *multiChannelNodeConn) send(data *tailcfg.MapResponse) error {
// Remove failed connections (in reverse order to maintain indices)
for i := len(failedConnections) - 1; i >= 0; i-- {
idx := failedConnections[i]
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).
Str("conn.id", mc.connections[idx].id).
mc.log.Debug().Caller().
Str(zf.ConnID, mc.connections[idx].id).
Msg("send: removing failed connection")
mc.connections = append(mc.connections[:idx], mc.connections[idx+1:]...)
}

mc.updateCount.Add(1)

log.Debug().Uint64("node.id", mc.id.Uint64()).
mc.log.Debug().
Int("successful_sends", successCount).
Int("failed_connections", len(failedConnections)).
Int("remaining_connections", len(mc.connections)).
@@ -688,7 +719,7 @@ func (entry *connectionEntry) send(data *tailcfg.MapResponse) error {
case <-time.After(50 * time.Millisecond):
// Connection is likely stale - client isn't reading from channel
// This catches the case where Docker containers are killed but channels remain open
return fmt.Errorf("connection %s: timeout sending to channel (likely stale connection)", entry.id)
return fmt.Errorf("connection %s: %w", entry.id, ErrConnectionSendTimeout)
}
}
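The send path above relies on a channel send guarded by a short timeout: a client that does not drain its channel within 50ms is treated as stale and culled, and the hunk replaces the one-off error string with a wrapped sentinel so callers can match it with `errors.Is`. A minimal sketch of that select pattern:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var ErrSendTimeout = errors.New("timeout sending to channel (likely stale connection)")

// trySend delivers v unless the receiver fails to pick it up in time.
func trySend[T any](ch chan<- T, v T, timeout time.Duration) error {
	select {
	case ch <- v:
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("connection: %w", ErrSendTimeout)
	}
}

func main() {
	ch := make(chan int) // unbuffered and never read: the send times out
	err := trySend(ch, 42, 50*time.Millisecond)
	fmt.Println(errors.Is(err, ErrSendTimeout)) // true
}
```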
@@ -798,6 +829,7 @@ func (b *LockFreeBatcher) Debug() map[types.NodeID]DebugNodeInfo {
Connected: connected,
ActiveConnections: activeConnCount,
}

return true
})

@@ -812,6 +844,7 @@ func (b *LockFreeBatcher) Debug() map[types.NodeID]DebugNodeInfo {
ActiveConnections: 0,
}
}

return true
})


@@ -35,6 +35,7 @@ type batcherTestCase struct {
// that would normally be sent by poll.go in production.
type testBatcherWrapper struct {
Batcher

state *state.State
}

@@ -80,12 +81,7 @@ func (t *testBatcherWrapper) RemoveNode(id types.NodeID, c chan<- *tailcfg.MapRe
}

// Finally remove from the real batcher
removed := t.Batcher.RemoveNode(id, c)
if !removed {
return false
}

return true
return t.Batcher.RemoveNode(id, c)
}

// wrapBatcherForTest wraps a batcher with test-specific behavior.
@@ -129,8 +125,6 @@ const (
SMALL_BUFFER_SIZE = 3
TINY_BUFFER_SIZE = 1 // For maximum contention
LARGE_BUFFER_SIZE = 200

reservedResponseHeaderSize = 4
)

// TestData contains all test entities created for a test scenario.
@@ -241,8 +235,8 @@ func setupBatcherWithTestData(
}

derpMap, err := derp.GetDERPMap(cfg.DERP)
assert.NoError(t, err)
assert.NotNil(t, derpMap)
require.NoError(t, err)
require.NotNil(t, derpMap)

state.SetDERPMap(derpMap)
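Swapping `assert` for `require` on the DERP map setup matters because `require` aborts the test immediately on failure, while `assert` records the failure and keeps running — dereferencing `derpMap` after a failed `assert.NotNil` would panic. The distinction in a self-contained test:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// loadMap is a hypothetical setup helper standing in for derp.GetDERPMap.
func loadMap() (map[string]int, error) {
	return map[string]int{"a": 1}, nil
}

func TestRequireStopsEarly(t *testing.T) {
	m, err := loadMap()
	require.NoError(t, err) // FailNow: the test stops here on error
	require.NotNil(t, m)    // so the use below can never see a nil map

	assert.Equal(t, 1, len(m)) // assert: records a failure but continues
}
```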
@@ -319,6 +313,8 @@ func (ut *updateTracker) recordUpdate(nodeID types.NodeID, updateSize int) {
}

// getStats returns a copy of the statistics for a node.
//
//nolint:unused
func (ut *updateTracker) getStats(nodeID types.NodeID) UpdateStats {
ut.mu.RLock()
defer ut.mu.RUnlock()
@@ -386,16 +382,14 @@ type UpdateInfo struct {
}

// parseUpdateAndAnalyze parses an update and returns detailed information.
func parseUpdateAndAnalyze(resp *tailcfg.MapResponse) (UpdateInfo, error) {
info := UpdateInfo{
func parseUpdateAndAnalyze(resp *tailcfg.MapResponse) UpdateInfo {
return UpdateInfo{
PeerCount: len(resp.Peers),
PatchCount: len(resp.PeersChangedPatch),
IsFull: len(resp.Peers) > 0,
IsPatch: len(resp.PeersChangedPatch) > 0,
IsDERP: resp.DERPMap != nil,
}

return info, nil
}

// start begins consuming updates from the node's channel and tracking stats.
@@ -417,7 +411,8 @@ func (n *node) start() {
atomic.AddInt64(&n.updateCount, 1)

// Parse update and track detailed stats
if info, err := parseUpdateAndAnalyze(data); err == nil {
info := parseUpdateAndAnalyze(data)
{
// Track update types
if info.IsFull {
atomic.AddInt64(&n.fullCount, 1)
@@ -548,7 +543,7 @@ func TestEnhancedTrackingWithBatcher(t *testing.T) {
testNode.start()

// Connect the node to the batcher
batcher.AddNode(testNode.n.ID, testNode.ch, tailcfg.CapabilityVersion(100))
_ = batcher.AddNode(testNode.n.ID, testNode.ch, tailcfg.CapabilityVersion(100))

// Wait for connection to be established
assert.EventuallyWithT(t, func(c *assert.CollectT) {
@@ -657,7 +652,7 @@ func TestBatcherScalabilityAllToAll(t *testing.T) {

for i := range allNodes {
node := &allNodes[i]
batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100))
_ = batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100))

// Issue full update after each join to ensure connectivity
batcher.AddWork(change.FullUpdate())
@@ -676,6 +671,7 @@ func TestBatcherScalabilityAllToAll(t *testing.T) {

assert.EventuallyWithT(t, func(c *assert.CollectT) {
connectedCount := 0

for i := range allNodes {
node := &allNodes[i]

@@ -693,6 +689,7 @@ func TestBatcherScalabilityAllToAll(t *testing.T) {
}, 5*time.Minute, 5*time.Second, "waiting for full connectivity")

t.Logf("✅ All nodes achieved full connectivity!")

totalTime := time.Since(startTime)

// Disconnect all nodes
@@ -820,11 +817,11 @@ func TestBatcherBasicOperations(t *testing.T) {
defer cleanup()

batcher := testData.Batcher
tn := testData.Nodes[0]
tn2 := testData.Nodes[1]
tn := &testData.Nodes[0]
tn2 := &testData.Nodes[1]

// Test AddNode with real node ID
batcher.AddNode(tn.n.ID, tn.ch, 100)
_ = batcher.AddNode(tn.n.ID, tn.ch, 100)

if !batcher.IsConnected(tn.n.ID) {
t.Error("Node should be connected after AddNode")
@@ -842,10 +839,10 @@ func TestBatcherBasicOperations(t *testing.T) {
}

// Drain any initial messages from first node
drainChannelTimeout(tn.ch, "first node before second", 100*time.Millisecond)
drainChannelTimeout(tn.ch, 100*time.Millisecond)

// Add the second node and verify update message
batcher.AddNode(tn2.n.ID, tn2.ch, 100)
_ = batcher.AddNode(tn2.n.ID, tn2.ch, 100)
assert.True(t, batcher.IsConnected(tn2.n.ID))

// First node should get an update that second node has connected.
@@ -911,18 +908,14 @@ func TestBatcherBasicOperations(t *testing.T) {
}
}

func drainChannelTimeout(ch <-chan *tailcfg.MapResponse, name string, timeout time.Duration) {
count := 0

func drainChannelTimeout(ch <-chan *tailcfg.MapResponse, timeout time.Duration) {
timer := time.NewTimer(timeout)
defer timer.Stop()

for {
select {
case data := <-ch:
count++
// Optional: add debug output if needed
_ = data
case <-ch:
// Drain message
case <-timer.C:
return
}
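A pattern repeated through these test hunks replaces `for _, node := range nodes` with `for i := range nodes { node := &nodes[i] }`. The former iterates over copies of the slice elements, so any mutation of per-node state is silently lost; indexing takes a pointer into the backing array instead. A small illustration:

```go
package main

import "fmt"

type node struct{ hits int }

func main() {
	nodes := make([]node, 2)

	for _, n := range nodes {
		n.hits++ // mutates a copy; the slice is untouched
	}
	fmt.Println(nodes) // [{0} {0}]

	for i := range nodes {
		n := &nodes[i] // pointer to the element in place
		n.hits++
	}
	fmt.Println(nodes) // [{1} {1}]
}
```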
@@ -1050,7 +1043,7 @@ func TestBatcherWorkQueueBatching(t *testing.T) {
testNodes := testData.Nodes

ch := make(chan *tailcfg.MapResponse, 10)
batcher.AddNode(testNodes[0].n.ID, ch, tailcfg.CapabilityVersion(100))
_ = batcher.AddNode(testNodes[0].n.ID, ch, tailcfg.CapabilityVersion(100))

// Track update content for validation
var receivedUpdates []*tailcfg.MapResponse
@@ -1131,6 +1124,8 @@ func TestBatcherWorkQueueBatching(t *testing.T) {
// even when real node updates are being processed, ensuring no race conditions
// occur during channel replacement with actual workload.
func XTestBatcherChannelClosingRace(t *testing.T) {
t.Helper()

for _, batcherFunc := range allBatcherFunctions {
t.Run(batcherFunc.name, func(t *testing.T) {
// Create test environment with real database and nodes
@@ -1138,7 +1133,7 @@ func XTestBatcherChannelClosingRace(t *testing.T) {
defer cleanup()

batcher := testData.Batcher
testNode := testData.Nodes[0]
testNode := &testData.Nodes[0]

var (
channelIssues int
@@ -1154,7 +1149,7 @@ func XTestBatcherChannelClosingRace(t *testing.T) {
ch1 := make(chan *tailcfg.MapResponse, 1)

wg.Go(func() {
batcher.AddNode(testNode.n.ID, ch1, tailcfg.CapabilityVersion(100))
_ = batcher.AddNode(testNode.n.ID, ch1, tailcfg.CapabilityVersion(100))
})

// Add real work during connection chaos
@@ -1167,7 +1162,8 @@ func XTestBatcherChannelClosingRace(t *testing.T) {

wg.Go(func() {
runtime.Gosched() // Yield to introduce timing variability
batcher.AddNode(testNode.n.ID, ch2, tailcfg.CapabilityVersion(100))

_ = batcher.AddNode(testNode.n.ID, ch2, tailcfg.CapabilityVersion(100))
})

// Remove second connection
@@ -1231,7 +1227,7 @@ func TestBatcherWorkerChannelSafety(t *testing.T) {
defer cleanup()

batcher := testData.Batcher
testNode := testData.Nodes[0]
testNode := &testData.Nodes[0]

var (
panics int
@@ -1258,7 +1254,7 @@ func TestBatcherWorkerChannelSafety(t *testing.T) {
ch := make(chan *tailcfg.MapResponse, 5)

// Add node and immediately queue real work
batcher.AddNode(testNode.n.ID, ch, tailcfg.CapabilityVersion(100))
_ = batcher.AddNode(testNode.n.ID, ch, tailcfg.CapabilityVersion(100))
batcher.AddWork(change.DERPMap())

// Consumer goroutine to validate data and detect channel issues
@@ -1308,6 +1304,7 @@ func TestBatcherWorkerChannelSafety(t *testing.T) {
for range i % 3 {
runtime.Gosched() // Introduce timing variability
}

batcher.RemoveNode(testNode.n.ID, ch)

// Yield to allow workers to process and close channels
@@ -1350,6 +1347,8 @@ func TestBatcherWorkerChannelSafety(t *testing.T) {
// real node data. The test validates that stable clients continue to function
// normally and receive proper updates despite the connection churn from other clients,
// ensuring system stability under concurrent load.
//
//nolint:gocyclo // complex concurrent test scenario
func TestBatcherConcurrentClients(t *testing.T) {
if testing.Short() {
t.Skip("Skipping concurrent client test in short mode")
@@ -1377,10 +1376,11 @@ func TestBatcherConcurrentClients(t *testing.T) {
stableNodes := allNodes[:len(allNodes)/2] // Use first half as stable
stableChannels := make(map[types.NodeID]chan *tailcfg.MapResponse)

for _, node := range stableNodes {
for i := range stableNodes {
node := &stableNodes[i]
ch := make(chan *tailcfg.MapResponse, NORMAL_BUFFER_SIZE)
stableChannels[node.n.ID] = ch
batcher.AddNode(node.n.ID, ch, tailcfg.CapabilityVersion(100))
_ = batcher.AddNode(node.n.ID, ch, tailcfg.CapabilityVersion(100))

// Monitor updates for each stable client
go func(nodeID types.NodeID, channel chan *tailcfg.MapResponse) {
@@ -1391,6 +1391,7 @@ func TestBatcherConcurrentClients(t *testing.T) {
// Channel was closed, exit gracefully
return
}

if valid, reason := validateUpdateContent(data); valid {
tracker.recordUpdate(
nodeID,
@@ -1427,7 +1428,9 @@ func TestBatcherConcurrentClients(t *testing.T) {

// Connection churn cycles - rapidly connect/disconnect to test concurrency safety
for i := range numCycles {
for _, node := range churningNodes {
for j := range churningNodes {
node := &churningNodes[j]

wg.Add(2)

// Connect churning node
@@ -1448,10 +1451,12 @@ func TestBatcherConcurrentClients(t *testing.T) {
ch := make(chan *tailcfg.MapResponse, SMALL_BUFFER_SIZE)

churningChannelsMutex.Lock()

churningChannels[nodeID] = ch

churningChannelsMutex.Unlock()

batcher.AddNode(nodeID, ch, tailcfg.CapabilityVersion(100))
_ = batcher.AddNode(nodeID, ch, tailcfg.CapabilityVersion(100))

// Consume updates to prevent blocking
go func() {
@@ -1462,6 +1467,7 @@ func TestBatcherConcurrentClients(t *testing.T) {
// Channel was closed, exit gracefully
return
}

if valid, _ := validateUpdateContent(data); valid {
tracker.recordUpdate(
nodeID,
@@ -1494,6 +1500,7 @@ func TestBatcherConcurrentClients(t *testing.T) {
for range i % 5 {
runtime.Gosched() // Introduce timing variability
}

churningChannelsMutex.Lock()

ch, exists := churningChannels[nodeID]
@@ -1519,7 +1526,7 @@ func TestBatcherConcurrentClients(t *testing.T) {

if i%7 == 0 && len(allNodes) > 0 {
// Node-specific changes using real nodes
node := allNodes[i%len(allNodes)]
node := &allNodes[i%len(allNodes)]
// Use a valid expiry time for testing since test nodes don't have expiry set
testExpiry := time.Now().Add(24 * time.Hour)
batcher.AddWork(change.KeyExpiryFor(node.n.ID, testExpiry))
@@ -1567,7 +1574,8 @@ func TestBatcherConcurrentClients(t *testing.T) {
t.Logf("Work generated: %d DERP + %d Full + %d KeyExpiry = %d total AddWork calls",
expectedDerpUpdates, expectedFullUpdates, expectedKeyUpdates, totalGeneratedWork)

for _, node := range stableNodes {
for i := range stableNodes {
node := &stableNodes[i]
if stats, exists := allStats[node.n.ID]; exists {
stableUpdateCount += stats.TotalUpdates
t.Logf("Stable node %d: %d updates",
@@ -1580,7 +1588,8 @@ func TestBatcherConcurrentClients(t *testing.T) {
}
}

for _, node := range churningNodes {
for i := range churningNodes {
node := &churningNodes[i]
if stats, exists := allStats[node.n.ID]; exists {
churningUpdateCount += stats.TotalUpdates
}
@@ -1605,7 +1614,8 @@ func TestBatcherConcurrentClients(t *testing.T) {
}

// Verify all stable clients are still functional
for _, node := range stableNodes {
for i := range stableNodes {
node := &stableNodes[i]
if !batcher.IsConnected(node.n.ID) {
t.Errorf("Stable node %d lost connection during racing", node.n.ID)
}
@@ -1623,6 +1633,8 @@ func TestBatcherConcurrentClients(t *testing.T) {
// It validates that the system remains stable with no deadlocks, panics, or
// missed updates under sustained high load. The test uses real node data to
// generate authentic update scenarios and tracks comprehensive statistics.
//
//nolint:gocyclo,thelper // complex scalability test scenario
func XTestBatcherScalability(t *testing.T) {
if testing.Short() {
t.Skip("Skipping scalability test in short mode")
@@ -1651,7 +1663,7 @@ func XTestBatcherScalability(t *testing.T) {
description string
}

var testCases []testCase
testCases := make([]testCase, 0, len(chaosTypes)*len(bufferSizes)*len(cycles)*len(nodes))

// Generate all combinations of the test matrix
for _, nodeCount := range nodes {
@@ -1762,7 +1774,8 @@ func XTestBatcherScalability(t *testing.T) {

for i := range testNodes {
node := &testNodes[i]
batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100))
_ = batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100))

connectedNodesMutex.Lock()

connectedNodes[node.n.ID] = true
@@ -1824,7 +1837,8 @@ func XTestBatcherScalability(t *testing.T) {
}

// Connection/disconnection cycles for subset of nodes
for i, node := range chaosNodes {
for i := range chaosNodes {
node := &chaosNodes[i]
// Only add work if this is connection chaos or mixed
if tc.chaosType == "connection" || tc.chaosType == "mixed" {
wg.Add(2)
@@ -1878,6 +1892,7 @@ func XTestBatcherScalability(t *testing.T) {
channel,
tailcfg.CapabilityVersion(100),
)

connectedNodesMutex.Lock()

connectedNodes[nodeID] = true
@@ -2138,8 +2153,9 @@ func TestBatcherFullPeerUpdates(t *testing.T) {
t.Logf("Created %d nodes in database", len(allNodes))

// Connect nodes one at a time and wait for each to be connected
for i, node := range allNodes {
batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100))
for i := range allNodes {
node := &allNodes[i]
_ = batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100))
t.Logf("Connected node %d (ID: %d)", i, node.n.ID)

// Wait for node to be connected
@@ -2157,7 +2173,8 @@ func TestBatcherFullPeerUpdates(t *testing.T) {
}, 5*time.Second, 50*time.Millisecond, "waiting for all nodes to connect")

// Check how many peers each node should see
for i, node := range allNodes {
for i := range allNodes {
node := &allNodes[i]
peers := testData.State.ListPeers(node.n.ID)
t.Logf("Node %d should see %d peers from state", i, peers.Len())
}
@@ -2286,7 +2303,10 @@ func TestBatcherRapidReconnection(t *testing.T) {

// Phase 1: Connect all nodes initially
t.Logf("Phase 1: Connecting all nodes...")
for i, node := range allNodes {

for i := range allNodes {
node := &allNodes[i]

err := batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100))
if err != nil {
t.Fatalf("Failed to add node %d: %v", i, err)
@@ -2302,16 +2322,21 @@ func TestBatcherRapidReconnection(t *testing.T) {

// Phase 2: Rapid disconnect ALL nodes (simulating nodes going down)
t.Logf("Phase 2: Rapid disconnect all nodes...")
for i, node := range allNodes {

for i := range allNodes {
node := &allNodes[i]
removed := batcher.RemoveNode(node.n.ID, node.ch)
t.Logf("Node %d RemoveNode result: %t", i, removed)
}

// Phase 3: Rapid reconnect with NEW channels (simulating nodes coming back up)
t.Logf("Phase 3: Rapid reconnect with new channels...")

newChannels := make([]chan *tailcfg.MapResponse, len(allNodes))
for i, node := range allNodes {
for i := range allNodes {
node := &allNodes[i]
newChannels[i] = make(chan *tailcfg.MapResponse, 10)

err := batcher.AddNode(node.n.ID, newChannels[i], tailcfg.CapabilityVersion(100))
if err != nil {
t.Errorf("Failed to reconnect node %d: %v", i, err)
@@ -2334,7 +2359,8 @@ func TestBatcherRapidReconnection(t *testing.T) {
debugInfo := debugBatcher.Debug()
disconnectedCount := 0

for i, node := range allNodes {
for i := range allNodes {
node := &allNodes[i]
if info, exists := debugInfo[node.n.ID]; exists {
t.Logf("Node %d (ID %d): debug info = %+v", i, node.n.ID, info)

@@ -2342,11 +2368,13 @@ func TestBatcherRapidReconnection(t *testing.T) {
if infoMap, ok := info.(map[string]any); ok {
if connected, ok := infoMap["connected"].(bool); ok && !connected {
disconnectedCount++

t.Logf("BUG REPRODUCED: Node %d shows as disconnected in debug but should be connected", i)
}
}
} else {
disconnectedCount++

t.Logf("Node %d missing from debug info entirely", i)
}

@@ -2381,6 +2409,7 @@ func TestBatcherRapidReconnection(t *testing.T) {
case update := <-newChannels[i]:
if update != nil {
receivedCount++

t.Logf("Node %d received update successfully", i)
}
case <-timeout:
@@ -2399,6 +2428,7 @@ func TestBatcherRapidReconnection(t *testing.T) {
}
}

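The connectivity checks in these tests poll with `assert.EventuallyWithT`, which re-runs the callback until every assertion inside it passes or the deadline expires, instead of sleeping for a fixed time. A self-contained sketch of the idiom:

```go
package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventually(t *testing.T) {
	var ready atomic.Bool
	go func() {
		time.Sleep(100 * time.Millisecond)
		ready.Store(true)
	}()

	// Retries every 10ms for up to 2s; the test fails only if the
	// condition never holds within the window.
	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		assert.True(c, ready.Load(), "worker not ready yet")
	}, 2*time.Second, 10*time.Millisecond)
}
```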
//nolint:gocyclo // complex multi-connection test scenario
func TestBatcherMultiConnection(t *testing.T) {
for _, batcherFunc := range allBatcherFunctions {
t.Run(batcherFunc.name, func(t *testing.T) {
@@ -2406,13 +2436,14 @@ func TestBatcherMultiConnection(t *testing.T) {
defer cleanup()

batcher := testData.Batcher
node1 := testData.Nodes[0]
node2 := testData.Nodes[1]
node1 := &testData.Nodes[0]
node2 := &testData.Nodes[1]

t.Logf("=== MULTI-CONNECTION TEST ===")

// Phase 1: Connect first node with initial connection
t.Logf("Phase 1: Connecting node 1 with first connection...")

err := batcher.AddNode(node1.n.ID, node1.ch, tailcfg.CapabilityVersion(100))
if err != nil {
t.Fatalf("Failed to add node1: %v", err)
@@ -2432,7 +2463,9 @@ func TestBatcherMultiConnection(t *testing.T) {

// Phase 2: Add second connection for node1 (multi-connection scenario)
t.Logf("Phase 2: Adding second connection for node 1...")

secondChannel := make(chan *tailcfg.MapResponse, 10)

err = batcher.AddNode(node1.n.ID, secondChannel, tailcfg.CapabilityVersion(100))
if err != nil {
t.Fatalf("Failed to add second connection for node1: %v", err)
@@ -2443,7 +2476,9 @@ func TestBatcherMultiConnection(t *testing.T) {

// Phase 3: Add third connection for node1
t.Logf("Phase 3: Adding third connection for node 1...")

thirdChannel := make(chan *tailcfg.MapResponse, 10)

err = batcher.AddNode(node1.n.ID, thirdChannel, tailcfg.CapabilityVersion(100))
if err != nil {
t.Fatalf("Failed to add third connection for node1: %v", err)
@@ -2454,6 +2489,7 @@ func TestBatcherMultiConnection(t *testing.T) {

// Phase 4: Verify debug status shows correct connection count
t.Logf("Phase 4: Verifying debug status shows multiple connections...")

if debugBatcher, ok := batcher.(interface {
Debug() map[types.NodeID]any
}); ok {
@@ -2461,6 +2497,7 @@ func TestBatcherMultiConnection(t *testing.T) {

if info, exists := debugInfo[node1.n.ID]; exists {
t.Logf("Node1 debug info: %+v", info)

if infoMap, ok := info.(map[string]any); ok {
if activeConnections, ok := infoMap["active_connections"].(int); ok {
if activeConnections != 3 {
@@ -2469,6 +2506,7 @@ func TestBatcherMultiConnection(t *testing.T) {
t.Logf("SUCCESS: Node1 correctly shows 3 active connections")
}
}

if connected, ok := infoMap["connected"].(bool); ok && !connected {
t.Errorf("Node1 should show as connected with 3 active connections")
}

@@ -1,7 +1,6 @@
package mapper

import (
"errors"
"net/netip"
"sort"
"time"
@@ -36,6 +35,7 @@ const (
// NewMapResponseBuilder creates a new builder with basic fields set.
func (m *mapper) NewMapResponseBuilder(nodeID types.NodeID) *MapResponseBuilder {
now := time.Now()

return &MapResponseBuilder{
resp: &tailcfg.MapResponse{
KeepAlive: false,
@@ -69,7 +69,7 @@ func (b *MapResponseBuilder) WithCapabilityVersion(capVer tailcfg.CapabilityVers
func (b *MapResponseBuilder) WithSelfNode() *MapResponseBuilder {
nv, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
b.addError(errors.New("node not found"))
b.addError(ErrNodeNotFoundMapper)
return b
}

@@ -123,6 +123,7 @@ func (b *MapResponseBuilder) WithDebugConfig() *MapResponseBuilder {
b.resp.Debug = &tailcfg.Debug{
DisableLogTail: !b.mapper.cfg.LogTail.Enabled,
}

return b
}

@@ -130,7 +131,7 @@ func (b *MapResponseBuilder) WithDebugConfig() *MapResponseBuilder {
func (b *MapResponseBuilder) WithSSHPolicy() *MapResponseBuilder {
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
b.addError(errors.New("node not found"))
b.addError(ErrNodeNotFoundMapper)
return b
}

@@ -149,7 +150,7 @@ func (b *MapResponseBuilder) WithSSHPolicy() *MapResponseBuilder {
func (b *MapResponseBuilder) WithDNSConfig() *MapResponseBuilder {
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
b.addError(errors.New("node not found"))
b.addError(ErrNodeNotFoundMapper)
return b
}

@@ -162,7 +163,7 @@ func (b *MapResponseBuilder) WithDNSConfig() *MapResponseBuilder {
func (b *MapResponseBuilder) WithUserProfiles(peers views.Slice[types.NodeView]) *MapResponseBuilder {
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
b.addError(errors.New("node not found"))
b.addError(ErrNodeNotFoundMapper)
return b
}

@@ -175,7 +176,7 @@ func (b *MapResponseBuilder) WithUserProfiles(peers views.Slice[types.NodeView])
func (b *MapResponseBuilder) WithPacketFilters() *MapResponseBuilder {
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
b.addError(errors.New("node not found"))
b.addError(ErrNodeNotFoundMapper)
return b
}

@@ -229,7 +230,7 @@ func (b *MapResponseBuilder) WithPeerChanges(peers views.Slice[types.NodeView])
func (b *MapResponseBuilder) buildTailPeers(peers views.Slice[types.NodeView]) ([]*tailcfg.Node, error) {
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
return nil, errors.New("node not found")
return nil, ErrNodeNotFoundMapper
}

// Get unreduced matchers for peer relationship determination.
@@ -276,20 +277,22 @@ func (b *MapResponseBuilder) WithPeerChangedPatch(changes []*tailcfg.PeerChange)

// WithPeersRemoved adds removed peer IDs.
func (b *MapResponseBuilder) WithPeersRemoved(removedIDs ...types.NodeID) *MapResponseBuilder {
var tailscaleIDs []tailcfg.NodeID
tailscaleIDs := make([]tailcfg.NodeID, 0, len(removedIDs))
for _, id := range removedIDs {
tailscaleIDs = append(tailscaleIDs, id.NodeID())
}

b.resp.PeersRemoved = tailscaleIDs

return b
}

// Build finalizes the response and returns marshaled bytes
// Build finalizes the response and returns marshaled bytes.
func (b *MapResponseBuilder) Build() (*tailcfg.MapResponse, error) {
if len(b.errs) > 0 {
return nil, multierr.New(b.errs...)
}

if debugDumpMapResponsePath != "" {
writeDebugMapResponse(b.resp, b.debugType, b.nodeID)
}

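These builder hunks replace repeated `errors.New("node not found")` allocations with one shared sentinel (`ErrNodeNotFoundMapper`) that callers can match with `errors.Is`, while `Build()` joins every error collected along the chain. A compact sketch of the accumulate-then-join shape, using the standard library's `errors.Join` in place of the repo's `multierr` helper:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrNodeNotFound = errors.New("node not found")

type builder struct{ errs []error }

func (b *builder) withSelf(ok bool) *builder {
	if !ok {
		b.errs = append(b.errs, ErrNodeNotFound) // shared sentinel, no per-call allocation
	}
	return b
}

func (b *builder) build() (string, error) {
	if len(b.errs) > 0 {
		return "", errors.Join(b.errs...)
	}
	return "response", nil
}

func main() {
	_, err := (&builder{}).withSelf(false).withSelf(false).build()
	fmt.Println(errors.Is(err, ErrNodeNotFound)) // true: the sentinel survives joining
}
```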
@@ -339,8 +339,8 @@ func TestMapResponseBuilder_MultipleErrors(t *testing.T) {

// Build should return a multierr
data, err := result.Build()
assert.Nil(t, data)
assert.Error(t, err)
require.Nil(t, data)
require.Error(t, err)

// The error should contain information about multiple errors
assert.Contains(t, err.Error(), "multiple errors")

@@ -24,7 +24,6 @@ import (

const (
nextDNSDoHPrefix = "https://dns.nextdns.io"
mapperIDLength = 8
debugMapResponsePerm = 0o755
)

@@ -50,6 +49,7 @@ type mapper struct {
created time.Time
}

//nolint:unused
type patch struct {
timestamp time.Time
change *tailcfg.PeerChange
@@ -60,7 +60,6 @@ func newMapper(
state *state.State,
) *mapper {
// uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)

return &mapper{
state: state,
cfg: cfg,
@@ -76,12 +75,26 @@ func generateUserProfiles(
) []tailcfg.UserProfile {
userMap := make(map[uint]*types.UserView)
ids := make([]uint, 0, len(userMap))

user := node.Owner()
if !user.Valid() {
log.Error().
EmbedObject(node).
Msg("node has no valid owner, skipping user profile generation")

return nil
}

userID := user.Model().ID
userMap[userID] = &user
ids = append(ids, userID)

for _, peer := range peers.All() {
peerUser := peer.Owner()
if !peerUser.Valid() {
continue
}

peerUserID := peerUser.Model().ID
userMap[peerUserID] = &peerUser
ids = append(ids, peerUserID)
@@ -89,7 +102,9 @@ func generateUserProfiles(

slices.Sort(ids)
ids = slices.Compact(ids)

var profiles []tailcfg.UserProfile

for _, id := range ids {
if userMap[id] != nil {
profiles = append(profiles, userMap[id].TailscaleUserProfile())
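`generateUserProfiles` above collects user IDs into a slice and deduplicates them with `slices.Sort` followed by `slices.Compact`, which removes adjacent duplicates after sorting and leaves the result ordered. Isolated:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	ids := []uint{7, 2, 7, 1, 2}
	slices.Sort(ids)          // [1 2 2 7 7]
	ids = slices.Compact(ids) // [1 2 7]: Compact drops adjacent repeats
	fmt.Println(ids)
}
```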
@@ -139,6 +154,8 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node types.NodeView) {
}

// fullMapResponse returns a MapResponse for the given node.
//
//nolint:unused
func (m *mapper) fullMapResponse(
nodeID types.NodeID,
capVer tailcfg.CapabilityVersion,
@@ -306,6 +323,7 @@ func writeDebugMapResponse(

perms := fs.FileMode(debugMapResponsePerm)
mPath := path.Join(debugDumpMapResponsePath, fmt.Sprintf("%d", nodeID))

err = os.MkdirAll(mPath, perms)
if err != nil {
panic(err)
@@ -318,7 +336,8 @@ func writeDebugMapResponse(
fmt.Sprintf("%s-%s.json", now, t),
)

log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath)
log.Trace().Msgf("writing MapResponse to %s", mapResponsePath)

err = os.WriteFile(mapResponsePath, body, perms)
if err != nil {
panic(err)
@@ -327,7 +346,7 @@

func (m *mapper) debugMapResponses() (map[types.NodeID][]tailcfg.MapResponse, error) {
if debugDumpMapResponsePath == "" {
return nil, nil
return nil, nil //nolint:nilnil // intentional: no data when debug path not set
}

return ReadMapResponsesFromDirectory(debugDumpMapResponsePath)
@@ -340,6 +359,7 @@ func ReadMapResponsesFromDirectory(dir string) (map[types.NodeID][]tailcfg.MapRe
}

result := make(map[types.NodeID][]tailcfg.MapResponse)

for _, node := range nodes {
if !node.IsDir() {
continue
@@ -347,7 +367,7 @@ func ReadMapResponsesFromDirectory(dir string) (map[types.NodeID][]tailcfg.MapRe

nodeIDu, err := strconv.ParseUint(node.Name(), 10, 64)
if err != nil {
log.Error().Err(err).Msgf("Parsing node ID from dir %s", node.Name())
log.Error().Err(err).Msgf("parsing node ID from dir %s", node.Name())
continue
}

@@ -355,7 +375,7 @@ func ReadMapResponsesFromDirectory(dir string) (map[types.NodeID][]tailcfg.MapRe

files, err := os.ReadDir(path.Join(dir, node.Name()))
if err != nil {
log.Error().Err(err).Msgf("Reading dir %s", node.Name())
log.Error().Err(err).Msgf("reading dir %s", node.Name())
continue
}

@@ -370,14 +390,15 @@ func ReadMapResponsesFromDirectory(dir string) (map[types.NodeID][]tailcfg.MapRe

body, err := os.ReadFile(path.Join(dir, node.Name(), file.Name()))
if err != nil {
log.Error().Err(err).Msgf("Reading file %s", file.Name())
log.Error().Err(err).Msgf("reading file %s", file.Name())
continue
}

var resp tailcfg.MapResponse

err = json.Unmarshal(body, &resp)
if err != nil {
log.Error().Err(err).Msgf("Unmarshalling file %s", file.Name())
log.Error().Err(err).Msgf("unmarshalling file %s", file.Name())
continue
}


@@ -3,18 +3,13 @@ package mapper
import (
"fmt"
"net/netip"
"slices"
"testing"

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/juanfont/headscale/hscontrol/routes"
"github.com/juanfont/headscale/hscontrol/types"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
"tailscale.com/types/ptr"
)

var iap = func(ipStr string) *netip.Addr {
@@ -51,7 +46,7 @@ func TestDNSConfigMapResponse(t *testing.T) {
mach := func(hostname, username string, userid uint) *types.Node {
return &types.Node{
Hostname: hostname,
UserID: ptr.To(userid),
UserID: new(userid),
User: &types.User{
Name: username,
},
@@ -81,90 +76,3 @@ func TestDNSConfigMapResponse(t *testing.T) {
})
}
}

// mockState is a mock implementation that provides the required methods.
type mockState struct {
polMan policy.PolicyManager
derpMap *tailcfg.DERPMap
primary *routes.PrimaryRoutes
nodes types.Nodes
peers types.Nodes
}

func (m *mockState) DERPMap() *tailcfg.DERPMap {
return m.derpMap
}

func (m *mockState) Filter() ([]tailcfg.FilterRule, []matcher.Match) {
if m.polMan == nil {
return tailcfg.FilterAllowAll, nil
}
return m.polMan.Filter()
}

func (m *mockState) SSHPolicy(node types.NodeView) (*tailcfg.SSHPolicy, error) {
if m.polMan == nil {
return nil, nil
}
return m.polMan.SSHPolicy(node)
}

func (m *mockState) NodeCanHaveTag(node types.NodeView, tag string) bool {
if m.polMan == nil {
return false
}
return m.polMan.NodeCanHaveTag(node, tag)
}

func (m *mockState) GetNodePrimaryRoutes(nodeID types.NodeID) []netip.Prefix {
if m.primary == nil {
return nil
}
return m.primary.PrimaryRoutes(nodeID)
}

func (m *mockState) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) {
if len(peerIDs) > 0 {
// Filter peers by the provided IDs
var filtered types.Nodes
for _, peer := range m.peers {
if slices.Contains(peerIDs, peer.ID) {
filtered = append(filtered, peer)
}
}

return filtered, nil
}
// Return all peers except the node itself
var filtered types.Nodes
for _, peer := range m.peers {
if peer.ID != nodeID {
filtered = append(filtered, peer)
}
}

return filtered, nil
}

func (m *mockState) ListNodes(nodeIDs ...types.NodeID) (types.Nodes, error) {
if len(nodeIDs) > 0 {
// Filter nodes by the provided IDs
var filtered types.Nodes
for _, node := range m.nodes {
if slices.Contains(nodeIDs, node.ID) {
filtered = append(filtered, node)
}
}

return filtered, nil
}

return m.nodes, nil
}

func Test_fullMapResponse(t *testing.T) {
t.Skip("Test needs to be refactored for new state-based architecture")
// TODO: Refactor this test to work with the new state-based mapper
// The test architecture needs to be updated to work with the state interface
// instead of the old direct dependency injection pattern
}

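The test hunks in this file and the next swap `ptr.To(userid)` for `new(userid)`. The `new(expr)` form relies on a newer Go toolchain in which the builtin accepts an expression rather than only a type; on older toolchains, `ptr.To` or a one-line generic helper does the same job. A sketch of the helper variant:

```go
package main

import "fmt"

// ptrTo returns a pointer to a copy of v - the classic generic
// stand-in for taking the address of a literal value.
func ptrTo[T any](v T) *T { return &v }

func main() {
	userID := ptrTo(uint(0))
	fmt.Println(*userID) // 0
}
```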
@@ -13,12 +13,12 @@ import (
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/ptr"
)

func TestTailNode(t *testing.T) {
mustNK := func(str string) key.NodePublic {
var k key.NodePublic

_ = k.UnmarshalText([]byte(str))

return k
@@ -26,6 +27,7 @@ func TestTailNode(t *testing.T) {
mustDK := func(str string) key.DiscoPublic {
var k key.DiscoPublic

_ = k.UnmarshalText([]byte(str))

return k
@@ -33,6 +34,7 @@ func TestTailNode(t *testing.T) {
mustMK := func(str string) key.MachinePublic {
var k key.MachinePublic

_ = k.UnmarshalText([]byte(str))

return k
@@ -95,7 +97,7 @@ func TestTailNode(t *testing.T) {
IPv4: iap("100.64.0.1"),
Hostname: "mini",
GivenName: "mini",
UserID: ptr.To(uint(0)),
UserID: new(uint(0)),
User: &types.User{
Name: "mini",
},
@@ -137,8 +139,8 @@ func TestTailNode(t *testing.T) {
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")},
AllowedIPs: []netip.Prefix{
tsaddr.AllIPv4(),
netip.MustParsePrefix("192.168.0.0/24"),
netip.MustParsePrefix("100.64.0.1/32"),
netip.MustParsePrefix("192.168.0.0/24"),
tsaddr.AllIPv6(),
},
PrimaryRoutes: []netip.Prefix{
@@ -255,7 +257,7 @@ func TestNodeExpiry(t *testing.T) {
},
{
name: "localtime",
exp: tp(time.Time{}.Local()),
exp: tp(time.Time{}.Local()), //nolint:gosmopolitan
wantTimeZero: true,
},
}
@@ -284,7 +286,9 @@ func TestNodeExpiry(t *testing.T) {
if err != nil {
t.Fatalf("nodeExpiry() error = %v", err)
}

var deseri tailcfg.Node

err = json.Unmarshal(seri, &deseri)
if err != nil {
t.Fatalf("nodeExpiry() error = %v", err)

@@ -71,6 +71,7 @@ func prometheusMiddleware(next http.Handler) http.Handler {
rw := &respWriterProm{ResponseWriter: w}

timer := prometheus.NewTimer(httpDuration.WithLabelValues(path))

next.ServeHTTP(rw, r)
timer.ObserveDuration()
httpCounter.WithLabelValues(strconv.Itoa(rw.status), r.Method, path).Inc()
@@ -79,6 +80,7 @@ func prometheusMiddleware(next http.Handler) http.Handler {

type respWriterProm struct {
http.ResponseWriter

status int
written int64
wroteHeader bool
@@ -94,6 +96,7 @@ func (r *respWriterProm) Write(b []byte) (int, error) {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}

n, err := r.ResponseWriter.Write(b)
r.written += int64(n)


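`respWriterProm` above is the standard pattern for instrumenting handlers: embed `http.ResponseWriter`, record the status code and bytes written, and default to 200 when the handler never calls `WriteHeader` explicitly (mirroring what net/http itself does). A standalone sketch:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type statusWriter struct {
	http.ResponseWriter
	status      int
	written     int64
	wroteHeader bool
}

func (w *statusWriter) WriteHeader(code int) {
	w.status = code
	w.wroteHeader = true
	w.ResponseWriter.WriteHeader(code)
}

func (w *statusWriter) Write(b []byte) (int, error) {
	if !w.wroteHeader {
		w.WriteHeader(http.StatusOK) // implicit 200, like net/http itself
	}
	n, err := w.ResponseWriter.Write(b)
	w.written += int64(n)
	return n, err
}

func main() {
	rec := httptest.NewRecorder()
	sw := &statusWriter{ResponseWriter: rec}
	fmt.Fprint(sw, "hello")
	fmt.Println(sw.status, sw.written) // 200 5
}
```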
@@ -19,6 +19,9 @@ import (
"tailscale.com/types/key"
)

// ErrUnsupportedClientVersion is returned when a client connects with an unsupported protocol version.
var ErrUnsupportedClientVersion = errors.New("unsupported client version")

const (
// ts2021UpgradePath is the path that the server listens on for the WebSockets upgrade.
ts2021UpgradePath = "/ts2021"
@@ -51,7 +54,7 @@ func (h *Headscale) NoiseUpgradeHandler(
writer http.ResponseWriter,
req *http.Request,
) {
log.Trace().Caller().Msgf("Noise upgrade handler for client %s", req.RemoteAddr)
log.Trace().Caller().Msgf("noise upgrade handler for client %s", req.RemoteAddr)

upgrade := req.Header.Get("Upgrade")
if upgrade == "" {
@@ -60,7 +63,7 @@ func (h *Headscale) NoiseUpgradeHandler(
// be passed to Headscale. Let's give them a hint.
log.Warn().
Caller().
Msg("No Upgrade header in TS2021 request. If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.")
Msg("no upgrade header in TS2021 request. If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.")
http.Error(writer, "Internal error", http.StatusInternalServerError)

return
@@ -79,7 +82,7 @@ func (h *Headscale) NoiseUpgradeHandler(
noiseServer.earlyNoise,
)
if err != nil {
httpError(writer, fmt.Errorf("noise upgrade failed: %w", err))
httpError(writer, fmt.Errorf("upgrading noise connection: %w", err))
return
}

@@ -117,7 +120,7 @@ func (h *Headscale) NoiseUpgradeHandler(
}

func unsupportedClientError(version tailcfg.CapabilityVersion) error {
return fmt.Errorf("unsupported client version: %s (%d)", capver.TailscaleVersion(version), version)
return fmt.Errorf("%w: %s (%d)", ErrUnsupportedClientVersion, capver.TailscaleVersion(version), version)
}

func (ns *noiseServer) earlyNoise(protocolVersion int, writer io.Writer) error {
@@ -137,17 +140,20 @@ func (ns *noiseServer) earlyNoise(protocolVersion int, writer io.Writer) error {
// an HTTP/2 settings frame, which isn't of type 'T')
var notH2Frame [5]byte
copy(notH2Frame[:], earlyPayloadMagic)

var lenBuf [4]byte
binary.BigEndian.PutUint32(lenBuf[:], uint32(len(earlyJSON)))
binary.BigEndian.PutUint32(lenBuf[:], uint32(len(earlyJSON))) //nolint:gosec // JSON length is bounded
// These writes are all buffered by caller, so fine to do them
// separately:
if _, err := writer.Write(notH2Frame[:]); err != nil {
if _, err := writer.Write(notH2Frame[:]); err != nil { //nolint:noinlineerr
return err
}
if _, err := writer.Write(lenBuf[:]); err != nil {

if _, err := writer.Write(lenBuf[:]); err != nil { //nolint:noinlineerr
return err
}
if _, err := writer.Write(earlyJSON); err != nil {

if _, err := writer.Write(earlyJSON); err != nil { //nolint:noinlineerr
return err
}

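`earlyNoise` writes a small framed payload: a 5-byte magic that cannot be mistaken for an HTTP/2 frame, a big-endian 4-byte length, then the JSON body. The framing shape in isolation, with a made-up magic value:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// writeFrame emits magic | uint32 length | payload, as in the handler above.
func writeFrame(buf *bytes.Buffer, magic, payload []byte) error {
	if _, err := buf.Write(magic); err != nil {
		return err
	}
	var lenBuf [4]byte
	binary.BigEndian.PutUint32(lenBuf[:], uint32(len(payload)))
	if _, err := buf.Write(lenBuf[:]); err != nil {
		return err
	}
	_, err := buf.Write(payload)
	return err
}

func main() {
	var buf bytes.Buffer
	// "EARLY" is illustrative only, not the real magic constant.
	_ = writeFrame(&buf, []byte("EARLY"), []byte(`{"v":1}`))
	fmt.Printf("% x\n", buf.Bytes())
}
```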
@@ -199,7 +205,7 @@ func (ns *noiseServer) NoisePollNetMapHandler(
body, _ := io.ReadAll(req.Body)

var mapRequest tailcfg.MapRequest
if err := json.Unmarshal(body, &mapRequest); err != nil {
if err := json.Unmarshal(body, &mapRequest); err != nil { //nolint:noinlineerr
httpError(writer, err)
return
}
@@ -218,7 +224,8 @@ func (ns *noiseServer) NoisePollNetMapHandler(
ns.nodeKey = nv.NodeKey()

sess := ns.headscale.newMapSession(req.Context(), mapRequest, writer, nv.AsStruct())
sess.tracef("a node sending a MapRequest with Noise protocol")
sess.log.Trace().Caller().Msg("a node sending a MapRequest with Noise protocol")

if !sess.isStreaming() {
sess.serve()
} else {
@@ -241,14 +248,16 @@ func (ns *noiseServer) NoiseRegistrationHandler(
return
}

registerRequest, registerResponse := func() (*tailcfg.RegisterRequest, *tailcfg.RegisterResponse) {
registerRequest, registerResponse := func() (*tailcfg.RegisterRequest, *tailcfg.RegisterResponse) { //nolint:contextcheck
var resp *tailcfg.RegisterResponse

body, err := io.ReadAll(req.Body)
if err != nil {
return &tailcfg.RegisterRequest{}, regErr(err)
}

var regReq tailcfg.RegisterRequest
if err := json.Unmarshal(body, &regReq); err != nil {
if err := json.Unmarshal(body, &regReq); err != nil { //nolint:noinlineerr
return &regReq, regErr(err)
}

@@ -256,11 +265,11 @@ func (ns *noiseServer) NoiseRegistrationHandler(

resp, err = ns.headscale.handleRegister(req.Context(), regReq, ns.conn.Peer())
if err != nil {
var httpErr HTTPError
if errors.As(err, &httpErr) {
if httpErr, ok := errors.AsType[HTTPError](err); ok {
resp = &tailcfg.RegisterResponse{
Error: httpErr.Msg,
}

return &regReq, resp
}

@@ -278,8 +287,9 @@ func (ns *noiseServer) NoiseRegistrationHandler(
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
writer.WriteHeader(http.StatusOK)

if err := json.NewEncoder(writer).Encode(registerResponse); err != nil {
log.Error().Caller().Err(err).Msg("NoiseRegistrationHandler: failed to encode RegisterResponse")
err := json.NewEncoder(writer).Encode(registerResponse)
if err != nil {
log.Error().Caller().Err(err).Msg("noise registration handler: failed to encode RegisterResponse")
return
}

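The registration handler now uses `errors.AsType[HTTPError](err)`, the generic form added in recent Go releases, which returns the matched value and an ok flag instead of filling a pointer out-parameter the way `errors.As` does. Both forms side by side (the `errors.AsType` call assumes a toolchain new enough to ship it):

```go
package main

import (
	"errors"
	"fmt"
)

type HTTPError struct{ Msg string }

func (e HTTPError) Error() string { return e.Msg }

func main() {
	err := fmt.Errorf("handling request: %w", HTTPError{Msg: "forbidden"})

	// Classic form: declare a target and pass its address.
	var httpErr HTTPError
	if errors.As(err, &httpErr) {
		fmt.Println("as:", httpErr.Msg)
	}

	// Generic form: value and ok flag in a single call.
	if httpErr, ok := errors.AsType[HTTPError](err); ok {
		fmt.Println("astype:", httpErr.Msg)
	}
}
```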
@@ -32,8 +32,8 @@ const (
|
||||
|
||||
var (
|
||||
errEmptyOIDCCallbackParams = errors.New("empty OIDC callback params")
|
||||
errNoOIDCIDToken = errors.New("could not extract ID Token for OIDC callback")
|
||||
errNoOIDCRegistrationInfo = errors.New("could not get registration info from cache")
|
||||
errNoOIDCIDToken = errors.New("extracting ID token")
|
||||
errNoOIDCRegistrationInfo = errors.New("registration info not in cache")
|
||||
errOIDCAllowedDomains = errors.New(
|
||||
"authenticated principal does not match any allowed domain",
|
||||
)
|
||||
@@ -68,7 +68,7 @@ func NewAuthProviderOIDC(
|
||||
) (*AuthProviderOIDC, error) {
|
||||
var err error
|
||||
// grab oidc config if it hasn't been already
|
||||
oidcProvider, err := oidc.NewProvider(context.Background(), cfg.Issuer)
|
||||
oidcProvider, err := oidc.NewProvider(context.Background(), cfg.Issuer) //nolint:contextcheck
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating OIDC provider from issuer config: %w", err)
|
||||
}
|
||||
@@ -163,13 +163,14 @@ func (a *AuthProviderOIDC) RegisterHandler(
|
||||
for k, v := range a.cfg.ExtraParams {
|
||||
extras = append(extras, oauth2.SetAuthURLParam(k, v))
|
||||
}
|
||||
|
||||
extras = append(extras, oidc.Nonce(nonce))
|
||||
|
||||
// Cache the registration info
|
||||
a.registrationCache.Set(state, registrationInfo)
|
||||
|
||||
authURL := a.oauth2Config.AuthCodeURL(state, extras...)
|
||||
log.Debug().Caller().Msgf("Redirecting to %s for authentication", authURL)
|
||||
log.Debug().Caller().Msgf("redirecting to %s for authentication", authURL)
|
||||
|
||||
http.Redirect(writer, req, authURL, http.StatusFound)
|
||||
}
|
||||
@@ -190,6 +191,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
}
|
||||
|
||||
stateCookieName := getCookieName("state", state)
|
||||
|
||||
cookieState, err := req.Cookie(stateCookieName)
|
||||
if err != nil {
|
||||
httpError(writer, NewHTTPError(http.StatusBadRequest, "state not found", err))
|
||||
@@ -212,17 +214,20 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
httpError(writer, err)
|
||||
return
|
||||
}
|
||||
|
||||
if idToken.Nonce == "" {
|
||||
httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found in IDToken", err))
|
||||
return
|
||||
}
|
||||
|
||||
nonceCookieName := getCookieName("nonce", idToken.Nonce)
|
||||
|
||||
nonce, err := req.Cookie(nonceCookieName)
|
||||
if err != nil {
|
||||
httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found", err))
|
||||
return
|
||||
}
|
||||
|
||||
if idToken.Nonce != nonce.Value {
|
||||
httpError(writer, NewHTTPError(http.StatusForbidden, "nonce did not match", nil))
|
||||
return
|
||||
@@ -231,7 +236,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
nodeExpiry := a.determineNodeExpiry(idToken.Expiry)
|
||||
|
||||
var claims types.OIDCClaims
|
||||
if err := idToken.Claims(&claims); err != nil {
|
||||
if err := idToken.Claims(&claims); err != nil { //nolint:noinlineerr
|
||||
httpError(writer, fmt.Errorf("decoding ID token claims: %w", err))
|
||||
return
|
||||
}
|
||||
@@ -239,6 +244,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
// Fetch user information (email, groups, name, etc) from the userinfo endpoint
|
||||
// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
|
||||
var userinfo *oidc.UserInfo
|
||||
|
||||
userinfo, err = a.oidcProvider.UserInfo(req.Context(), oauth2.StaticTokenSource(oauth2Token))
|
||||
if err != nil {
|
||||
util.LogErr(err, "could not get userinfo; only using claims from id token")
|
||||
@@ -255,6 +261,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
claims.EmailVerified = cmp.Or(userinfo2.EmailVerified, claims.EmailVerified)
|
||||
claims.Username = cmp.Or(userinfo2.PreferredUsername, claims.Username)
|
||||
claims.Name = cmp.Or(userinfo2.Name, claims.Name)
|
||||
|
||||
claims.ProfilePictureURL = cmp.Or(userinfo2.Picture, claims.ProfilePictureURL)
|
||||
if userinfo2.Groups != nil {
|
||||
claims.Groups = userinfo2.Groups
|
||||
@@ -279,6 +286,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
Msgf("could not create or update user")
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
_, werr := writer.Write([]byte("Could not create or update user"))
|
||||
if werr != nil {
|
||||
log.Error().
|
||||
@@ -299,6 +307,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
// Register the node if it does not exist.
|
||||
if registrationId != nil {
|
||||
verb := "Reauthenticated"
|
||||
|
||||
newNode, err := a.handleRegistration(user, *registrationId, nodeExpiry)
|
||||
if err != nil {
|
||||
if errors.Is(err, db.ErrNodeNotFoundRegistrationCache) {
|
||||
@@ -307,7 +316,9 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
httpError(writer, err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -316,15 +327,12 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
}
|
||||
|
||||
// TODO(kradalby): replace with go-elem
|
||||
content, err := renderOIDCCallbackTemplate(user, verb)
|
||||
if err != nil {
|
||||
httpError(writer, err)
|
||||
return
|
||||
}
|
||||
content := renderOIDCCallbackTemplate(user, verb)
|
||||
|
||||
writer.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
if _, err := writer.Write(content.Bytes()); err != nil {
|
||||
|
||||
if _, err := writer.Write(content.Bytes()); err != nil { //nolint:noinlineerr
|
||||
util.LogErr(err, "Failed to write HTTP response")
|
||||
}
|
||||
|
||||
@@ -370,6 +378,7 @@ func (a *AuthProviderOIDC) getOauth2Token(
|
||||
if !ok {
|
||||
return nil, NewHTTPError(http.StatusNotFound, "registration not found", errNoOIDCRegistrationInfo)
|
||||
}
|
||||
|
||||
if regInfo.Verifier != nil {
|
||||
exchangeOpts = []oauth2.AuthCodeOption{oauth2.VerifierOption(*regInfo.Verifier)}
|
||||
}
|
||||
@@ -377,7 +386,7 @@ func (a *AuthProviderOIDC) getOauth2Token(
|
||||
|
||||
oauth2Token, err := a.oauth2Config.Exchange(ctx, code, exchangeOpts...)
|
||||
if err != nil {
|
||||
return nil, NewHTTPError(http.StatusForbidden, "invalid code", fmt.Errorf("could not exchange code for token: %w", err))
|
||||
return nil, NewHTTPError(http.StatusForbidden, "invalid code", fmt.Errorf("exchanging code for token: %w", err))
|
||||
}
|
||||
|
||||
return oauth2Token, err
|
||||
@@ -394,9 +403,10 @@ func (a *AuthProviderOIDC) extractIDToken(
}

verifier := a.oidcProvider.Verifier(&oidc.Config{ClientID: a.cfg.ClientID})

idToken, err := verifier.Verify(ctx, rawIDToken)
if err != nil {
return nil, NewHTTPError(http.StatusForbidden, "failed to verify id_token", fmt.Errorf("failed to verify ID token: %w", err))
return nil, NewHTTPError(http.StatusForbidden, "failed to verify id_token", fmt.Errorf("verifying ID token: %w", err))
}

return idToken, nil
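For context on verifier.Verify: the go-oidc verifier checks the token signature against the provider's published keys as well as issuer, audience and expiry. A self-contained sketch with github.com/coreos/go-oidc/v3/oidc; issuer and client ID are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/coreos/go-oidc/v3/oidc"
)

func verifyIDToken(ctx context.Context, issuer, clientID, rawIDToken string) error {
	provider, err := oidc.NewProvider(ctx, issuer)
	if err != nil {
		return fmt.Errorf("discovering provider: %w", err)
	}

	// Same shape as the call above: a verifier scoped to one client ID.
	idToken, err := provider.Verifier(&oidc.Config{ClientID: clientID}).Verify(ctx, rawIDToken)
	if err != nil {
		return fmt.Errorf("verifying ID token: %w", err)
	}

	var claims struct {
		Email string `json:"email"`
	}

	return idToken.Claims(&claims)
}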
@@ -516,6 +526,7 @@ func (a *AuthProviderOIDC) createOrUpdateUserFromClaim(
newUser bool
c change.Change
)

user, err = a.h.state.GetUserByOIDCIdentifier(claims.Identifier())
if err != nil && !errors.Is(err, db.ErrUserNotFound) {
return nil, change.Change{}, fmt.Errorf("creating or updating user: %w", err)
@@ -561,7 +572,7 @@ func (a *AuthProviderOIDC) handleRegistration(
util.RegisterMethodOIDC,
)
if err != nil {
return false, fmt.Errorf("could not register node: %w", err)
return false, fmt.Errorf("registering node: %w", err)
}

// This is a bit of a back and forth, but we have a bit of a chicken and egg
@@ -589,9 +600,9 @@ func (a *AuthProviderOIDC) handleRegistration(
func renderOIDCCallbackTemplate(
user *types.User,
verb string,
) (*bytes.Buffer, error) {
) *bytes.Buffer {
html := templates.OIDCCallback(user.Display(), verb).Render()
return bytes.NewBufferString(html), nil
return bytes.NewBufferString(html)
}

// getCookieName generates a unique cookie name based on a cookie value.

@@ -19,7 +19,7 @@ func (h *Headscale) WindowsConfigMessage(
) {
writer.Header().Set("Content-Type", "text/html; charset=utf-8")
writer.WriteHeader(http.StatusOK)
writer.Write([]byte(templates.Windows(h.cfg.ServerURL).Render()))
_, _ = writer.Write([]byte(templates.Windows(h.cfg.ServerURL).Render()))
}

// AppleConfigMessage shows a simple message in the browser to point the user to the iOS/MacOS profile and instructions for how to install it.
@@ -29,7 +29,7 @@ func (h *Headscale) AppleConfigMessage(
) {
writer.Header().Set("Content-Type", "text/html; charset=utf-8")
writer.WriteHeader(http.StatusOK)
writer.Write([]byte(templates.Apple(h.cfg.ServerURL).Render()))
_, _ = writer.Write([]byte(templates.Apple(h.cfg.ServerURL).Render()))
}

func (h *Headscale) ApplePlatformConfig(
@@ -37,6 +37,7 @@ func (h *Headscale) ApplePlatformConfig(
req *http.Request,
) {
vars := mux.Vars(req)

platform, ok := vars["platform"]
if !ok {
httpError(writer, NewHTTPError(http.StatusBadRequest, "no platform specified", nil))
@@ -64,17 +65,20 @@ func (h *Headscale) ApplePlatformConfig(

switch platform {
case "macos-standalone":
if err := macosStandaloneTemplate.Execute(&payload, platformConfig); err != nil {
err := macosStandaloneTemplate.Execute(&payload, platformConfig)
if err != nil {
httpError(writer, err)
return
}
case "macos-app-store":
if err := macosAppStoreTemplate.Execute(&payload, platformConfig); err != nil {
err := macosAppStoreTemplate.Execute(&payload, platformConfig)
if err != nil {
httpError(writer, err)
return
}
case "ios":
if err := iosTemplate.Execute(&payload, platformConfig); err != nil {
err := iosTemplate.Execute(&payload, platformConfig)
if err != nil {
httpError(writer, err)
return
}
@@ -90,7 +94,7 @@ func (h *Headscale) ApplePlatformConfig(
}

var content bytes.Buffer
if err := commonTemplate.Execute(&content, config); err != nil {
if err := commonTemplate.Execute(&content, config); err != nil { //nolint:noinlineerr
httpError(writer, err)
return
}
@@ -98,7 +102,7 @@ func (h *Headscale) ApplePlatformConfig(
writer.Header().
Set("Content-Type", "application/x-apple-aspen-config; charset=utf-8")
writer.WriteHeader(http.StatusOK)
writer.Write(content.Bytes())
_, _ = writer.Write(content.Bytes())
}

type AppleMobileConfig struct {

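Several hunks above pull the error assignment out of the if statement and tag the spots that keep the inline form with //nolint:noinlineerr, which suggests a lint rule that forbids inline error assignment. A sketch of the preferred shape under that assumption:

package main

import (
	"bytes"
	"text/template"
)

func render(tmpl *template.Template, cfg any) (*bytes.Buffer, error) {
	var payload bytes.Buffer

	// Preferred by the patch: the error gets its own statement...
	err := tmpl.Execute(&payload, cfg)
	if err != nil {
		return nil, err
	}

	// ...while `if err := tmpl.Execute(&payload, cfg); err != nil { ... }`
	// would need a //nolint:noinlineerr annotation to pass the linter.
	return &payload, nil
}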
@@ -16,15 +16,18 @@ type Match struct {
dests *netipx.IPSet
}

func (m Match) DebugString() string {
func (m *Match) DebugString() string {
var sb strings.Builder

sb.WriteString("Match:\n")
sb.WriteString(" Sources:\n")

for _, prefix := range m.srcs.Prefixes() {
sb.WriteString(" " + prefix.String() + "\n")
}

sb.WriteString(" Destinations:\n")

for _, prefix := range m.dests.Prefixes() {
sb.WriteString(" " + prefix.String() + "\n")
}
@@ -42,7 +45,7 @@ func MatchesFromFilterRules(rules []tailcfg.FilterRule) []Match {
}

func MatchFromFilterRule(rule tailcfg.FilterRule) Match {
dests := []string{}
dests := make([]string, 0, len(rule.DstPorts))
for _, dest := range rule.DstPorts {
dests = append(dests, dest.IP)
}
@@ -93,11 +96,24 @@ func (m *Match) DestsOverlapsPrefixes(prefixes ...netip.Prefix) bool {
return slices.ContainsFunc(prefixes, m.dests.OverlapsPrefix)
}

// DestsIsTheInternet reports if the destination is equal to "the internet"
// DestsIsTheInternet reports if the destination contains "the internet"
// which is a IPSet that represents "autogroup:internet" and is special
// cased for exit nodes.
func (m Match) DestsIsTheInternet() bool {
return m.dests.Equal(util.TheInternet()) ||
m.dests.ContainsPrefix(tsaddr.AllIPv4()) ||
m.dests.ContainsPrefix(tsaddr.AllIPv6())
// This checks if dests is a superset of TheInternet(), which handles
// merged filter rules where TheInternet is combined with other destinations.
func (m *Match) DestsIsTheInternet() bool {
if m.dests.ContainsPrefix(tsaddr.AllIPv4()) ||
m.dests.ContainsPrefix(tsaddr.AllIPv6()) {
return true
}

// Check if dests contains all prefixes of TheInternet (superset check)
theInternet := util.TheInternet()
for _, prefix := range theInternet.Prefixes() {
if !m.dests.ContainsPrefix(prefix) {
return false
}
}

return true
}

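The rewritten DestsIsTheInternet accepts any destination set that covers "the internet", not only an exact match, so filter rules whose destinations were merged with extra prefixes still qualify. A standalone sketch of the same superset check over go4.org/netipx sets (the prefixes are stand-ins, not the real autogroup:internet list):

package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
)

// mustSet builds an IPSet from prefixes; helper for this sketch only.
func mustSet(prefixes ...string) *netipx.IPSet {
	var b netipx.IPSetBuilder
	for _, p := range prefixes {
		b.AddPrefix(netip.MustParsePrefix(p))
	}

	s, err := b.IPSet()
	if err != nil {
		panic(err)
	}

	return s
}

// isSuperset reports whether outer covers every prefix of inner, the same
// shape of check the new DestsIsTheInternet performs against util.TheInternet().
func isSuperset(outer, inner *netipx.IPSet) bool {
	for _, p := range inner.Prefixes() {
		if !outer.ContainsPrefix(p) {
			return false
		}
	}

	return true
}

func main() {
	internet := mustSet("8.0.0.0/7", "11.0.0.0/8")
	merged := mustSet("8.0.0.0/7", "11.0.0.0/8", "100.64.0.1/32")

	fmt.Println(isSuperset(merged, internet)) // true: still "the internet"
}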
@@ -19,18 +19,18 @@ type PolicyManager interface {
MatchersForNode(node types.NodeView) ([]matcher.Match, error)
// BuildPeerMap constructs peer relationship maps for the given nodes
BuildPeerMap(nodes views.Slice[types.NodeView]) map[types.NodeID][]types.NodeView
SSHPolicy(types.NodeView) (*tailcfg.SSHPolicy, error)
SetPolicy([]byte) (bool, error)
SSHPolicy(node types.NodeView) (*tailcfg.SSHPolicy, error)
SetPolicy(pol []byte) (bool, error)
SetUsers(users []types.User) (bool, error)
SetNodes(nodes views.Slice[types.NodeView]) (bool, error)
// NodeCanHaveTag reports whether the given node can have the given tag.
NodeCanHaveTag(types.NodeView, string) bool
NodeCanHaveTag(node types.NodeView, tag string) bool

// TagExists reports whether the given tag is defined in the policy.
TagExists(tag string) bool

// NodeCanApproveRoute reports whether the given node can approve the given route.
NodeCanApproveRoute(types.NodeView, netip.Prefix) bool
NodeCanApproveRoute(node types.NodeView, route netip.Prefix) bool

Version() int
DebugString() string
@@ -38,8 +38,11 @@ type PolicyManager interface {

// NewPolicyManager returns a new policy manager.
func NewPolicyManager(pol []byte, users []types.User, nodes views.Slice[types.NodeView]) (PolicyManager, error) {
var polMan PolicyManager
var err error
var (
polMan PolicyManager
err error
)

polMan, err = policyv2.NewPolicyManager(pol, users, nodes)
if err != nil {
return nil, err
@@ -59,6 +62,7 @@ func PolicyManagersForTest(pol []byte, users []types.User, nodes views.Slice[typ
if err != nil {
return nil, err
}

polMans = append(polMans, pm)
}

@@ -66,7 +70,7 @@ func PolicyManagersForTest(pol []byte, users []types.User, nodes views.Slice[typ
}

func PolicyManagerFuncsForTest(pol []byte) []func([]types.User, views.Slice[types.NodeView]) (PolicyManager, error) {
var polmanFuncs []func([]types.User, views.Slice[types.NodeView]) (PolicyManager, error)
polmanFuncs := make([]func([]types.User, views.Slice[types.NodeView]) (PolicyManager, error), 0, 1)

polmanFuncs = append(polmanFuncs, func(u []types.User, n views.Slice[types.NodeView]) (PolicyManager, error) {
return policyv2.NewPolicyManager(pol, u, n)

@@ -9,7 +9,6 @@ import (
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"tailscale.com/net/tsaddr"
"tailscale.com/types/views"
)

@@ -111,7 +110,7 @@ func ApproveRoutesWithPolicy(pm PolicyManager, nv types.NodeView, currentApprove
}

// Sort and deduplicate
tsaddr.SortPrefixes(newApproved)
slices.SortFunc(newApproved, netip.Prefix.Compare)
newApproved = slices.Compact(newApproved)
newApproved = lo.Filter(newApproved, func(route netip.Prefix, index int) bool {
return route.IsValid()
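The replacement of tsaddr.SortPrefixes with slices.SortFunc uses the method expression netip.Prefix.Compare as the comparator; sorting first also makes duplicates adjacent, which is what slices.Compact needs to deduplicate. A small demonstration:

package main

import (
	"fmt"
	"net/netip"
	"slices"
)

func main() {
	routes := []netip.Prefix{
		netip.MustParsePrefix("192.168.0.0/24"),
		netip.MustParsePrefix("10.0.0.0/24"),
		netip.MustParsePrefix("10.0.0.0/24"), // duplicate
	}

	// The method expression netip.Prefix.Compare is a
	// func(netip.Prefix, netip.Prefix) int, exactly what SortFunc wants.
	slices.SortFunc(routes, netip.Prefix.Compare)

	// Compact removes adjacent equal elements, so it must run after the sort.
	routes = slices.Compact(routes)

	fmt.Println(routes) // [10.0.0.0/24 192.168.0.0/24]
}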
@@ -120,12 +119,13 @@ func ApproveRoutesWithPolicy(pm PolicyManager, nv types.NodeView, currentApprove
// Sort the current approved for comparison
sortedCurrent := make([]netip.Prefix, len(currentApproved))
copy(sortedCurrent, currentApproved)
tsaddr.SortPrefixes(sortedCurrent)
slices.SortFunc(sortedCurrent, netip.Prefix.Compare)

// Only update if the routes actually changed
if !slices.Equal(sortedCurrent, newApproved) {
// Log what changed
var added, kept []netip.Prefix

for _, route := range newApproved {
if !slices.Contains(sortedCurrent, route) {
added = append(added, route)
@@ -136,8 +136,7 @@ func ApproveRoutesWithPolicy(pm PolicyManager, nv types.NodeView, currentApprove

if len(added) > 0 {
log.Debug().
Uint64("node.id", nv.ID().Uint64()).
Str("node.name", nv.Hostname()).
EmbedObject(nv).
Strs("routes.added", util.PrefixesToString(added)).
Strs("routes.kept", util.PrefixesToString(kept)).
Int("routes.total", len(newApproved)).

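EmbedObject(nv) collapses the hand-written node.id and node.name fields into one call; it works because the argument satisfies zerolog's LogObjectMarshaler interface. A sketch of that pattern with an illustrative type (types.NodeView presumably provides the equivalent method for this hunk to compile):

package main

import (
	"os"

	"github.com/rs/zerolog"
)

// node stands in for types.NodeView in this sketch.
type node struct {
	ID       uint64
	Hostname string
}

// MarshalZerologObject lets zerolog inline the node's fields into an event.
func (n node) MarshalZerologObject(e *zerolog.Event) {
	e.Uint64("node.id", n.ID).Str("node.name", n.Hostname)
}

func main() {
	logger := zerolog.New(os.Stdout)

	// One EmbedObject call instead of repeating Uint64(...).Str(...)
	// at every log site.
	logger.Debug().EmbedObject(node{ID: 1, Hostname: "test-node"}).Msg("routes changed")
}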
@@ -3,16 +3,16 @@ package policy
import (
"fmt"
"net/netip"
"slices"
"testing"

policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/types/key"
"tailscale.com/types/ptr"
"tailscale.com/types/views"
)

@@ -32,10 +32,10 @@ func TestApproveRoutesWithPolicy_NeverRemovesApprovedRoutes(t *testing.T) {
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "test-node",
UserID: ptr.To(user1.ID),
User: ptr.To(user1),
UserID: new(user1.ID),
User: new(user1),
RegisterMethod: util.RegisterMethodAuthKey,
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
IPv4: new(netip.MustParseAddr("100.64.0.1")),
Tags: []string{"tag:test"},
}

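These hunks replace ptr.To(x) with new(x). ptr.To is commonly defined as allocating a copy of its argument and returning the address; the new(x) spelling relies on the extended form of new that takes an expression rather than a type, so the two are interchangeable on a toolchain that supports it. A sketch (the generic helper is written out here; the new(expr) form is shown only in a comment since it assumes that newer toolchain):

package main

import "fmt"

// to mirrors how tailscale.com/types/ptr.To is typically defined.
func to[T any](v T) *T { return &v }

func main() {
	a := to(uint(1)) // *uint pointing at a freshly allocated 1

	// Equivalent under the extended spec that permits new with an
	// expression argument, as the tests above now use:
	//
	//	b := new(uint(1))

	fmt.Println(*a)
}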
@@ -44,10 +44,10 @@ func TestApproveRoutesWithPolicy_NeverRemovesApprovedRoutes(t *testing.T) {
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "other-node",
UserID: ptr.To(user2.ID),
User: ptr.To(user2),
UserID: new(user2.ID),
User: new(user2),
RegisterMethod: util.RegisterMethodAuthKey,
IPv4: ptr.To(netip.MustParseAddr("100.64.0.2")),
IPv4: new(netip.MustParseAddr("100.64.0.2")),
}

// Create a policy that auto-approves specific routes
@@ -76,7 +76,7 @@ func TestApproveRoutesWithPolicy_NeverRemovesApprovedRoutes(t *testing.T) {
}`

pm, err := policyv2.NewPolicyManager([]byte(policyJSON), users, views.SliceOf([]types.NodeView{node1.View(), node2.View()}))
assert.NoError(t, err)
require.NoError(t, err)

tests := []struct {
name string
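The assert.NoError to require.NoError swap changes what happens on failure: assert records the failure and lets the test keep running, while require stops it immediately, which protects the code below from using a half-initialized policy manager. A compact illustration (setup is a hypothetical stand-in for the real constructor):

package policy_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func setup() (*struct{}, error) { return &struct{}{}, nil }

func TestSetup(t *testing.T) {
	pm, err := setup()

	// assert: the test is marked failed but execution continues, so a
	// nil pm could still be dereferenced further down.
	assert.NoError(t, err)

	// require: calls t.FailNow on error, so nothing below runs with a
	// broken fixture. That is why the setup checks were upgraded.
	require.NoError(t, err)
	require.NotNil(t, pm)
}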
@@ -194,7 +194,7 @@ func TestApproveRoutesWithPolicy_NeverRemovesApprovedRoutes(t *testing.T) {
assert.Equal(t, tt.wantChanged, gotChanged, "changed flag mismatch: %s", tt.description)

// Sort for comparison since ApproveRoutesWithPolicy sorts the results
tsaddr.SortPrefixes(tt.wantApproved)
slices.SortFunc(tt.wantApproved, netip.Prefix.Compare)
assert.Equal(t, tt.wantApproved, gotApproved, "approved routes mismatch: %s", tt.description)

// Verify that all previously approved routes are still present
@@ -304,20 +304,23 @@ func TestApproveRoutesWithPolicy_NilAndEmptyCases(t *testing.T) {
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "testnode",
UserID: ptr.To(user.ID),
User: ptr.To(user),
UserID: new(user.ID),
User: new(user),
RegisterMethod: util.RegisterMethodAuthKey,
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
IPv4: new(netip.MustParseAddr("100.64.0.1")),
ApprovedRoutes: tt.currentApproved,
}
nodes := types.Nodes{&node}

// Create policy manager or use nil if specified
var pm PolicyManager
var err error
var (
pm PolicyManager
err error
)

if tt.name != "nil_policy_manager" {
pm, err = pmf(users, nodes.ViewSlice())
assert.NoError(t, err)
require.NoError(t, err)
} else {
pm = nil
}
@@ -330,7 +333,7 @@ func TestApproveRoutesWithPolicy_NilAndEmptyCases(t *testing.T) {
if tt.wantApproved == nil {
assert.Nil(t, gotApproved, "expected nil approved routes")
} else {
tsaddr.SortPrefixes(tt.wantApproved)
slices.SortFunc(tt.wantApproved, netip.Prefix.Compare)
assert.Equal(t, tt.wantApproved, gotApproved, "approved routes mismatch")
}
})

@@ -13,7 +13,6 @@ import (
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/ptr"
)

func TestApproveRoutesWithPolicy_NeverRemovesRoutes(t *testing.T) {
@@ -92,8 +91,8 @@ func TestApproveRoutesWithPolicy_NeverRemovesRoutes(t *testing.T) {
announcedRoutes: []netip.Prefix{}, // No routes announced anymore
nodeUser: "test",
wantApproved: []netip.Prefix{
netip.MustParsePrefix("172.16.0.0/16"),
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("172.16.0.0/16"),
netip.MustParsePrefix("192.168.0.0/24"),
},
wantChanged: false,
@@ -124,8 +123,8 @@ func TestApproveRoutesWithPolicy_NeverRemovesRoutes(t *testing.T) {
nodeUser: "test",
nodeTags: []string{"tag:approved"},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("172.16.0.0/16"), // New tag-approved
netip.MustParsePrefix("10.0.0.0/24"), // Previous approval preserved
netip.MustParsePrefix("172.16.0.0/16"), // New tag-approved
},
wantChanged: true,
},
@@ -168,13 +167,13 @@ func TestApproveRoutesWithPolicy_NeverRemovesRoutes(t *testing.T) {
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: tt.nodeHostname,
UserID: ptr.To(user.ID),
User: ptr.To(user),
UserID: new(user.ID),
User: new(user),
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tt.announcedRoutes,
},
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
IPv4: new(netip.MustParseAddr("100.64.0.1")),
ApprovedRoutes: tt.currentApproved,
Tags: tt.nodeTags,
}
@@ -294,13 +293,13 @@ func TestApproveRoutesWithPolicy_EdgeCases(t *testing.T) {
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "testnode",
UserID: ptr.To(user.ID),
User: ptr.To(user),
UserID: new(user.ID),
User: new(user),
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tt.announcedRoutes,
},
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
IPv4: new(netip.MustParseAddr("100.64.0.1")),
ApprovedRoutes: tt.currentApproved,
}
nodes := types.Nodes{&node}
@@ -343,13 +342,13 @@ func TestApproveRoutesWithPolicy_NilPolicyManagerCase(t *testing.T) {
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "testnode",
UserID: ptr.To(user.ID),
User: ptr.To(user),
UserID: new(user.ID),
User: new(user),
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: announcedRoutes,
},
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
IPv4: new(netip.MustParseAddr("100.64.0.1")),
ApprovedRoutes: currentApproved,
}

@@ -14,7 +14,6 @@ import (
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)

var ap = func(ipStr string) *netip.Addr {
@@ -33,6 +32,7 @@ func TestReduceNodes(t *testing.T) {
rules []tailcfg.FilterRule
node *types.Node
}

tests := []struct {
name string
args args
@@ -783,9 +783,11 @@ func TestReduceNodes(t *testing.T) {
for _, v := range gotViews.All() {
got = append(got, v.AsStruct())
}

if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
t.Errorf("ReduceNodes() unexpected result (-want +got):\n%s", diff)
t.Log("Matchers: ")

for _, m := range matchers {
t.Log("\t+", m.DebugString())
}
@@ -796,7 +798,7 @@ func TestReduceNodes(t *testing.T) {

func TestReduceNodesFromPolicy(t *testing.T) {
n := func(id types.NodeID, ip, hostname, username string, routess ...string) *types.Node {
var routes []netip.Prefix
routes := make([]netip.Prefix, 0, len(routess))
for _, route := range routess {
routes = append(routes, netip.MustParsePrefix(route))
}
@@ -891,11 +893,13 @@ func TestReduceNodesFromPolicy(t *testing.T) {
]
}`,
node: n(1, "100.64.0.1", "mobile", "mobile"),
// autogroup:internet does not generate packet filters - it's handled
// by exit node routing via AllowedIPs, not by packet filtering.
// Only server is visible through the mobile -> server:80 rule.
want: types.Nodes{
n(2, "100.64.0.2", "server", "server"),
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
},
wantMatchers: 2,
wantMatchers: 1,
},
{
name: "2788-exit-node-0000-route",
@@ -938,7 +942,7 @@ func TestReduceNodesFromPolicy(t *testing.T) {
n(2, "100.64.0.2", "server", "server"),
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
},
wantMatchers: 2,
wantMatchers: 1,
},
{
name: "2788-exit-node-::0-route",
@@ -981,7 +985,7 @@ func TestReduceNodesFromPolicy(t *testing.T) {
n(2, "100.64.0.2", "server", "server"),
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
},
wantMatchers: 2,
wantMatchers: 1,
},
{
name: "2784-split-exit-node-access",
@@ -1032,8 +1036,11 @@ func TestReduceNodesFromPolicy(t *testing.T) {
for _, tt := range tests {
for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.policy)) {
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
var pm PolicyManager
var err error
var (
pm PolicyManager
err error
)

pm, err = pmf(nil, tt.nodes.ViewSlice())
require.NoError(t, err)

@@ -1051,9 +1058,11 @@ func TestReduceNodesFromPolicy(t *testing.T) {
for _, v := range gotViews.All() {
got = append(got, v.AsStruct())
}

if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
t.Errorf("TestReduceNodesFromPolicy() unexpected result (-want +got):\n%s", diff)
t.Log("Matchers: ")

for _, m := range matchers {
t.Log("\t+", m.DebugString())
}
@@ -1074,21 +1083,21 @@ func TestSSHPolicyRules(t *testing.T) {
nodeUser1 := types.Node{
Hostname: "user1-device",
IPv4: ap("100.64.0.1"),
UserID: ptr.To(uint(1)),
User: ptr.To(users[0]),
UserID: new(uint(1)),
User: new(users[0]),
}
nodeUser2 := types.Node{
Hostname: "user2-device",
IPv4: ap("100.64.0.2"),
UserID: ptr.To(uint(2)),
User: ptr.To(users[1]),
UserID: new(uint(2)),
User: new(users[1]),
}

taggedClient := types.Node{
Hostname: "tagged-client",
IPv4: ap("100.64.0.4"),
UserID: ptr.To(uint(2)),
User: ptr.To(users[1]),
UserID: new(uint(2)),
User: new(users[1]),
Tags: []string{"tag:client"},
}

@@ -1096,8 +1105,8 @@ func TestSSHPolicyRules(t *testing.T) {
nodeTaggedServer := types.Node{
Hostname: "tagged-server",
IPv4: ap("100.64.0.5"),
UserID: ptr.To(uint(1)),
User: ptr.To(users[0]),
UserID: new(uint(1)),
User: new(users[0]),
Tags: []string{"tag:server"},
}

@@ -1231,7 +1240,7 @@ func TestSSHPolicyRules(t *testing.T) {
]
}`,
expectErr: true,
errorMessage: `invalid SSH action "invalid", must be one of: accept, check`,
errorMessage: `invalid SSH action: "invalid", must be one of: accept, check`,
},
{
name: "invalid-check-period",
@@ -1278,7 +1287,7 @@ func TestSSHPolicyRules(t *testing.T) {
]
}`,
expectErr: true,
errorMessage: "autogroup \"autogroup:invalid\" is not supported",
errorMessage: "autogroup not supported for SSH user",
},
{
name: "autogroup-nonroot-should-use-wildcard-with-root-excluded",
@@ -1451,13 +1460,17 @@ func TestSSHPolicyRules(t *testing.T) {
for _, tt := range tests {
for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.policy)) {
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
var pm PolicyManager
var err error
var (
pm PolicyManager
err error
)

pm, err = pmf(users, append(tt.peers, &tt.targetNode).ViewSlice())

if tt.expectErr {
require.Error(t, err)
require.Contains(t, err.Error(), tt.errorMessage)

return
}

@@ -1480,6 +1493,7 @@ func TestReduceRoutes(t *testing.T) {
routes []netip.Prefix
rules []tailcfg.FilterRule
}

tests := []struct {
name string
args args
@@ -2101,6 +2115,7 @@ func TestReduceRoutes(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
matchers := matcher.MatchesFromFilterRules(tt.args.rules)

got := ReduceRoutes(
tt.args.node.View(),
tt.args.routes,

@@ -18,6 +18,7 @@ func ReduceFilterRules(node types.NodeView, rules []tailcfg.FilterRule) []tailcf
for _, rule := range rules {
// record if the rule is actually relevant for the given node.
var dests []tailcfg.NetPortRange

DEST_LOOP:
for _, dest := range rule.DstPorts {
expanded, err := util.ParseIPSet(dest.IP, nil)

@@ -9,6 +9,7 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/policy/policyutil"
v2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
@@ -16,7 +17,6 @@ import (
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
"tailscale.com/util/must"
)

@@ -144,13 +144,13 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"),
User: ptr.To(users[0]),
User: new(users[0]),
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"),
User: ptr.To(users[0]),
User: new(users[0]),
},
},
want: []tailcfg.FilterRule{},
@@ -191,7 +191,7 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
User: new(users[1]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
netip.MustParsePrefix("10.33.0.0/16"),
@@ -202,10 +202,11 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[1]),
User: new(users[1]),
},
},
want: []tailcfg.FilterRule{
// Merged: Both ACL rules combined (same SrcIPs and IPProto)
{
SrcIPs: []string{
"100.64.0.1/32",
@@ -222,23 +223,12 @@ func TestReduceFilterRules(t *testing.T) {
IP: "fd7a:115c:a1e0::1/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "10.33.0.0/16",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
IPProto: []int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},
},
},
},
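The expected IPProto lists switch from the bare numbers 6 and 17 to named constants from the v2 policy package, and now also include both ICMP variants. The numeric values are the IANA protocol numbers; the constant definitions below are a presumed equivalent of what the v2 package exports:

package main

import "fmt"

// Presumed values, matching the IANA assigned Internet protocol numbers.
const (
	ProtocolICMP     = 1 // ICMP for IPv4
	ProtocolTCP      = 6
	ProtocolUDP      = 17
	ProtocolIPv6ICMP = 58 // ICMPv6
)

func main() {
	old := []int{6, 17} // TCP and UDP only
	cur := []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP}

	fmt.Println(old, cur) // [6 17] [6 17 1 58]
}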
@@ -283,19 +273,19 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
User: new(users[1]),
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[2]),
User: new(users[2]),
},
// "internal" exit node
&types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
User: new(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
@@ -344,7 +334,7 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
User: new(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
@@ -353,15 +343,18 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[2]),
User: new(users[2]),
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
User: new(users[1]),
},
},
want: []tailcfg.FilterRule{
// Only the internal:* rule generates filters.
// autogroup:internet does NOT generate packet filters - it's handled
// by exit node routing via AllowedIPs, not by packet filtering.
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
@@ -374,12 +367,7 @@ func TestReduceFilterRules(t *testing.T) {
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: hsExitNodeDestForTest,
IPProto: []int{6, 17},
IPProto: []int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},
},
},
},
@@ -453,7 +441,7 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
User: new(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
@@ -462,15 +450,16 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[2]),
User: new(users[2]),
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
User: new(users[1]),
},
},
want: []tailcfg.FilterRule{
// Merged: Both ACL rules combined (same SrcIPs and IPProto)
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
@@ -482,12 +471,6 @@ func TestReduceFilterRules(t *testing.T) {
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
@@ -519,7 +502,7 @@ func TestReduceFilterRules(t *testing.T) {
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{6, 17},
IPProto: []int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},
},
},
},
@@ -565,7 +548,7 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
User: new(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/16"), netip.MustParsePrefix("16.0.0.0/16")},
},
@@ -574,15 +557,16 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[2]),
User: new(users[2]),
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
User: new(users[1]),
},
},
want: []tailcfg.FilterRule{
// Merged: Both ACL rules combined (same SrcIPs and IPProto)
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
@@ -594,12 +578,6 @@ func TestReduceFilterRules(t *testing.T) {
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "8.0.0.0/8",
Ports: tailcfg.PortRangeAny,
@@ -609,7 +587,7 @@ func TestReduceFilterRules(t *testing.T) {
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
IPProto: []int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},
},
},
},
@@ -655,7 +633,7 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
User: new(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/8"), netip.MustParsePrefix("16.0.0.0/8")},
},
@@ -664,15 +642,16 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[2]),
User: new(users[2]),
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
User: new(users[1]),
},
},
want: []tailcfg.FilterRule{
// Merged: Both ACL rules combined (same SrcIPs and IPProto)
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
@@ -684,12 +663,6 @@ func TestReduceFilterRules(t *testing.T) {
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "8.0.0.0/16",
Ports: tailcfg.PortRangeAny,
@@ -699,7 +672,7 @@ func TestReduceFilterRules(t *testing.T) {
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
IPProto: []int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},
},
},
},
@@ -737,7 +710,7 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
User: new(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")},
},
@@ -747,7 +720,7 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
User: new(users[1]),
},
},
want: []tailcfg.FilterRule{
@@ -767,7 +740,7 @@ func TestReduceFilterRules(t *testing.T) {
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
IPProto: []int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},
},
},
},
@@ -804,13 +777,13 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[3]),
User: new(users[3]),
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
User: new(users[1]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
},
@@ -824,10 +797,14 @@ func TestReduceFilterRules(t *testing.T) {
for _, tt := range tests {
for idx, pmf := range policy.PolicyManagerFuncsForTest([]byte(tt.pol)) {
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
var pm policy.PolicyManager
var err error
var (
pm policy.PolicyManager
err error
)

pm, err = pmf(users, append(tt.peers, tt.node).ViewSlice())
require.NoError(t, err)

got, _ := pm.Filter()
t.Logf("full filter:\n%s", must.Get(json.MarshalIndent(got, "", " ")))
got = policyutil.ReduceFilterRules(tt.node.View(), got)