mirror of
https://github.com/juanfont/headscale.git
synced 2026-03-30 22:22:14 +02:00
Compare commits
142 Commits
kradalby/3
...
fix-noise-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
cdb56bc72a | ||
|
|
fffc58b5d0 | ||
|
|
4aca9d6568 | ||
|
|
3daf45e88a | ||
|
|
b81d6c734d | ||
|
|
c5ef1d3bb9 | ||
|
|
542cdb2cb2 | ||
|
|
5e33259550 | ||
|
|
65880ecb58 | ||
|
|
37c6a9e3a6 | ||
|
|
8423af2732 | ||
|
|
9baa795ddb | ||
|
|
acddd73183 | ||
|
|
47307d19cf | ||
|
|
5c449db125 | ||
|
|
2be94ce19a | ||
|
|
6c59d3e601 | ||
|
|
0acf09bdd2 | ||
|
|
414d3bbbd8 | ||
|
|
0f12e414a6 | ||
|
|
df339cd290 | ||
|
|
610c1daa4d | ||
|
|
84adda226b | ||
|
|
0f97294665 | ||
|
|
3db0a483ed | ||
|
|
7bab8da366 | ||
|
|
48cc98b787 | ||
|
|
61a14bb0e4 | ||
|
|
dc0e52a960 | ||
|
|
107c2f2f70 | ||
|
|
4a7e1475c0 | ||
|
|
cb3b6949ea | ||
|
|
30338441c1 | ||
|
|
25ccb5a161 | ||
|
|
8048f10d13 | ||
|
|
be4fd9ff2d | ||
|
|
1e4fc3f179 | ||
|
|
894e6946dc | ||
|
|
75e56df9e4 | ||
|
|
52d454d0c8 | ||
|
|
f20bd0cf08 | ||
|
|
a8f7fedced | ||
|
|
b668c7a596 | ||
|
|
49744cd467 | ||
|
|
a0d6802d5b | ||
|
|
13ebea192c | ||
|
|
af777f44f4 | ||
|
|
7460bec767 | ||
|
|
ca321d3c13 | ||
|
|
2765fd397f | ||
|
|
d72a06c6c6 | ||
|
|
e816397d54 | ||
|
|
22fccae125 | ||
|
|
6c08b49d63 | ||
|
|
7b7b270126 | ||
|
|
d6c39e65a5 | ||
|
|
8891ec9835 | ||
|
|
095106f498 | ||
|
|
e4fe216e45 | ||
|
|
e6546b2cea | ||
|
|
aae2f7de71 | ||
|
|
cfb308b4a7 | ||
|
|
4bb0241257 | ||
|
|
513544cc11 | ||
|
|
d556df1c36 | ||
|
|
d15ec28799 | ||
|
|
eccf64eb58 | ||
|
|
43afeedde2 | ||
|
|
73613d7f53 | ||
|
|
30d18575be | ||
|
|
70f8141abd | ||
|
|
82958835ce | ||
|
|
9c3a3c5837 | ||
|
|
faf55f5e8f | ||
|
|
e3323b65e5 | ||
|
|
8f60b819ec | ||
|
|
c29bcd2eaf | ||
|
|
890a044ef6 | ||
|
|
8028fa5483 | ||
|
|
a7f981e30e | ||
|
|
e0d8c3c877 | ||
|
|
c1b468f9f4 | ||
|
|
900f4b7b75 | ||
|
|
64f23136a2 | ||
|
|
0f6d312ada | ||
|
|
20dff82f95 | ||
|
|
31c4331a91 | ||
|
|
ce580f8245 | ||
|
|
bfb6fd80df | ||
|
|
3acce2da87 | ||
|
|
4a9a329339 | ||
|
|
dd16567c52 | ||
|
|
e0a436cefc | ||
|
|
53cdeff129 | ||
|
|
7148a690d0 | ||
|
|
4e73133b9f | ||
|
|
4f8724151e | ||
|
|
91730e2a1d | ||
|
|
b5090a01ec | ||
|
|
27f5641341 | ||
|
|
cf3d30b6f6 | ||
|
|
58020696fe | ||
|
|
e44b402fe4 | ||
|
|
835b7eb960 | ||
|
|
95b1fd636e | ||
|
|
834ac27779 | ||
|
|
4a4032a4b0 | ||
|
|
29aa08df0e | ||
|
|
0b1727c337 | ||
|
|
08fe2e4d6c | ||
|
|
cb29cade46 | ||
|
|
f27298c759 | ||
|
|
8baa14ef4a | ||
|
|
ebdbe03639 | ||
|
|
f735502eae | ||
|
|
53d17aa321 | ||
|
|
14f833bdb9 | ||
|
|
9e50071df9 | ||
|
|
c907b0d323 | ||
|
|
97fa117c48 | ||
|
|
b5329ff0f3 | ||
|
|
eac8a57bce | ||
|
|
44af046196 | ||
|
|
4a744f423b | ||
|
|
ca75e096e6 | ||
|
|
ce7c256d1e | ||
|
|
4912ceaaf5 | ||
|
|
d7f7f2c85e | ||
|
|
df184e5276 | ||
|
|
0630fd32e5 | ||
|
|
306aabbbce | ||
|
|
a09b0d1d69 | ||
|
|
362696a5ef | ||
|
|
1f32c8bf61 | ||
|
|
fb137a8fe3 | ||
|
|
c2f28efbd7 | ||
|
|
11f0d4cfdd | ||
|
|
5d300273dc | ||
|
|
7f003ecaff | ||
|
|
2695d1527e | ||
|
|
d32f6707f7 | ||
|
|
89e436f0e6 |
6
.github/ISSUE_TEMPLATE/bug_report.yaml
vendored
6
.github/ISSUE_TEMPLATE/bug_report.yaml
vendored
@@ -6,8 +6,7 @@ body:
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Is this a support request?
|
||||
description:
|
||||
This issue tracker is for bugs and feature requests only. If you need
|
||||
description: This issue tracker is for bugs and feature requests only. If you need
|
||||
help, please use ask in our Discord community
|
||||
options:
|
||||
- label: This is not a support request
|
||||
@@ -15,8 +14,7 @@ body:
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Is there an existing issue for this?
|
||||
description:
|
||||
Please search to see if an issue already exists for the bug you
|
||||
description: Please search to see if an issue already exists for the bug you
|
||||
encountered.
|
||||
options:
|
||||
- label: I have searched the existing issues
|
||||
|
||||
8
.github/ISSUE_TEMPLATE/config.yml
vendored
8
.github/ISSUE_TEMPLATE/config.yml
vendored
@@ -3,9 +3,9 @@ blank_issues_enabled: false
|
||||
|
||||
# Contact links
|
||||
contact_links:
|
||||
- name: "headscale usage documentation"
|
||||
url: "https://github.com/juanfont/headscale/blob/main/docs"
|
||||
about: "Find documentation about how to configure and run headscale."
|
||||
- name: "headscale Discord community"
|
||||
url: "https://discord.gg/xGj2TuqyxY"
|
||||
url: "https://discord.gg/c84AZQhmpx"
|
||||
about: "Please ask and answer questions about usage of headscale here."
|
||||
- name: "headscale usage documentation"
|
||||
url: "https://headscale.net/"
|
||||
about: "Find documentation about how to configure and run headscale."
|
||||
|
||||
80
.github/label-response/needs-more-info.md
vendored
Normal file
80
.github/label-response/needs-more-info.md
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
Thank you for taking the time to report this issue.
|
||||
|
||||
To help us investigate and resolve this, we need more information. Please provide the following:
|
||||
|
||||
> [!TIP]
|
||||
> Most issues turn out to be configuration errors rather than bugs. We encourage you to discuss your problem in our [Discord community](https://discord.gg/c84AZQhmpx) **before** opening an issue. The community can often help identify misconfigurations quickly, saving everyone time.
|
||||
|
||||
## Required Information
|
||||
|
||||
### Environment Details
|
||||
|
||||
- **Headscale version**: (run `headscale version`)
|
||||
- **Tailscale client version**: (run `tailscale version`)
|
||||
- **Operating System**: (e.g., Ubuntu 24.04, macOS 14, Windows 11)
|
||||
- **Deployment method**: (binary, Docker, Kubernetes, etc.)
|
||||
- **Reverse proxy**: (if applicable: nginx, Traefik, Caddy, etc. - include configuration)
|
||||
|
||||
### Debug Information
|
||||
|
||||
Please follow our [Debugging and Troubleshooting Guide](https://headscale.net/stable/ref/debug/) and provide:
|
||||
|
||||
1. **Client netmap dump** (from affected Tailscale client):
|
||||
|
||||
```bash
|
||||
tailscale debug netmap > netmap.json
|
||||
```
|
||||
|
||||
2. **Client status dump** (from affected Tailscale client):
|
||||
|
||||
```bash
|
||||
tailscale status --json > status.json
|
||||
```
|
||||
|
||||
3. **Tailscale client logs** (if experiencing client issues):
|
||||
|
||||
```bash
|
||||
tailscale debug daemon-logs
|
||||
```
|
||||
|
||||
> [!IMPORTANT]
|
||||
> We need logs from **multiple nodes** to understand the full picture:
|
||||
>
|
||||
> - The node(s) initiating connections
|
||||
> - The node(s) being connected to
|
||||
>
|
||||
> Without logs from both sides, we cannot diagnose connectivity issues.
|
||||
|
||||
4. **Headscale server logs** with `log.level: trace` enabled
|
||||
|
||||
5. **Headscale configuration** (with sensitive values redacted - see rules below)
|
||||
|
||||
6. **ACL/Policy configuration** (if using ACLs)
|
||||
|
||||
7. **Proxy/Docker configuration** (if applicable - nginx.conf, docker-compose.yml, Traefik config, etc.)
|
||||
|
||||
## Formatting Requirements
|
||||
|
||||
- **Attach long files** - Do not paste large logs or configurations inline. Use GitHub file attachments or GitHub Gists.
|
||||
- **Use proper Markdown** - Format code blocks, logs, and configurations with appropriate syntax highlighting.
|
||||
- **Structure your response** - Use the headings above to organize your information clearly.
|
||||
|
||||
## Redaction Rules
|
||||
|
||||
> [!CAUTION]
|
||||
> **Replace, do not remove.** Removing information makes debugging impossible.
|
||||
|
||||
When redacting sensitive information:
|
||||
|
||||
- ✅ **Replace consistently** - If you change `alice@company.com` to `user1@example.com`, use `user1@example.com` everywhere (logs, config, policy, etc.)
|
||||
- ✅ **Use meaningful placeholders** - `user1@example.com`, `bob@example.com`, `my-secret-key` are acceptable
|
||||
- ❌ **Never remove information** - Gaps in data prevent us from correlating events across logs
|
||||
- ❌ **Never redact IP addresses** - We need the actual IPs to trace network paths and identify issues
|
||||
|
||||
**If redaction rules are not followed, we will be unable to debug the issue and will have to close it.**
|
||||
|
||||
---
|
||||
|
||||
**Note:** This issue will be automatically closed in 3 days if no additional information is provided. Once you reply with the requested information, the `needs-more-info` label will be removed automatically.
|
||||
|
||||
If you need help gathering this information, please visit our [Discord community](https://discord.gg/c84AZQhmpx).
|
||||
15
.github/label-response/support-request.md
vendored
Normal file
15
.github/label-response/support-request.md
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
Thank you for reaching out.
|
||||
|
||||
This issue tracker is used for **bug reports and feature requests** only. Your question appears to be a support or configuration question rather than a bug report.
|
||||
|
||||
For help with setup, configuration, or general questions, please visit our [Discord community](https://discord.gg/c84AZQhmpx) where the community and maintainers can assist you in real-time.
|
||||
|
||||
**Before posting in Discord, please check:**
|
||||
|
||||
- [Documentation](https://headscale.net/)
|
||||
- [FAQ](https://headscale.net/stable/faq/)
|
||||
- [Debugging and Troubleshooting Guide](https://headscale.net/stable/ref/debug/)
|
||||
|
||||
If after troubleshooting you determine this is actually a bug, please open a new issue with the required debug information from the troubleshooting guide.
|
||||
|
||||
This issue has been automatically closed.
|
||||
18
.github/workflows/integration-test-template.yml
vendored
18
.github/workflows/integration-test-template.yml
vendored
@@ -67,6 +67,24 @@ jobs:
|
||||
with:
|
||||
name: postgres-image
|
||||
path: /tmp/artifacts
|
||||
- name: Pin Docker to v28 (avoid v29 breaking changes)
|
||||
run: |
|
||||
# Docker 29 breaks docker build via Go client libraries and
|
||||
# docker load/save with certain tarball formats.
|
||||
# Pin to Docker 28.x until our tooling is updated.
|
||||
# https://github.com/actions/runner-images/issues/13474
|
||||
sudo install -m 0755 -d /etc/apt/keyrings
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg \
|
||||
| sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
|
||||
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
|
||||
https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" \
|
||||
| sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
sudo apt-get update -qq
|
||||
VERSION=$(apt-cache madison docker-ce | grep '28\.5' | head -1 | awk '{print $3}')
|
||||
sudo apt-get install -y --allow-downgrades \
|
||||
"docker-ce=${VERSION}" "docker-ce-cli=${VERSION}"
|
||||
sudo systemctl restart docker
|
||||
docker version
|
||||
- name: Load Docker images, Go cache, and prepare binary
|
||||
run: |
|
||||
gunzip -c /tmp/artifacts/headscale-image.tar.gz | docker load
|
||||
|
||||
28
.github/workflows/needs-more-info-comment.yml
vendored
Normal file
28
.github/workflows/needs-more-info-comment.yml
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
name: Needs More Info - Post Comment
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [labeled]
|
||||
|
||||
jobs:
|
||||
post-comment:
|
||||
if: >-
|
||||
github.event.label.name == 'needs-more-info' &&
|
||||
github.repository == 'juanfont/headscale'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
contents: read
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
sparse-checkout: .github/label-response/needs-more-info.md
|
||||
sparse-checkout-cone-mode: false
|
||||
|
||||
- name: Post instruction comment
|
||||
run: gh issue comment "$NUMBER" --body-file .github/label-response/needs-more-info.md
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GH_REPO: ${{ github.repository }}
|
||||
NUMBER: ${{ github.event.issue.number }}
|
||||
98
.github/workflows/needs-more-info-timer.yml
vendored
Normal file
98
.github/workflows/needs-more-info-timer.yml
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
name: Needs More Info - Timer
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * *" # Daily at midnight UTC
|
||||
issue_comment:
|
||||
types: [created]
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
# When a non-bot user comments on a needs-more-info issue, remove the label.
|
||||
remove-label-on-response:
|
||||
if: >-
|
||||
github.repository == 'juanfont/headscale' &&
|
||||
github.event_name == 'issue_comment' &&
|
||||
github.event.comment.user.type != 'Bot' &&
|
||||
contains(github.event.issue.labels.*.name, 'needs-more-info')
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
steps:
|
||||
- name: Remove needs-more-info label
|
||||
run: gh issue edit "$NUMBER" --remove-label needs-more-info
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GH_REPO: ${{ github.repository }}
|
||||
NUMBER: ${{ github.event.issue.number }}
|
||||
|
||||
# On schedule, close issues that have had no human response for 3 days.
|
||||
close-stale:
|
||||
if: >-
|
||||
github.repository == 'juanfont/headscale' &&
|
||||
github.event_name != 'issue_comment'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
steps:
|
||||
- uses: hustcer/setup-nu@920172d92eb04671776f3ba69d605d3b09351c30 # v3.22
|
||||
with:
|
||||
version: "*"
|
||||
|
||||
- name: Close stale needs-more-info issues
|
||||
shell: nu {0}
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GH_REPO: ${{ github.repository }}
|
||||
run: |
|
||||
let issues = (gh issue list
|
||||
--repo $env.GH_REPO
|
||||
--label "needs-more-info"
|
||||
--state open
|
||||
--json number
|
||||
| from json)
|
||||
|
||||
for issue in $issues {
|
||||
let number = $issue.number
|
||||
print $"Checking issue #($number)"
|
||||
|
||||
# Find when needs-more-info was last added
|
||||
let events = (gh api $"repos/($env.GH_REPO)/issues/($number)/events"
|
||||
--paginate | from json | flatten)
|
||||
let label_event = ($events
|
||||
| where event == "labeled" and label.name == "needs-more-info"
|
||||
| last)
|
||||
let label_added_at = ($label_event.created_at | into datetime)
|
||||
|
||||
# Check for non-bot comments after the label was added
|
||||
let comments = (gh api $"repos/($env.GH_REPO)/issues/($number)/comments"
|
||||
--paginate | from json | flatten)
|
||||
let human_responses = ($comments
|
||||
| where user.type != "Bot"
|
||||
| where { ($in.created_at | into datetime) > $label_added_at })
|
||||
|
||||
if ($human_responses | length) > 0 {
|
||||
print $" Human responded, removing label"
|
||||
gh issue edit $number --repo $env.GH_REPO --remove-label needs-more-info
|
||||
continue
|
||||
}
|
||||
|
||||
# Check if 3 days have passed
|
||||
let elapsed = (date now) - $label_added_at
|
||||
if $elapsed < 3day {
|
||||
print $" Only ($elapsed | format duration day) elapsed, skipping"
|
||||
continue
|
||||
}
|
||||
|
||||
print $" No response for ($elapsed | format duration day), closing"
|
||||
let message = [
|
||||
"This issue has been automatically closed because no additional information was provided within 3 days."
|
||||
""
|
||||
"If you have the requested information, please open a new issue and include the debug information requested above."
|
||||
""
|
||||
"Thank you for your understanding."
|
||||
] | str join "\n"
|
||||
gh issue comment $number --repo $env.GH_REPO --body $message
|
||||
gh issue close $number --repo $env.GH_REPO --reason "not planned"
|
||||
gh issue edit $number --repo $env.GH_REPO --remove-label needs-more-info
|
||||
}
|
||||
19
.github/workflows/release.yml
vendored
19
.github/workflows/release.yml
vendored
@@ -17,6 +17,25 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Pin Docker to v28 (avoid v29 breaking changes)
|
||||
run: |
|
||||
# Docker 29 breaks docker build via Go client libraries and
|
||||
# docker load/save with certain tarball formats.
|
||||
# Pin to Docker 28.x until our tooling is updated.
|
||||
# https://github.com/actions/runner-images/issues/13474
|
||||
sudo install -m 0755 -d /etc/apt/keyrings
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg \
|
||||
| sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
|
||||
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
|
||||
https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" \
|
||||
| sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
sudo apt-get update -qq
|
||||
VERSION=$(apt-cache madison docker-ce | grep '28\.5' | head -1 | awk '{print $3}')
|
||||
sudo apt-get install -y --allow-downgrades \
|
||||
"docker-ce=${VERSION}" "docker-ce-cli=${VERSION}"
|
||||
sudo systemctl restart docker
|
||||
docker version
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
|
||||
2
.github/workflows/stale.yml
vendored
2
.github/workflows/stale.yml
vendored
@@ -23,5 +23,5 @@ jobs:
|
||||
since being marked as stale."
|
||||
days-before-pr-stale: -1
|
||||
days-before-pr-close: -1
|
||||
exempt-issue-labels: "no-stale-bot"
|
||||
exempt-issue-labels: "no-stale-bot,needs-more-info"
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
30
.github/workflows/support-request.yml
vendored
Normal file
30
.github/workflows/support-request.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
name: Support Request - Close Issue
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [labeled]
|
||||
|
||||
jobs:
|
||||
close-support-request:
|
||||
if: >-
|
||||
github.event.label.name == 'support-request' &&
|
||||
github.repository == 'juanfont/headscale'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
contents: read
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
sparse-checkout: .github/label-response/support-request.md
|
||||
sparse-checkout-cone-mode: false
|
||||
|
||||
- name: Post comment and close issue
|
||||
run: |
|
||||
gh issue comment "$NUMBER" --body-file .github/label-response/support-request.md
|
||||
gh issue close "$NUMBER" --reason "not planned"
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GH_REPO: ${{ github.repository }}
|
||||
NUMBER: ${{ github.event.issue.number }}
|
||||
46
.github/workflows/test-integration.yaml
vendored
46
.github/workflows/test-integration.yaml
vendored
@@ -69,6 +69,25 @@ jobs:
|
||||
name: go-cache
|
||||
path: go-cache.tar.gz
|
||||
retention-days: 10
|
||||
- name: Pin Docker to v28 (avoid v29 breaking changes)
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
run: |
|
||||
# Docker 29 breaks docker build via Go client libraries and
|
||||
# docker load/save with certain tarball formats.
|
||||
# Pin to Docker 28.x until our tooling is updated.
|
||||
# https://github.com/actions/runner-images/issues/13474
|
||||
sudo install -m 0755 -d /etc/apt/keyrings
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg \
|
||||
| sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
|
||||
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
|
||||
https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" \
|
||||
| sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
sudo apt-get update -qq
|
||||
VERSION=$(apt-cache madison docker-ce | grep '28\.5' | head -1 | awk '{print $3}')
|
||||
sudo apt-get install -y --allow-downgrades \
|
||||
"docker-ce=${VERSION}" "docker-ce-cli=${VERSION}"
|
||||
sudo systemctl restart docker
|
||||
docker version
|
||||
- name: Build headscale image
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
run: |
|
||||
@@ -104,6 +123,24 @@ jobs:
|
||||
needs: build
|
||||
if: needs.build.outputs.files-changed == 'true'
|
||||
steps:
|
||||
- name: Pin Docker to v28 (avoid v29 breaking changes)
|
||||
run: |
|
||||
# Docker 29 breaks docker build via Go client libraries and
|
||||
# docker load/save with certain tarball formats.
|
||||
# Pin to Docker 28.x until our tooling is updated.
|
||||
# https://github.com/actions/runner-images/issues/13474
|
||||
sudo install -m 0755 -d /etc/apt/keyrings
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg \
|
||||
| sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
|
||||
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
|
||||
https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" \
|
||||
| sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
sudo apt-get update -qq
|
||||
VERSION=$(apt-cache madison docker-ce | grep '28\.5' | head -1 | awk '{print $3}')
|
||||
sudo apt-get install -y --allow-downgrades \
|
||||
"docker-ce=${VERSION}" "docker-ce-cli=${VERSION}"
|
||||
sudo systemctl restart docker
|
||||
docker version
|
||||
- name: Pull and save postgres image
|
||||
run: |
|
||||
docker pull postgres:latest
|
||||
@@ -192,6 +229,7 @@ jobs:
|
||||
- TestUpdateHostnameFromClient
|
||||
- TestExpireNode
|
||||
- TestSetNodeExpiryInFuture
|
||||
- TestDisableNodeExpiry
|
||||
- TestNodeOnlineStatus
|
||||
- TestPingAllByIPManyUpDown
|
||||
- Test2118DeletingOnlineNodePanics
|
||||
@@ -216,6 +254,13 @@ jobs:
|
||||
- TestSSHIsBlockedInACL
|
||||
- TestSSHUserOnlyIsolation
|
||||
- TestSSHAutogroupSelf
|
||||
- TestSSHOneUserToOneCheckModeCLI
|
||||
- TestSSHOneUserToOneCheckModeOIDC
|
||||
- TestSSHCheckModeUnapprovedTimeout
|
||||
- TestSSHCheckModeCheckPeriodCLI
|
||||
- TestSSHCheckModeAutoApprove
|
||||
- TestSSHCheckModeNegativeCLI
|
||||
- TestSSHLocalpart
|
||||
- TestTagsAuthKeyWithTagRequestDifferentTag
|
||||
- TestTagsAuthKeyWithTagNoAdvertiseFlag
|
||||
- TestTagsAuthKeyWithTagCannotAddViaCLI
|
||||
@@ -247,6 +292,7 @@ jobs:
|
||||
- TestTagsUserLoginReauthWithEmptyTagsRemovesAllTags
|
||||
- TestTagsAuthKeyWithoutUserInheritsTags
|
||||
- TestTagsAuthKeyWithoutUserRejectsAdvertisedTags
|
||||
- TestTagsAuthKeyConvertToUserViaCLIRegister
|
||||
uses: ./.github/workflows/integration-test-template.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
|
||||
@@ -18,6 +18,7 @@ linters:
|
||||
- lll
|
||||
- maintidx
|
||||
- makezero
|
||||
- mnd
|
||||
- musttag
|
||||
- nestif
|
||||
- nolintlint
|
||||
@@ -37,6 +38,23 @@ linters:
|
||||
time.Sleep is forbidden.
|
||||
In tests: use assert.EventuallyWithT for polling/waiting patterns.
|
||||
In production code: use a backoff strategy (e.g., cenkalti/backoff) or proper synchronization primitives.
|
||||
# Forbid inline string literals in zerolog field methods - use zf.* constants
|
||||
- pattern: '\.(Str|Int|Int8|Int16|Int32|Int64|Uint|Uint8|Uint16|Uint32|Uint64|Float32|Float64|Bool|Dur|Time|TimeDiff|Strs|Ints|Uints|Floats|Bools|Any|Interface)\("[^"]+"'
|
||||
msg: >-
|
||||
Use zf.* constants for zerolog field names instead of string literals.
|
||||
Import "github.com/juanfont/headscale/hscontrol/util/zlog/zf" and use
|
||||
constants like zf.NodeID, zf.UserName, etc. Add new constants to
|
||||
hscontrol/util/zlog/zf/fields.go if needed.
|
||||
# Forbid ptr.To - use Go 1.26 new(expr) instead
|
||||
- pattern: 'ptr\.To\('
|
||||
msg: >-
|
||||
ptr.To is forbidden. Use Go 1.26's new(expr) syntax instead.
|
||||
Example: ptr.To(value) → new(value)
|
||||
# Forbid tsaddr.SortPrefixes - use slices.SortFunc with netip.Prefix.Compare
|
||||
- pattern: 'tsaddr\.SortPrefixes'
|
||||
msg: >-
|
||||
tsaddr.SortPrefixes is forbidden. Use Go 1.26's netip.Prefix.Compare instead.
|
||||
Example: slices.SortFunc(prefixes, netip.Prefix.Compare)
|
||||
analyze-types: true
|
||||
gocritic:
|
||||
disabled-checks:
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
version: 2
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy -compat=1.25
|
||||
- go mod tidy -compat=1.26
|
||||
- go mod vendor
|
||||
|
||||
release:
|
||||
@@ -13,29 +13,6 @@ release:
|
||||
|
||||
Please follow the steps outlined in the [upgrade guide](https://headscale.net/stable/setup/upgrade/) to update your existing Headscale installation.
|
||||
|
||||
**It's best to update from one stable version to the next** (e.g., 0.24.0 → 0.25.1 → 0.26.1) in case you are multiple releases behind. You should always pick the latest available patch release.
|
||||
|
||||
Be sure to check the changelog above for version-specific upgrade instructions and breaking changes.
|
||||
|
||||
### Backup Your Database
|
||||
|
||||
**Always backup your database before upgrading.** Here's how to backup a SQLite database:
|
||||
|
||||
```bash
|
||||
# Stop headscale
|
||||
systemctl stop headscale
|
||||
|
||||
# Backup sqlite database
|
||||
cp /var/lib/headscale/db.sqlite /var/lib/headscale/db.sqlite.backup
|
||||
|
||||
# Backup sqlite WAL/SHM files (if they exist)
|
||||
cp /var/lib/headscale/db.sqlite-wal /var/lib/headscale/db.sqlite-wal.backup
|
||||
cp /var/lib/headscale/db.sqlite-shm /var/lib/headscale/db.sqlite-shm.backup
|
||||
|
||||
# Start headscale (migration will run automatically)
|
||||
systemctl start headscale
|
||||
```
|
||||
|
||||
builds:
|
||||
- id: headscale
|
||||
main: ./cmd/headscale
|
||||
|
||||
2
.mdformat.toml
Normal file
2
.mdformat.toml
Normal file
@@ -0,0 +1,2 @@
|
||||
[plugin.mkdocs]
|
||||
align_semantic_breaks_in_lists = true
|
||||
@@ -43,26 +43,20 @@ repos:
|
||||
entry: prettier --write --list-different
|
||||
language: system
|
||||
exclude: ^docs/
|
||||
types_or:
|
||||
[
|
||||
javascript,
|
||||
jsx,
|
||||
ts,
|
||||
tsx,
|
||||
yaml,
|
||||
json,
|
||||
toml,
|
||||
html,
|
||||
css,
|
||||
scss,
|
||||
sass,
|
||||
markdown,
|
||||
]
|
||||
types_or: [javascript, jsx, ts, tsx, yaml, json, toml, html, css, scss, sass, markdown]
|
||||
|
||||
# mdformat for docs
|
||||
- id: mdformat
|
||||
name: mdformat
|
||||
entry: mdformat
|
||||
language: system
|
||||
types_or: [markdown]
|
||||
files: ^docs/
|
||||
|
||||
# golangci-lint for Go code quality
|
||||
- id: golangci-lint
|
||||
name: golangci-lint
|
||||
entry: nix develop --command golangci-lint run --new-from-rev=HEAD~1 --timeout=5m --fix
|
||||
entry: nix develop --command -- golangci-lint run --new-from-rev=HEAD~1 --timeout=5m --fix
|
||||
language: system
|
||||
types: [go]
|
||||
pass_filenames: false
|
||||
|
||||
@@ -1,5 +1,2 @@
|
||||
.github/workflows/test-integration-v2*
|
||||
docs/about/features.md
|
||||
docs/ref/api.md
|
||||
docs/ref/configuration.md
|
||||
docs/ref/oidc.md
|
||||
docs/
|
||||
|
||||
65
CHANGELOG.md
65
CHANGELOG.md
@@ -1,6 +1,65 @@
|
||||
# CHANGELOG
|
||||
|
||||
## 0.28.0 (202x-xx-xx)
|
||||
## 0.29.0 (202x-xx-xx)
|
||||
|
||||
**Minimum supported Tailscale client version: v1.76.0**
|
||||
|
||||
### Tailscale ACL compatibility improvements
|
||||
|
||||
Extensive test cases were systematically generated using Tailscale clients and the official SaaS
|
||||
to understand how the packet filter should be generated. We discovered a few differences, but
|
||||
overall our implementation was very close.
|
||||
[#3036](https://github.com/juanfont/headscale/pull/3036)
|
||||
|
||||
### SSH check action
|
||||
|
||||
SSH rules with `"action": "check"` are now supported. When a client initiates a SSH connection to a node
|
||||
with a `check` action policy, the user is prompted to authenticate via OIDC or CLI approval before access
|
||||
is granted.
|
||||
|
||||
A new `headscale auth` CLI command group supports the approval flow:
|
||||
|
||||
- `headscale auth approve --auth-id <id>` approves a pending authentication request (SSH check or web auth)
|
||||
- `headscale auth reject --auth-id <id>` rejects a pending authentication request
|
||||
- `headscale auth register --auth-id <id> --user <user>` registers a node (replaces deprecated `headscale nodes register`)
|
||||
|
||||
[#1850](https://github.com/juanfont/headscale/pull/1850)
|
||||
|
||||
### BREAKING
|
||||
|
||||
- **ACL Policy**: Wildcard (`*`) in ACL sources and destinations now resolves to Tailscale's CGNAT range (`100.64.0.0/10`) and ULA range (`fd7a:115c:a1e0::/48`) instead of all IPs (`0.0.0.0/0` and `::/0`) [#3036](https://github.com/juanfont/headscale/pull/3036)
|
||||
- This better matches Tailscale's security model where `*` means "any node in the tailnet" rather than "any IP address"
|
||||
- Policies relying on wildcard to match non-Tailscale IPs will need to use explicit CIDR ranges instead
|
||||
- **Note**: Users with non-standard IP ranges configured in `prefixes.ipv4` or `prefixes.ipv6` (which is unsupported and produces a warning) will need to explicitly specify their CIDR ranges in ACL rules instead of using `*`
|
||||
- **ACL Policy**: Validate autogroup:self source restrictions matching Tailscale behavior - tags, hosts, and IPs are rejected as sources for autogroup:self destinations [#3036](https://github.com/juanfont/headscale/pull/3036)
|
||||
- Policies using tags, hosts, or IP addresses as sources for autogroup:self destinations will now fail validation
|
||||
- **Upgrade path**: Headscale now enforces a strict version upgrade path [#3083](https://github.com/juanfont/headscale/pull/3083)
|
||||
- Skipping minor versions (e.g. 0.27 → 0.29) is blocked; upgrade one minor version at a time
|
||||
- Downgrading to a previous minor version is blocked
|
||||
- Patch version changes within the same minor are always allowed
|
||||
- **ACL Policy**: The `proto:icmp` protocol name now only includes ICMPv4 (protocol 1), matching Tailscale behavior [#3036](https://github.com/juanfont/headscale/pull/3036)
|
||||
- Previously, `proto:icmp` included both ICMPv4 and ICMPv6
|
||||
- Use `proto:ipv6-icmp` or protocol number `58` explicitly for ICMPv6
|
||||
- **CLI**: `headscale nodes register` is deprecated in favour of `headscale auth register --auth-id <id> --user <user>` [#1850](https://github.com/juanfont/headscale/pull/1850)
|
||||
- The old command continues to work but will be removed in a future release
|
||||
|
||||
### Changes
|
||||
|
||||
- **SSH Policy**: Add support for `localpart:*@<domain>` in SSH rule `users` field, mapping each matching user's email local-part as their OS username [#3091](https://github.com/juanfont/headscale/pull/3091)
|
||||
- **ACL Policy**: Add ICMP and IPv6-ICMP protocols to default filter rules when no protocol is specified [#3036](https://github.com/juanfont/headscale/pull/3036)
|
||||
- **ACL Policy**: Fix autogroup:self handling for tagged nodes - tagged nodes no longer incorrectly receive autogroup:self filter rules [#3036](https://github.com/juanfont/headscale/pull/3036)
|
||||
- **ACL Policy**: Use CIDR format for autogroup:self destination IPs matching Tailscale behavior [#3036](https://github.com/juanfont/headscale/pull/3036)
|
||||
- **ACL Policy**: Merge filter rules with identical SrcIPs and IPProto matching Tailscale behavior - multiple ACL rules with the same source now produce a single FilterRule with combined DstPorts [#3036](https://github.com/juanfont/headscale/pull/3036)
|
||||
- Remove deprecated `--namespace` flag from `nodes list`, `nodes register`, and `debug create-node` commands (use `--user` instead) [#3093](https://github.com/juanfont/headscale/pull/3093)
|
||||
- Remove deprecated `namespace`/`ns` command aliases for `users` and `machine`/`machines` aliases for `nodes` [#3093](https://github.com/juanfont/headscale/pull/3093)
|
||||
- Add SSH `check` action support with OIDC and CLI-based approval flows [#1850](https://github.com/juanfont/headscale/pull/1850)
|
||||
- Add `headscale auth register`, `headscale auth approve`, and `headscale auth reject` CLI commands [#1850](https://github.com/juanfont/headscale/pull/1850)
|
||||
- Add `auth` related routes to the API. The `auth/register` endpoint now expects data as JSON [#1850](https://github.com/juanfont/headscale/pull/1850)
|
||||
- Deprecate `headscale nodes register --key` in favour of `headscale auth register --auth-id` [#1850](https://github.com/juanfont/headscale/pull/1850)
|
||||
- Generalise auth templates into reusable `AuthSuccess` and `AuthWeb` components [#1850](https://github.com/juanfont/headscale/pull/1850)
|
||||
- Unify auth pipeline with `AuthVerdict` type, supporting registration, reauthentication, and SSH checks [#1850](https://github.com/juanfont/headscale/pull/1850)
|
||||
|
||||
## 0.28.0 (2026-02-04)
|
||||
|
||||
**Minimum supported Tailscale client version: v1.74.0**
|
||||
|
||||
@@ -162,9 +221,7 @@ sequentially through each stable release, selecting the latest patch version ava
|
||||
- Fix autogroup:self preventing visibility of nodes matched by other ACL rules [#2882](https://github.com/juanfont/headscale/pull/2882)
|
||||
- Fix nodes being rejected after pre-authentication key expiration [#2917](https://github.com/juanfont/headscale/pull/2917)
|
||||
- Fix list-routes command respecting identifier filter with JSON output [#2927](https://github.com/juanfont/headscale/pull/2927)
|
||||
- **API Key CLI**: Add `--id` flag to expire/delete commands as alternative to `--prefix` [#3016](https://github.com/juanfont/headscale/pull/3016)
|
||||
- `headscale apikeys expire --id <ID>` or `--prefix <PREFIX>`
|
||||
- `headscale apikeys delete --id <ID>` or `--prefix <PREFIX>`
|
||||
- Add `--id` flag to expire/delete commands as alternative to `--prefix` for API Keys [#3016](https://github.com/juanfont/headscale/pull/3016)
|
||||
|
||||
## 0.27.1 (2025-11-11)
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# For testing purposes only
|
||||
|
||||
FROM golang:alpine AS build-env
|
||||
FROM golang:1.26.1-alpine AS build-env
|
||||
|
||||
WORKDIR /go/src
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# and are in no way endorsed by Headscale's maintainers as an
|
||||
# official nor supported release or distribution.
|
||||
|
||||
FROM docker.io/golang:1.25-trixie AS builder
|
||||
FROM docker.io/golang:1.26.1-trixie AS builder
|
||||
ARG VERSION=dev
|
||||
ENV GOPATH /go
|
||||
WORKDIR /go/src/headscale
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
# This Dockerfile is more or less lifted from tailscale/tailscale
|
||||
# to ensure a similar build process when testing the HEAD of tailscale.
|
||||
|
||||
FROM golang:1.25-alpine AS build-env
|
||||
FROM golang:1.26.1-alpine AS build-env
|
||||
|
||||
WORKDIR /go/src
|
||||
|
||||
|
||||
17
Makefile
17
Makefile
@@ -21,7 +21,7 @@ endef
|
||||
# Source file collections using shell find for better performance
|
||||
GO_SOURCES := $(shell find . -name '*.go' -not -path './gen/*' -not -path './vendor/*')
|
||||
PROTO_SOURCES := $(shell find . -name '*.proto' -not -path './gen/*' -not -path './vendor/*')
|
||||
DOC_SOURCES := $(shell find . \( -name '*.md' -o -name '*.yaml' -o -name '*.yml' -o -name '*.ts' -o -name '*.js' -o -name '*.html' -o -name '*.css' -o -name '*.scss' -o -name '*.sass' \) -not -path './gen/*' -not -path './vendor/*' -not -path './node_modules/*')
|
||||
PRETTIER_SOURCES := $(shell find . \( -name '*.md' -o -name '*.yaml' -o -name '*.yml' -o -name '*.ts' -o -name '*.js' -o -name '*.html' -o -name '*.css' -o -name '*.scss' -o -name '*.sass' \) -not -path './gen/*' -not -path './vendor/*' -not -path './node_modules/*')
|
||||
|
||||
# Default target
|
||||
.PHONY: all
|
||||
@@ -33,6 +33,7 @@ check-deps:
|
||||
$(call check_tool,go)
|
||||
$(call check_tool,golangci-lint)
|
||||
$(call check_tool,gofumpt)
|
||||
$(call check_tool,mdformat)
|
||||
$(call check_tool,prettier)
|
||||
$(call check_tool,clang-format)
|
||||
$(call check_tool,buf)
|
||||
@@ -52,7 +53,7 @@ test: check-deps $(GO_SOURCES) go.mod go.sum
|
||||
|
||||
# Formatting targets
|
||||
.PHONY: fmt
|
||||
fmt: fmt-go fmt-prettier fmt-proto
|
||||
fmt: fmt-go fmt-mdformat fmt-prettier fmt-proto
|
||||
|
||||
.PHONY: fmt-go
|
||||
fmt-go: check-deps $(GO_SOURCES)
|
||||
@@ -60,9 +61,14 @@ fmt-go: check-deps $(GO_SOURCES)
|
||||
gofumpt -l -w .
|
||||
golangci-lint run --fix
|
||||
|
||||
.PHONY: fmt-mdformat
|
||||
fmt-mdformat: check-deps
|
||||
@echo "Formatting documentation..."
|
||||
mdformat docs/
|
||||
|
||||
.PHONY: fmt-prettier
|
||||
fmt-prettier: check-deps $(DOC_SOURCES)
|
||||
@echo "Formatting documentation and config files..."
|
||||
fmt-prettier: check-deps $(PRETTIER_SOURCES)
|
||||
@echo "Formatting markup and config files..."
|
||||
prettier --write '**/*.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
|
||||
.PHONY: fmt-proto
|
||||
@@ -116,7 +122,8 @@ help:
|
||||
@echo ""
|
||||
@echo "Specific targets:"
|
||||
@echo " fmt-go - Format Go code only"
|
||||
@echo " fmt-prettier - Format documentation only"
|
||||
@echo " fmt-mdformat - Format documentation only"
|
||||
@echo " fmt-prettier - Format markup and config files only"
|
||||
@echo " fmt-proto - Format Protocol Buffer files only"
|
||||
@echo " lint-go - Lint Go code only"
|
||||
@echo " lint-proto - Lint Protocol Buffer files only"
|
||||
|
||||
@@ -67,6 +67,8 @@ For NixOS users, a module is available in [`nix/`](./nix/).
|
||||
|
||||
## Talks
|
||||
|
||||
- Fosdem 2026 (video): [Headscale & Tailscale: The complementary open source clone](https://fosdem.org/2026/schedule/event/KYQ3LL-headscale-the-complementary-open-source-clone/)
|
||||
- presented by Kristoffer Dalby
|
||||
- Fosdem 2023 (video): [Headscale: How we are using integration testing to reimplement Tailscale](https://fosdem.org/2023/schedule/event/goheadscale/)
|
||||
- presented by Juan Font Alonso and Kristoffer Dalby
|
||||
|
||||
@@ -105,6 +107,8 @@ run `make lint` and `make fmt` before committing any code.
|
||||
The **Proto** code is linted with [`buf`](https://docs.buf.build/lint/overview) and
|
||||
formatted with [`clang-format`](https://clang.llvm.org/docs/ClangFormat.html).
|
||||
|
||||
The **docs** are formatted with [`mdformat`](https://mdformat.readthedocs.io).
|
||||
|
||||
The **rest** (Markdown, YAML, etc) is formatted with [`prettier`](https://prettier.io).
|
||||
|
||||
Check out the `.golangci.yaml` and `Makefile` to see the specific configuration.
|
||||
|
||||
@@ -1,20 +1,18 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
// 90 days.
|
||||
// DefaultAPIKeyExpiry is 90 days.
|
||||
DefaultAPIKeyExpiry = "90d"
|
||||
)
|
||||
|
||||
@@ -46,55 +44,35 @@ var listAPIKeys = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List the Api keys for headscale",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListApiKeysRequest{}
|
||||
|
||||
response, err := client.ListApiKeys(ctx, request)
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
response, err := client.ListApiKeys(ctx, &v1.ListApiKeysRequest{})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting the list of keys: %s", err),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("listing api keys: %w", err)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetApiKeys(), "", output)
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{
|
||||
{"ID", "Prefix", "Expiration", "Created"},
|
||||
}
|
||||
for _, key := range response.GetApiKeys() {
|
||||
expiration := "-"
|
||||
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = ColourTime(key.GetExpiration().AsTime())
|
||||
return printListOutput(cmd, response.GetApiKeys(), func() error {
|
||||
tableData := pterm.TableData{
|
||||
{"ID", "Prefix", "Expiration", "Created"},
|
||||
}
|
||||
|
||||
tableData = append(tableData, []string{
|
||||
strconv.FormatUint(key.GetId(), util.Base10),
|
||||
key.GetPrefix(),
|
||||
expiration,
|
||||
key.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
|
||||
})
|
||||
for _, key := range response.GetApiKeys() {
|
||||
expiration := "-"
|
||||
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = ColourTime(key.GetExpiration().AsTime())
|
||||
}
|
||||
|
||||
tableData = append(tableData, []string{
|
||||
strconv.FormatUint(key.GetId(), util.Base10),
|
||||
key.GetPrefix(),
|
||||
expiration,
|
||||
key.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
|
||||
})
|
||||
}
|
||||
|
||||
return pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
})
|
||||
}),
|
||||
}
|
||||
|
||||
var createAPIKeyCmd = &cobra.Command{
|
||||
@@ -105,137 +83,79 @@ Creates a new Api key, the Api key is only visible on creation
|
||||
and cannot be retrieved again.
|
||||
If you loose a key, create a new one and revoke (expire) the old one.`,
|
||||
Aliases: []string{"c", "new"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
request := &v1.CreateApiKeyRequest{}
|
||||
|
||||
durationStr, _ := cmd.Flags().GetString("expiration")
|
||||
|
||||
duration, err := model.ParseDuration(durationStr)
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
expiration, err := expirationFromFlag(cmd)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Could not parse duration: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
expiration := time.Now().UTC().Add(time.Duration(duration))
|
||||
|
||||
request.Expiration = timestamppb.New(expiration)
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.CreateApiKey(ctx, request)
|
||||
response, err := client.CreateApiKey(ctx, &v1.CreateApiKeyRequest{
|
||||
Expiration: expiration,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("creating api key: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetApiKey(), response.GetApiKey(), output)
|
||||
},
|
||||
return printOutput(cmd, response.GetApiKey(), response.GetApiKey())
|
||||
}),
|
||||
}
|
||||
|
||||
// apiKeyIDOrPrefix reads --id and --prefix from cmd and validates that
|
||||
// exactly one is provided.
|
||||
func apiKeyIDOrPrefix(cmd *cobra.Command) (uint64, string, error) {
|
||||
id, _ := cmd.Flags().GetUint64("id")
|
||||
prefix, _ := cmd.Flags().GetString("prefix")
|
||||
|
||||
switch {
|
||||
case id == 0 && prefix == "":
|
||||
return 0, "", fmt.Errorf("either --id or --prefix must be provided: %w", errMissingParameter)
|
||||
case id != 0 && prefix != "":
|
||||
return 0, "", fmt.Errorf("only one of --id or --prefix can be provided: %w", errMissingParameter)
|
||||
}
|
||||
|
||||
return id, prefix, nil
|
||||
}
|
||||
|
||||
var expireAPIKeyCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Short: "Expire an ApiKey",
|
||||
Aliases: []string{"revoke", "exp", "e"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
id, _ := cmd.Flags().GetUint64("id")
|
||||
prefix, _ := cmd.Flags().GetString("prefix")
|
||||
|
||||
switch {
|
||||
case id == 0 && prefix == "":
|
||||
ErrorOutput(
|
||||
errMissingParameter,
|
||||
"Either --id or --prefix must be provided",
|
||||
output,
|
||||
)
|
||||
case id != 0 && prefix != "":
|
||||
ErrorOutput(
|
||||
errMissingParameter,
|
||||
"Only one of --id or --prefix can be provided",
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ExpireApiKeyRequest{}
|
||||
if id != 0 {
|
||||
request.Id = id
|
||||
} else {
|
||||
request.Prefix = prefix
|
||||
}
|
||||
|
||||
response, err := client.ExpireApiKey(ctx, request)
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
id, prefix, err := apiKeyIDOrPrefix(cmd)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot expire Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key expired", output)
|
||||
},
|
||||
response, err := client.ExpireApiKey(ctx, &v1.ExpireApiKeyRequest{
|
||||
Id: id,
|
||||
Prefix: prefix,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("expiring api key: %w", err)
|
||||
}
|
||||
|
||||
return printOutput(cmd, response, "Key expired")
|
||||
}),
|
||||
}
|
||||
|
||||
var deleteAPIKeyCmd = &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete an ApiKey",
|
||||
Aliases: []string{"remove", "del"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
id, _ := cmd.Flags().GetUint64("id")
|
||||
prefix, _ := cmd.Flags().GetString("prefix")
|
||||
|
||||
switch {
|
||||
case id == 0 && prefix == "":
|
||||
ErrorOutput(
|
||||
errMissingParameter,
|
||||
"Either --id or --prefix must be provided",
|
||||
output,
|
||||
)
|
||||
case id != 0 && prefix != "":
|
||||
ErrorOutput(
|
||||
errMissingParameter,
|
||||
"Only one of --id or --prefix can be provided",
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.DeleteApiKeyRequest{}
|
||||
if id != 0 {
|
||||
request.Id = id
|
||||
} else {
|
||||
request.Prefix = prefix
|
||||
}
|
||||
|
||||
response, err := client.DeleteApiKey(ctx, request)
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
id, prefix, err := apiKeyIDOrPrefix(cmd)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot delete Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key deleted", output)
|
||||
},
|
||||
response, err := client.DeleteApiKey(ctx, &v1.DeleteApiKeyRequest{
|
||||
Id: id,
|
||||
Prefix: prefix,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting api key: %w", err)
|
||||
}
|
||||
|
||||
return printOutput(cmd, response, "Key deleted")
|
||||
}),
|
||||
}
|
||||
|
||||
93
cmd/headscale/cli/auth.go
Normal file
93
cmd/headscale/cli/auth.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(authCmd)
|
||||
|
||||
authRegisterCmd.Flags().StringP("user", "u", "", "User")
|
||||
authRegisterCmd.Flags().String("auth-id", "", "Auth ID")
|
||||
mustMarkRequired(authRegisterCmd, "user", "auth-id")
|
||||
authCmd.AddCommand(authRegisterCmd)
|
||||
|
||||
authApproveCmd.Flags().String("auth-id", "", "Auth ID")
|
||||
mustMarkRequired(authApproveCmd, "auth-id")
|
||||
authCmd.AddCommand(authApproveCmd)
|
||||
|
||||
authRejectCmd.Flags().String("auth-id", "", "Auth ID")
|
||||
mustMarkRequired(authRejectCmd, "auth-id")
|
||||
authCmd.AddCommand(authRejectCmd)
|
||||
}
|
||||
|
||||
var authCmd = &cobra.Command{
|
||||
Use: "auth",
|
||||
Short: "Manage node authentication and approval",
|
||||
}
|
||||
|
||||
var authRegisterCmd = &cobra.Command{
|
||||
Use: "register",
|
||||
Short: "Register a node to your network",
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
user, _ := cmd.Flags().GetString("user")
|
||||
authID, _ := cmd.Flags().GetString("auth-id")
|
||||
|
||||
request := &v1.AuthRegisterRequest{
|
||||
AuthId: authID,
|
||||
User: user,
|
||||
}
|
||||
|
||||
response, err := client.AuthRegister(ctx, request)
|
||||
if err != nil {
|
||||
return fmt.Errorf("registering node: %w", err)
|
||||
}
|
||||
|
||||
return printOutput(
|
||||
cmd,
|
||||
response.GetNode(),
|
||||
fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()))
|
||||
}),
|
||||
}
|
||||
|
||||
var authApproveCmd = &cobra.Command{
|
||||
Use: "approve",
|
||||
Short: "Approve a pending authentication request",
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
authID, _ := cmd.Flags().GetString("auth-id")
|
||||
|
||||
request := &v1.AuthApproveRequest{
|
||||
AuthId: authID,
|
||||
}
|
||||
|
||||
response, err := client.AuthApprove(ctx, request)
|
||||
if err != nil {
|
||||
return fmt.Errorf("approving auth request: %w", err)
|
||||
}
|
||||
|
||||
return printOutput(cmd, response, "Auth request approved")
|
||||
}),
|
||||
}
|
||||
|
||||
var authRejectCmd = &cobra.Command{
|
||||
Use: "reject",
|
||||
Short: "Reject a pending authentication request",
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
authID, _ := cmd.Flags().GetString("auth-id")
|
||||
|
||||
request := &v1.AuthRejectRequest{
|
||||
AuthId: authID,
|
||||
}
|
||||
|
||||
response, err := client.AuthReject(ctx, request)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rejecting auth request: %w", err)
|
||||
}
|
||||
|
||||
return printOutput(cmd, response, "Auth request rejected")
|
||||
}),
|
||||
}
|
||||
@@ -1,7 +1,8 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog/log"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -13,10 +14,12 @@ var configTestCmd = &cobra.Command{
|
||||
Use: "configtest",
|
||||
Short: "Test the configuration.",
|
||||
Long: "Run a test of the configuration and exit.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
_, err := newHeadscaleServerWithConfig()
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err).Msg("Error initializing")
|
||||
return fmt.Errorf("configuration error: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,44 +1,22 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
|
||||
type Error string
|
||||
|
||||
func (e Error) Error() string { return string(e) }
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(debugCmd)
|
||||
|
||||
createNodeCmd.Flags().StringP("name", "", "", "Name")
|
||||
err := createNodeCmd.MarkFlagRequired("name")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
createNodeCmd.Flags().StringP("user", "u", "", "User")
|
||||
|
||||
createNodeCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
createNodeNamespaceFlag := createNodeCmd.Flags().Lookup("namespace")
|
||||
createNodeNamespaceFlag.Deprecated = deprecateNamespaceMessage
|
||||
createNodeNamespaceFlag.Hidden = true
|
||||
|
||||
err = createNodeCmd.MarkFlagRequired("user")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
createNodeCmd.Flags().StringP("key", "k", "", "Key")
|
||||
err = createNodeCmd.MarkFlagRequired("key")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
mustMarkRequired(createNodeCmd, "name", "user", "key")
|
||||
|
||||
createNodeCmd.Flags().
|
||||
StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to advertise")
|
||||
|
||||
@@ -53,54 +31,18 @@ var debugCmd = &cobra.Command{
|
||||
|
||||
var createNodeCmd = &cobra.Command{
|
||||
Use: "create-node",
|
||||
Short: "Create a node that can be registered with `nodes register <>` command",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
Short: "Create a node that can be registered with `auth register <>` command",
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
user, _ := cmd.Flags().GetString("user")
|
||||
name, _ := cmd.Flags().GetString("name")
|
||||
registrationID, _ := cmd.Flags().GetString("key")
|
||||
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
_, err := types.AuthIDFromString(registrationID)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
return fmt.Errorf("parsing machine key: %w", err)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
name, err := cmd.Flags().GetString("name")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
registrationID, err := cmd.Flags().GetString("key")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
_, err = types.RegistrationIDFromString(registrationID)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to parse machine key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
routes, err := cmd.Flags().GetStringSlice("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting routes from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
routes, _ := cmd.Flags().GetStringSlice("route")
|
||||
|
||||
request := &v1.DebugCreateNodeRequest{
|
||||
Key: registrationID,
|
||||
@@ -111,13 +53,9 @@ var createNodeCmd = &cobra.Command{
|
||||
|
||||
response, err := client.DebugCreateNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot create node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("creating node: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetNode(), "Node created", output)
|
||||
},
|
||||
return printOutput(cmd, response.GetNode(), "Node created")
|
||||
}),
|
||||
}
|
||||
|
||||
@@ -15,14 +15,12 @@ var dumpConfigCmd = &cobra.Command{
|
||||
Use: "dumpConfig",
|
||||
Short: "dump current config to /etc/headscale/config.dump.yaml, integration test only",
|
||||
Hidden: true,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
err := viper.WriteConfigAs("/etc/headscale/config.dump.yaml")
|
||||
if err != nil {
|
||||
//nolint
|
||||
fmt.Println("Failed to dump config")
|
||||
return fmt.Errorf("dumping config: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -21,22 +21,17 @@ var generateCmd = &cobra.Command{
|
||||
var generatePrivateKeyCmd = &cobra.Command{
|
||||
Use: "private-key",
|
||||
Short: "Generate a private key for the headscale server",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
machineKey := key.NewMachine()
|
||||
|
||||
machineKeyStr, err := machineKey.MarshalText()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("marshalling machine key: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(map[string]string{
|
||||
return printOutput(cmd, map[string]string{
|
||||
"private_key": string(machineKeyStr),
|
||||
},
|
||||
string(machineKeyStr), output)
|
||||
string(machineKeyStr))
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -13,17 +16,12 @@ var healthCmd = &cobra.Command{
|
||||
Use: "health",
|
||||
Short: "Check the health of the Headscale server",
|
||||
Long: "Check the health of the Headscale server. This command will return an exit code of 0 if the server is healthy, or 1 if it is not.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
response, err := client.Health(ctx, &v1.HealthRequest{})
|
||||
if err != nil {
|
||||
ErrorOutput(err, "Error checking health", output)
|
||||
return fmt.Errorf("checking health: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(response, "", output)
|
||||
},
|
||||
return printOutput(cmd, response, "")
|
||||
}),
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -10,15 +10,22 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/util/zlog/zf"
|
||||
"github.com/oauth2-proxy/mockoidc"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
|
||||
type Error string
|
||||
|
||||
func (e Error) Error() string { return string(e) }
|
||||
|
||||
const (
|
||||
errMockOidcClientIDNotDefined = Error("MOCKOIDC_CLIENT_ID not defined")
|
||||
errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
|
||||
errMockOidcPortNotDefined = Error("MOCKOIDC_PORT not defined")
|
||||
errMockOidcUsersNotDefined = Error("MOCKOIDC_USERS not defined")
|
||||
refreshTTL = 60 * time.Minute
|
||||
)
|
||||
|
||||
@@ -32,12 +39,13 @@ var mockOidcCmd = &cobra.Command{
|
||||
Use: "mockoidc",
|
||||
Short: "Runs a mock OIDC server for testing",
|
||||
Long: "This internal command runs a OpenID Connect for testing purposes",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
err := mockOIDC()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("Error running mock OIDC server")
|
||||
os.Exit(1)
|
||||
return fmt.Errorf("running mock OIDC server: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -46,41 +54,47 @@ func mockOIDC() error {
|
||||
if clientID == "" {
|
||||
return errMockOidcClientIDNotDefined
|
||||
}
|
||||
|
||||
clientSecret := os.Getenv("MOCKOIDC_CLIENT_SECRET")
|
||||
if clientSecret == "" {
|
||||
return errMockOidcClientSecretNotDefined
|
||||
}
|
||||
|
||||
addrStr := os.Getenv("MOCKOIDC_ADDR")
|
||||
if addrStr == "" {
|
||||
return errMockOidcPortNotDefined
|
||||
}
|
||||
|
||||
portStr := os.Getenv("MOCKOIDC_PORT")
|
||||
if portStr == "" {
|
||||
return errMockOidcPortNotDefined
|
||||
}
|
||||
|
||||
accessTTLOverride := os.Getenv("MOCKOIDC_ACCESS_TTL")
|
||||
if accessTTLOverride != "" {
|
||||
newTTL, err := time.ParseDuration(accessTTLOverride)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
accessTTL = newTTL
|
||||
}
|
||||
|
||||
userStr := os.Getenv("MOCKOIDC_USERS")
|
||||
if userStr == "" {
|
||||
return errors.New("MOCKOIDC_USERS not defined")
|
||||
return errMockOidcUsersNotDefined
|
||||
}
|
||||
|
||||
var users []mockoidc.MockUser
|
||||
|
||||
err := json.Unmarshal([]byte(userStr), &users)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unmarshalling users: %w", err)
|
||||
}
|
||||
|
||||
log.Info().Interface("users", users).Msg("loading users from JSON")
|
||||
log.Info().Interface(zf.Users, users).Msg("loading users from JSON")
|
||||
|
||||
log.Info().Msgf("Access token TTL: %s", accessTTL)
|
||||
log.Info().Msgf("access token TTL: %s", accessTTL)
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
@@ -92,7 +106,7 @@ func mockOIDC() error {
|
||||
return err
|
||||
}
|
||||
|
||||
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addrStr, port))
|
||||
listener, err := new(net.ListenConfig).Listen(context.Background(), "tcp", fmt.Sprintf("%s:%d", addrStr, port))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -101,8 +115,10 @@ func mockOIDC() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info().Msgf("Mock OIDC server listening on %s", listener.Addr().String())
|
||||
log.Info().Msgf("Issuer: %s", mock.Issuer())
|
||||
|
||||
log.Info().Msgf("mock OIDC server listening on %s", listener.Addr().String())
|
||||
log.Info().Msgf("issuer: %s", mock.Issuer())
|
||||
|
||||
c := make(chan struct{})
|
||||
<-c
|
||||
|
||||
@@ -133,12 +149,13 @@ func getMockOIDC(clientID string, clientSecret string, users []mockoidc.MockUser
|
||||
ErrorQueue: &mockoidc.ErrorQueue{},
|
||||
}
|
||||
|
||||
mock.AddMiddleware(func(h http.Handler) http.Handler {
|
||||
_ = mock.AddMiddleware(func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
log.Info().Msgf("Request: %+v", r)
|
||||
log.Info().Msgf("request: %+v", r)
|
||||
h.ServeHTTP(w, r)
|
||||
|
||||
if r.Response != nil {
|
||||
log.Info().Msgf("Response: %+v", r.Response)
|
||||
log.Info().Msgf("response: %+v", r.Response)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/samber/lo"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
@@ -21,63 +20,37 @@ import (
|
||||
func init() {
|
||||
rootCmd.AddCommand(nodeCmd)
|
||||
listNodesCmd.Flags().StringP("user", "u", "", "Filter by user")
|
||||
|
||||
listNodesCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
listNodesNamespaceFlag := listNodesCmd.Flags().Lookup("namespace")
|
||||
listNodesNamespaceFlag.Deprecated = deprecateNamespaceMessage
|
||||
listNodesNamespaceFlag.Hidden = true
|
||||
nodeCmd.AddCommand(listNodesCmd)
|
||||
|
||||
listNodeRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
nodeCmd.AddCommand(listNodeRoutesCmd)
|
||||
|
||||
registerNodeCmd.Flags().StringP("user", "u", "", "User")
|
||||
|
||||
registerNodeCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
registerNodeNamespaceFlag := registerNodeCmd.Flags().Lookup("namespace")
|
||||
registerNodeNamespaceFlag.Deprecated = deprecateNamespaceMessage
|
||||
registerNodeNamespaceFlag.Hidden = true
|
||||
|
||||
err := registerNodeCmd.MarkFlagRequired("user")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
registerNodeCmd.Flags().StringP("key", "k", "", "Key")
|
||||
err = registerNodeCmd.MarkFlagRequired("key")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
mustMarkRequired(registerNodeCmd, "user", "key")
|
||||
nodeCmd.AddCommand(registerNodeCmd)
|
||||
|
||||
expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
expireNodeCmd.Flags().StringP("expiry", "e", "", "Set expire to (RFC3339 format, e.g. 2025-08-27T10:00:00Z), or leave empty to expire immediately.")
|
||||
err = expireNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
expireNodeCmd.Flags().BoolP("disable", "d", false, "Disable key expiry (node will never expire)")
|
||||
mustMarkRequired(expireNodeCmd, "identifier")
|
||||
nodeCmd.AddCommand(expireNodeCmd)
|
||||
|
||||
renameNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = renameNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
mustMarkRequired(renameNodeCmd, "identifier")
|
||||
nodeCmd.AddCommand(renameNodeCmd)
|
||||
|
||||
deleteNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = deleteNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
mustMarkRequired(deleteNodeCmd, "identifier")
|
||||
nodeCmd.AddCommand(deleteNodeCmd)
|
||||
|
||||
tagCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
tagCmd.MarkFlagRequired("identifier")
|
||||
mustMarkRequired(tagCmd, "identifier")
|
||||
tagCmd.Flags().StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
|
||||
nodeCmd.AddCommand(tagCmd)
|
||||
|
||||
approveRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
approveRoutesCmd.MarkFlagRequired("identifier")
|
||||
mustMarkRequired(approveRoutesCmd, "identifier")
|
||||
approveRoutesCmd.Flags().StringSliceP("routes", "r", []string{}, `List of routes that will be approved (comma-separated, e.g. "10.0.0.0/8,192.168.0.0/24" or empty string to remove all approved routes)`)
|
||||
nodeCmd.AddCommand(approveRoutesCmd)
|
||||
|
||||
@@ -87,31 +60,16 @@ func init() {
|
||||
var nodeCmd = &cobra.Command{
|
||||
Use: "nodes",
|
||||
Short: "Manage the nodes of Headscale",
|
||||
Aliases: []string{"node", "machine", "machines"},
|
||||
Aliases: []string{"node"},
|
||||
}
|
||||
|
||||
var registerNodeCmd = &cobra.Command{
|
||||
Use: "register",
|
||||
Short: "Registers a node to your network",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
registrationID, err := cmd.Flags().GetString("key")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
Use: "register",
|
||||
Short: "Registers a node to your network",
|
||||
Deprecated: "use 'headscale auth register --auth-id <id> --user <user>' instead",
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
user, _ := cmd.Flags().GetString("user")
|
||||
registrationID, _ := cmd.Flags().GetString("key")
|
||||
|
||||
request := &v1.RegisterNodeRequest{
|
||||
Key: registrationID,
|
||||
@@ -120,98 +78,49 @@ var registerNodeCmd = &cobra.Command{
|
||||
|
||||
response, err := client.RegisterNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot register node: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("registering node: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(
|
||||
return printOutput(
|
||||
cmd,
|
||||
response.GetNode(),
|
||||
fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()), output)
|
||||
},
|
||||
fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()))
|
||||
}),
|
||||
}
|
||||
|
||||
var listNodesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List nodes",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
user, _ := cmd.Flags().GetString("user")
|
||||
|
||||
response, err := client.ListNodes(ctx, &v1.ListNodesRequest{User: user})
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
return fmt.Errorf("listing nodes: %w", err)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
return printListOutput(cmd, response.GetNodes(), func() error {
|
||||
tableData, err := nodesToPtables(user, response.GetNodes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("converting to table: %w", err)
|
||||
}
|
||||
|
||||
request := &v1.ListNodesRequest{
|
||||
User: user,
|
||||
}
|
||||
|
||||
response, err := client.ListNodes(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot get nodes: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetNodes(), "", output)
|
||||
}
|
||||
|
||||
tableData, err := nodesToPtables(user, response.GetNodes())
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
return pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
})
|
||||
}),
|
||||
}
|
||||
|
||||
var listNodeRoutesCmd = &cobra.Command{
|
||||
Use: "list-routes",
|
||||
Short: "List routes available on nodes",
|
||||
Aliases: []string{"lsr", "routes"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
identifier, _ := cmd.Flags().GetUint64("identifier")
|
||||
|
||||
response, err := client.ListNodes(ctx, &v1.ListNodesRequest{})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListNodesRequest{}
|
||||
|
||||
response, err := client.ListNodes(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot get nodes: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("listing nodes: %w", err)
|
||||
}
|
||||
|
||||
nodes := response.GetNodes()
|
||||
@@ -219,6 +128,7 @@ var listNodeRoutesCmd = &cobra.Command{
|
||||
for _, node := range response.GetNodes() {
|
||||
if node.GetId() == identifier {
|
||||
nodes = []*v1.Node{node}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -228,73 +138,51 @@ var listNodeRoutesCmd = &cobra.Command{
|
||||
return (n.GetSubnetRoutes() != nil && len(n.GetSubnetRoutes()) > 0) || (n.GetApprovedRoutes() != nil && len(n.GetApprovedRoutes()) > 0) || (n.GetAvailableRoutes() != nil && len(n.GetAvailableRoutes()) > 0)
|
||||
})
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(nodes, "", output)
|
||||
return
|
||||
}
|
||||
|
||||
tableData, err := nodeRoutesToPtables(nodes)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
return printListOutput(cmd, nodes, func() error {
|
||||
return pterm.DefaultTable.WithHasHeader().WithData(nodeRoutesToPtables(nodes)).Render()
|
||||
})
|
||||
}),
|
||||
}
|
||||
|
||||
var expireNodeCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Short: "Expire (log out) a node in your network",
|
||||
Long: "Expiring a node will keep the node in the database and force it to reauthenticate.",
|
||||
Use: "expire",
|
||||
Short: "Expire (log out) a node in your network",
|
||||
Long: `Expiring a node will keep the node in the database and force it to reauthenticate.
|
||||
|
||||
Use --disable to disable key expiry (node will never expire).`,
|
||||
Aliases: []string{"logout", "exp", "e"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
identifier, _ := cmd.Flags().GetUint64("identifier")
|
||||
disableExpiry, _ := cmd.Flags().GetBool("disable")
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
// Handle disable expiry - node will never expire.
|
||||
if disableExpiry {
|
||||
request := &v1.ExpireNodeRequest{
|
||||
NodeId: identifier,
|
||||
DisableExpiry: true,
|
||||
}
|
||||
|
||||
response, err := client.ExpireNode(ctx, request)
|
||||
if err != nil {
|
||||
return fmt.Errorf("disabling node expiry: %w", err)
|
||||
}
|
||||
|
||||
return printOutput(cmd, response.GetNode(), "Node expiry disabled")
|
||||
}
|
||||
|
||||
expiry, err := cmd.Flags().GetString("expiry")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting expiry to string: %s", err),
|
||||
output,
|
||||
)
|
||||
expiry, _ := cmd.Flags().GetString("expiry")
|
||||
|
||||
return
|
||||
}
|
||||
now := time.Now()
|
||||
|
||||
expiryTime := now
|
||||
if expiry != "" {
|
||||
var err error
|
||||
expiryTime, err = time.Parse(time.RFC3339, expiry)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting expiry to string: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
return fmt.Errorf("parsing expiry time: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ExpireNodeRequest{
|
||||
NodeId: identifier,
|
||||
Expiry: timestamppb.New(expiryTime),
|
||||
@@ -302,47 +190,28 @@ var expireNodeCmd = &cobra.Command{
|
||||
|
||||
response, err := client.ExpireNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot expire node: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("expiring node: %w", err)
|
||||
}
|
||||
|
||||
if now.Equal(expiryTime) || now.After(expiryTime) {
|
||||
SuccessOutput(response.GetNode(), "Node expired", output)
|
||||
} else {
|
||||
SuccessOutput(response.GetNode(), "Node expiration updated", output)
|
||||
return printOutput(cmd, response.GetNode(), "Node expired")
|
||||
}
|
||||
},
|
||||
|
||||
return printOutput(cmd, response.GetNode(), "Node expiration updated")
|
||||
}),
|
||||
}
|
||||
|
||||
var renameNodeCmd = &cobra.Command{
|
||||
Use: "rename NEW_NAME",
|
||||
Short: "Renames a node in your network",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
identifier, _ := cmd.Flags().GetUint64("identifier")
|
||||
|
||||
newName := ""
|
||||
if len(args) > 0 {
|
||||
newName = args[0]
|
||||
}
|
||||
|
||||
request := &v1.RenameNodeRequest{
|
||||
NodeId: identifier,
|
||||
NewName: newName,
|
||||
@@ -350,39 +219,19 @@ var renameNodeCmd = &cobra.Command{
|
||||
|
||||
response, err := client.RenameNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot rename node: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("renaming node: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetNode(), "Node renamed", output)
|
||||
},
|
||||
return printOutput(cmd, response.GetNode(), "Node renamed")
|
||||
}),
|
||||
}
|
||||
|
||||
var deleteNodeCmd = &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete a node",
|
||||
Aliases: []string{"del"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
identifier, _ := cmd.Flags().GetUint64("identifier")
|
||||
|
||||
getRequest := &v1.GetNodeRequest{
|
||||
NodeId: identifier,
|
||||
@@ -390,49 +239,31 @@ var deleteNodeCmd = &cobra.Command{
|
||||
|
||||
getResponse, err := client.GetNode(ctx, getRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error getting node node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("getting node: %w", err)
|
||||
}
|
||||
|
||||
deleteRequest := &v1.DeleteNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
confirm = util.YesNo(fmt.Sprintf(
|
||||
"Do you want to remove the node %s?",
|
||||
getResponse.GetNode().GetName(),
|
||||
))
|
||||
if !confirmAction(cmd, fmt.Sprintf(
|
||||
"Do you want to remove the node %s?",
|
||||
getResponse.GetNode().GetName(),
|
||||
)) {
|
||||
return printOutput(cmd, map[string]string{"Result": "Node not deleted"}, "Node not deleted")
|
||||
}
|
||||
|
||||
if confirm || force {
|
||||
response, err := client.DeleteNode(ctx, deleteRequest)
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error deleting node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
SuccessOutput(
|
||||
map[string]string{"Result": "Node deleted"},
|
||||
"Node deleted",
|
||||
output,
|
||||
)
|
||||
} else {
|
||||
SuccessOutput(map[string]string{"Result": "Node not deleted"}, "Node not deleted", output)
|
||||
_, err = client.DeleteNode(ctx, deleteRequest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting node: %w", err)
|
||||
}
|
||||
},
|
||||
|
||||
return printOutput(
|
||||
cmd,
|
||||
map[string]string{"Result": "Node deleted"},
|
||||
"Node deleted",
|
||||
)
|
||||
}),
|
||||
}
|
||||
|
||||
var backfillNodeIPsCmd = &cobra.Command{
|
||||
@@ -450,32 +281,24 @@ all nodes that are missing.
|
||||
If you remove IPv4 or IPv6 prefixes from the config,
|
||||
it can be run to remove the IPs that should no longer
|
||||
be assigned to nodes.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
confirm := false
|
||||
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
confirm = util.YesNo("Are you sure that you want to assign/remove IPs to/from nodes?")
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if !confirmAction(cmd, "Are you sure that you want to assign/remove IPs to/from nodes?") {
|
||||
return nil
|
||||
}
|
||||
|
||||
if confirm || force {
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
changes, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: confirm || force})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error backfilling IPs: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(changes, "Node IPs backfilled successfully", output)
|
||||
ctx, client, conn, cancel, err := newHeadscaleCLIWithConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to headscale: %w", err)
|
||||
}
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
changes, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: true})
|
||||
if err != nil {
|
||||
return fmt.Errorf("backfilling IPs: %w", err)
|
||||
}
|
||||
|
||||
return printOutput(cmd, changes, "Node IPs backfilled successfully")
|
||||
},
|
||||
}
|
||||
|
||||
@@ -506,23 +329,30 @@ func nodesToPtables(
|
||||
ephemeral = true
|
||||
}
|
||||
|
||||
var lastSeen time.Time
|
||||
var lastSeenTime string
|
||||
var (
|
||||
lastSeen time.Time
|
||||
lastSeenTime string
|
||||
)
|
||||
|
||||
if node.GetLastSeen() != nil {
|
||||
lastSeen = node.GetLastSeen().AsTime()
|
||||
lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
|
||||
lastSeenTime = lastSeen.Format(HeadscaleDateTimeFormat)
|
||||
}
|
||||
|
||||
var expiry time.Time
|
||||
var expiryTime string
|
||||
var (
|
||||
expiry time.Time
|
||||
expiryTime string
|
||||
)
|
||||
|
||||
if node.GetExpiry() != nil {
|
||||
expiry = node.GetExpiry().AsTime()
|
||||
expiryTime = expiry.Format("2006-01-02 15:04:05")
|
||||
expiryTime = expiry.Format(HeadscaleDateTimeFormat)
|
||||
} else {
|
||||
expiryTime = "N/A"
|
||||
}
|
||||
|
||||
var machineKey key.MachinePublic
|
||||
|
||||
err := machineKey.UnmarshalText(
|
||||
[]byte(node.GetMachineKey()),
|
||||
)
|
||||
@@ -531,6 +361,7 @@ func nodesToPtables(
|
||||
}
|
||||
|
||||
var nodeKey key.NodePublic
|
||||
|
||||
err = nodeKey.UnmarshalText(
|
||||
[]byte(node.GetNodeKey()),
|
||||
)
|
||||
@@ -546,42 +377,39 @@ func nodesToPtables(
|
||||
}
|
||||
|
||||
var expired string
|
||||
if expiry.IsZero() || expiry.After(time.Now()) {
|
||||
expired = pterm.LightGreen("no")
|
||||
} else {
|
||||
if node.GetExpiry() != nil && node.GetExpiry().AsTime().Before(time.Now()) {
|
||||
expired = pterm.LightRed("yes")
|
||||
} else {
|
||||
expired = pterm.LightGreen("no")
|
||||
}
|
||||
|
||||
// TODO(kradalby): as part of CLI rework, we should add the posibility to show "unusable" tags as mentioned in
|
||||
// https://github.com/juanfont/headscale/issues/2981
|
||||
var tagsBuilder strings.Builder
|
||||
|
||||
for _, tag := range node.GetTags() {
|
||||
tagsBuilder.WriteString("\n" + tag)
|
||||
}
|
||||
|
||||
tags := tagsBuilder.String()
|
||||
|
||||
tags = strings.TrimLeft(tags, "\n")
|
||||
tags := strings.TrimLeft(tagsBuilder.String(), "\n")
|
||||
|
||||
var user string
|
||||
if currentUser == "" || (currentUser == node.GetUser().GetName()) {
|
||||
user = pterm.LightMagenta(node.GetUser().GetName())
|
||||
} else {
|
||||
// Shared into this user
|
||||
user = pterm.LightYellow(node.GetUser().GetName())
|
||||
if node.GetUser() != nil {
|
||||
user = node.GetUser().GetName()
|
||||
}
|
||||
|
||||
var IPV4Address string
|
||||
var IPV6Address string
|
||||
var ipBuilder strings.Builder
|
||||
for _, addr := range node.GetIpAddresses() {
|
||||
if netip.MustParseAddr(addr).Is4() {
|
||||
IPV4Address = addr
|
||||
} else {
|
||||
IPV6Address = addr
|
||||
ip, err := netip.ParseAddr(addr)
|
||||
if err == nil {
|
||||
if ipBuilder.Len() > 0 {
|
||||
ipBuilder.WriteString("\n")
|
||||
}
|
||||
|
||||
ipBuilder.WriteString(ip.String())
|
||||
}
|
||||
}
|
||||
|
||||
ipAddresses := ipBuilder.String()
|
||||
|
||||
nodeData := []string{
|
||||
strconv.FormatUint(node.GetId(), util.Base10),
|
||||
node.GetName(),
|
||||
@@ -590,7 +418,7 @@ func nodesToPtables(
|
||||
nodeKey.ShortString(),
|
||||
user,
|
||||
tags,
|
||||
strings.Join([]string{IPV4Address, IPV6Address}, ", "),
|
||||
ipAddresses,
|
||||
strconv.FormatBool(ephemeral),
|
||||
lastSeenTime,
|
||||
expiryTime,
|
||||
@@ -608,7 +436,7 @@ func nodesToPtables(
|
||||
|
||||
func nodeRoutesToPtables(
|
||||
nodes []*v1.Node,
|
||||
) (pterm.TableData, error) {
|
||||
) pterm.TableData {
|
||||
tableHeader := []string{
|
||||
"ID",
|
||||
"Hostname",
|
||||
@@ -632,108 +460,50 @@ func nodeRoutesToPtables(
|
||||
)
|
||||
}
|
||||
|
||||
return tableData, nil
|
||||
return tableData
|
||||
}
|
||||
|
||||
var tagCmd = &cobra.Command{
|
||||
Use: "tag",
|
||||
Short: "Manage the tags of a node",
|
||||
Aliases: []string{"tags", "t"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
// retrieve flags from CLI
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
tagsToSet, err := cmd.Flags().GetStringSlice("tags")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error retrieving list of tags to add to node, %v", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
identifier, _ := cmd.Flags().GetUint64("identifier")
|
||||
tagsToSet, _ := cmd.Flags().GetStringSlice("tags")
|
||||
|
||||
// Sending tags to node
|
||||
request := &v1.SetTagsRequest{
|
||||
NodeId: identifier,
|
||||
Tags: tagsToSet,
|
||||
}
|
||||
|
||||
resp, err := client.SetTags(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error while sending tags to headscale: %s", err),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("setting tags: %w", err)
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
SuccessOutput(
|
||||
resp.GetNode(),
|
||||
"Node updated",
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
return printOutput(cmd, resp.GetNode(), "Node updated")
|
||||
}),
|
||||
}
|
||||
|
||||
var approveRoutesCmd = &cobra.Command{
|
||||
Use: "approve-routes",
|
||||
Short: "Manage the approved routes of a node",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
// retrieve flags from CLI
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
routes, err := cmd.Flags().GetStringSlice("routes")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error retrieving list of routes to add to node, %v", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
identifier, _ := cmd.Flags().GetUint64("identifier")
|
||||
routes, _ := cmd.Flags().GetStringSlice("routes")
|
||||
|
||||
// Sending routes to node
|
||||
request := &v1.SetApprovedRoutesRequest{
|
||||
NodeId: identifier,
|
||||
Routes: routes,
|
||||
}
|
||||
|
||||
resp, err := client.SetApprovedRoutes(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error while sending routes to headscale: %s", err),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("setting approved routes: %w", err)
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
SuccessOutput(
|
||||
resp.GetNode(),
|
||||
"Node updated",
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
return printOutput(cmd, resp.GetNode(), "Node updated")
|
||||
}),
|
||||
}
|
||||
|
||||
@@ -1,24 +1,41 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/db"
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"tailscale.com/types/views"
|
||||
)
|
||||
|
||||
const (
|
||||
bypassFlag = "bypass-grpc-and-access-database-directly"
|
||||
bypassFlag = "bypass-grpc-and-access-database-directly" //nolint:gosec // not a credential
|
||||
)
|
||||
|
||||
var errAborted = errors.New("command aborted by user")
|
||||
|
||||
// bypassDatabase loads the server config and opens the database directly,
|
||||
// bypassing the gRPC server. The caller is responsible for closing the
|
||||
// returned database handle.
|
||||
func bypassDatabase() (*db.HSDatabase, error) {
|
||||
cfg, err := types.LoadServerConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading config: %w", err)
|
||||
}
|
||||
|
||||
d, err := db.NewHeadscaleDatabase(cfg, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("opening database: %w", err)
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(policyCmd)
|
||||
|
||||
@@ -26,16 +43,12 @@ func init() {
|
||||
policyCmd.AddCommand(getPolicy)
|
||||
|
||||
setPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
|
||||
if err := setPolicy.MarkFlagRequired("file"); err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
setPolicy.Flags().BoolP(bypassFlag, "", false, "Uses the headscale config to directly access the database, bypassing gRPC and does not require the server to be running")
|
||||
mustMarkRequired(setPolicy, "file")
|
||||
policyCmd.AddCommand(setPolicy)
|
||||
|
||||
checkPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
|
||||
if err := checkPolicy.MarkFlagRequired("file"); err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
mustMarkRequired(checkPolicy, "file")
|
||||
policyCmd.AddCommand(checkPolicy)
|
||||
}
|
||||
|
||||
@@ -48,59 +61,46 @@ var getPolicy = &cobra.Command{
|
||||
Use: "get",
|
||||
Short: "Print the current ACL Policy",
|
||||
Aliases: []string{"show", "view", "fetch"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
var policy string
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
var policyData string
|
||||
if bypass, _ := cmd.Flags().GetBool(bypassFlag); bypass {
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
confirm = util.YesNo("DO NOT run this command if an instance of headscale is running, are you sure headscale is not running?")
|
||||
if !confirmAction(cmd, "DO NOT run this command if an instance of headscale is running, are you sure headscale is not running?") {
|
||||
return errAborted
|
||||
}
|
||||
|
||||
if !confirm && !force {
|
||||
ErrorOutput(nil, "Aborting command", output)
|
||||
return
|
||||
}
|
||||
|
||||
cfg, err := types.LoadServerConfig()
|
||||
d, err := bypassDatabase()
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed loading config: %s", err), output)
|
||||
}
|
||||
|
||||
d, err := db.NewHeadscaleDatabase(
|
||||
cfg,
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to open database: %s", err), output)
|
||||
return err
|
||||
}
|
||||
defer d.Close()
|
||||
|
||||
pol, err := d.GetPolicy()
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed loading Policy from database: %s", err), output)
|
||||
return fmt.Errorf("loading policy from database: %w", err)
|
||||
}
|
||||
|
||||
policy = pol.Data
|
||||
policyData = pol.Data
|
||||
} else {
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
ctx, client, conn, cancel, err := newHeadscaleCLIWithConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to headscale: %w", err)
|
||||
}
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.GetPolicyRequest{}
|
||||
|
||||
response, err := client.GetPolicy(ctx, request)
|
||||
response, err := client.GetPolicy(ctx, &v1.GetPolicyRequest{})
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed loading ACL Policy: %s", err), output)
|
||||
return fmt.Errorf("loading ACL policy: %w", err)
|
||||
}
|
||||
|
||||
policy = response.GetPolicy()
|
||||
policyData = response.GetPolicy()
|
||||
}
|
||||
|
||||
// TODO(pallabpain): Maybe print this better?
|
||||
// This does not pass output as we dont support yaml, json or json-line
|
||||
// output for this command. It is HuJSON already.
|
||||
SuccessOutput("", policy, "")
|
||||
// This does not pass output format as we don't support yaml, json or
|
||||
// json-line output for this command. It is HuJSON already.
|
||||
fmt.Println(policyData)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -111,100 +111,79 @@ var setPolicy = &cobra.Command{
|
||||
Updates the existing ACL Policy with the provided policy. The policy must be a valid HuJSON object.
|
||||
This command only works when the acl.policy_mode is set to "db", and the policy will be stored in the database.`,
|
||||
Aliases: []string{"put", "update"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
policyPath, _ := cmd.Flags().GetString("file")
|
||||
|
||||
f, err := os.Open(policyPath)
|
||||
policyBytes, err := os.ReadFile(policyPath)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error opening the policy file: %s", err), output)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
policyBytes, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
|
||||
return fmt.Errorf("reading policy file: %w", err)
|
||||
}
|
||||
|
||||
if bypass, _ := cmd.Flags().GetBool(bypassFlag); bypass {
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
confirm = util.YesNo("DO NOT run this command if an instance of headscale is running, are you sure headscale is not running?")
|
||||
if !confirmAction(cmd, "DO NOT run this command if an instance of headscale is running, are you sure headscale is not running?") {
|
||||
return errAborted
|
||||
}
|
||||
|
||||
if !confirm && !force {
|
||||
ErrorOutput(nil, "Aborting command", output)
|
||||
return
|
||||
}
|
||||
|
||||
cfg, err := types.LoadServerConfig()
|
||||
d, err := bypassDatabase()
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed loading config: %s", err), output)
|
||||
}
|
||||
|
||||
d, err := db.NewHeadscaleDatabase(
|
||||
cfg,
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to open database: %s", err), output)
|
||||
return err
|
||||
}
|
||||
defer d.Close()
|
||||
|
||||
users, err := d.ListUsers()
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to load users for policy validation: %s", err), output)
|
||||
return fmt.Errorf("loading users for policy validation: %w", err)
|
||||
}
|
||||
|
||||
_, err = policy.NewPolicyManager(policyBytes, users, views.Slice[types.NodeView]{})
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
|
||||
return
|
||||
return fmt.Errorf("parsing policy file: %w", err)
|
||||
}
|
||||
|
||||
_, err = d.SetPolicy(string(policyBytes))
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
|
||||
return fmt.Errorf("setting ACL policy: %w", err)
|
||||
}
|
||||
} else {
|
||||
request := &v1.SetPolicyRequest{Policy: string(policyBytes)}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
ctx, client, conn, cancel, err := newHeadscaleCLIWithConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to headscale: %w", err)
|
||||
}
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
if _, err := client.SetPolicy(ctx, request); err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
|
||||
_, err = client.SetPolicy(ctx, request)
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting ACL policy: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
SuccessOutput(nil, "Policy updated.", "")
|
||||
fmt.Println("Policy updated.")
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var checkPolicy = &cobra.Command{
|
||||
Use: "check",
|
||||
Short: "Check the Policy file for errors",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
policyPath, _ := cmd.Flags().GetString("file")
|
||||
|
||||
f, err := os.Open(policyPath)
|
||||
policyBytes, err := os.ReadFile(policyPath)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error opening the policy file: %s", err), output)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
policyBytes, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
|
||||
return fmt.Errorf("reading policy file: %w", err)
|
||||
}
|
||||
|
||||
_, err = policy.NewPolicyManager(policyBytes, nil, views.Slice[types.NodeView]{})
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
|
||||
return fmt.Errorf("parsing policy file: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(nil, "Policy is valid", "")
|
||||
fmt.Println("Policy is valid")
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,16 +1,15 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -46,203 +45,134 @@ var listPreAuthKeys = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all preauthkeys",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
response, err := client.ListPreAuthKeys(ctx, &v1.ListPreAuthKeysRequest{})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting the list of keys: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
return fmt.Errorf("listing preauthkeys: %w", err)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetPreAuthKeys(), "", output)
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{
|
||||
{
|
||||
"ID",
|
||||
"Key/Prefix",
|
||||
"Reusable",
|
||||
"Ephemeral",
|
||||
"Used",
|
||||
"Expiration",
|
||||
"Created",
|
||||
"Owner",
|
||||
},
|
||||
}
|
||||
for _, key := range response.GetPreAuthKeys() {
|
||||
expiration := "-"
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = ColourTime(key.GetExpiration().AsTime())
|
||||
return printListOutput(cmd, response.GetPreAuthKeys(), func() error {
|
||||
tableData := pterm.TableData{
|
||||
{
|
||||
"ID",
|
||||
"Key/Prefix",
|
||||
"Reusable",
|
||||
"Ephemeral",
|
||||
"Used",
|
||||
"Expiration",
|
||||
"Created",
|
||||
"Owner",
|
||||
},
|
||||
}
|
||||
|
||||
var owner string
|
||||
if len(key.GetAclTags()) > 0 {
|
||||
owner = strings.Join(key.GetAclTags(), "\n")
|
||||
} else if key.GetUser() != nil {
|
||||
owner = key.GetUser().GetName()
|
||||
} else {
|
||||
owner = "-"
|
||||
for _, key := range response.GetPreAuthKeys() {
|
||||
expiration := "-"
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = ColourTime(key.GetExpiration().AsTime())
|
||||
}
|
||||
|
||||
var owner string
|
||||
if len(key.GetAclTags()) > 0 {
|
||||
owner = strings.Join(key.GetAclTags(), "\n")
|
||||
} else if key.GetUser() != nil {
|
||||
owner = key.GetUser().GetName()
|
||||
} else {
|
||||
owner = "-"
|
||||
}
|
||||
|
||||
tableData = append(tableData, []string{
|
||||
strconv.FormatUint(key.GetId(), util.Base10),
|
||||
key.GetKey(),
|
||||
strconv.FormatBool(key.GetReusable()),
|
||||
strconv.FormatBool(key.GetEphemeral()),
|
||||
strconv.FormatBool(key.GetUsed()),
|
||||
expiration,
|
||||
key.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
|
||||
owner,
|
||||
})
|
||||
}
|
||||
|
||||
tableData = append(tableData, []string{
|
||||
strconv.FormatUint(key.GetId(), 10),
|
||||
key.GetKey(),
|
||||
strconv.FormatBool(key.GetReusable()),
|
||||
strconv.FormatBool(key.GetEphemeral()),
|
||||
strconv.FormatBool(key.GetUsed()),
|
||||
expiration,
|
||||
key.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
|
||||
owner,
|
||||
})
|
||||
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
return pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
})
|
||||
}),
|
||||
}
|
||||
|
||||
var createPreAuthKeyCmd = &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "Creates a new preauthkey",
|
||||
Aliases: []string{"c", "new"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
user, _ := cmd.Flags().GetUint64("user")
|
||||
reusable, _ := cmd.Flags().GetBool("reusable")
|
||||
ephemeral, _ := cmd.Flags().GetBool("ephemeral")
|
||||
tags, _ := cmd.Flags().GetStringSlice("tags")
|
||||
|
||||
request := &v1.CreatePreAuthKeyRequest{
|
||||
User: user,
|
||||
Reusable: reusable,
|
||||
Ephemeral: ephemeral,
|
||||
AclTags: tags,
|
||||
}
|
||||
|
||||
durationStr, _ := cmd.Flags().GetString("expiration")
|
||||
|
||||
duration, err := model.ParseDuration(durationStr)
|
||||
expiration, err := expirationFromFlag(cmd)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Could not parse duration: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
expiration := time.Now().UTC().Add(time.Duration(duration))
|
||||
|
||||
request.Expiration = timestamppb.New(expiration)
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
request := &v1.CreatePreAuthKeyRequest{
|
||||
User: user,
|
||||
Reusable: reusable,
|
||||
Ephemeral: ephemeral,
|
||||
AclTags: tags,
|
||||
Expiration: expiration,
|
||||
}
|
||||
|
||||
response, err := client.CreatePreAuthKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create Pre Auth Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("creating preauthkey: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output)
|
||||
},
|
||||
return printOutput(cmd, response.GetPreAuthKey(), response.GetPreAuthKey().GetKey())
|
||||
}),
|
||||
}
|
||||
|
||||
var expirePreAuthKeyCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Short: "Expire a preauthkey",
|
||||
Aliases: []string{"revoke", "exp", "e"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
id, _ := cmd.Flags().GetUint64("id")
|
||||
|
||||
if id == 0 {
|
||||
ErrorOutput(
|
||||
errMissingParameter,
|
||||
"Error: missing --id parameter",
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
return fmt.Errorf("missing --id parameter: %w", errMissingParameter)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ExpirePreAuthKeyRequest{
|
||||
Id: id,
|
||||
}
|
||||
|
||||
response, err := client.ExpirePreAuthKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot expire Pre Auth Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("expiring preauthkey: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key expired", output)
|
||||
},
|
||||
return printOutput(cmd, response, "Key expired")
|
||||
}),
|
||||
}
|
||||
|
||||
var deletePreAuthKeyCmd = &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete a preauthkey",
|
||||
Aliases: []string{"del", "rm", "d"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
id, _ := cmd.Flags().GetUint64("id")
|
||||
|
||||
if id == 0 {
|
||||
ErrorOutput(
|
||||
errMissingParameter,
|
||||
"Error: missing --id parameter",
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
return fmt.Errorf("missing --id parameter: %w", errMissingParameter)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.DeletePreAuthKeyRequest{
|
||||
Id: id,
|
||||
}
|
||||
|
||||
response, err := client.DeletePreAuthKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot delete Pre Auth Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("deleting preauthkey: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key deleted", output)
|
||||
},
|
||||
return printOutput(cmd, response, "Key deleted")
|
||||
}),
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
)
|
||||
|
||||
func ColourTime(date time.Time) string {
|
||||
dateStr := date.Format("2006-01-02 15:04:05")
|
||||
dateStr := date.Format(HeadscaleDateTimeFormat)
|
||||
|
||||
if date.After(time.Now()) {
|
||||
dateStr = pterm.LightGreen(dateStr)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
@@ -15,10 +14,6 @@ import (
|
||||
"github.com/tcnksm/go-latest"
|
||||
)
|
||||
|
||||
const (
|
||||
deprecateNamespaceMessage = "use --user"
|
||||
)
|
||||
|
||||
var cfgFile string = ""
|
||||
|
||||
func init() {
|
||||
@@ -39,25 +34,34 @@ func init() {
|
||||
StringP("output", "o", "", "Output format. Empty for human-readable, 'json', 'json-line' or 'yaml'")
|
||||
rootCmd.PersistentFlags().
|
||||
Bool("force", false, "Disable prompts and forces the execution")
|
||||
|
||||
// Re-enable usage output only for flag-parsing errors; runtime errors
|
||||
// from RunE should never dump usage text.
|
||||
rootCmd.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {
|
||||
cmd.SilenceUsage = false
|
||||
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func initConfig() {
|
||||
if cfgFile == "" {
|
||||
cfgFile = os.Getenv("HEADSCALE_CONFIG")
|
||||
}
|
||||
|
||||
if cfgFile != "" {
|
||||
err := types.LoadConfig(cfgFile, true)
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err).Msgf("Error loading config file %s", cfgFile)
|
||||
log.Fatal().Caller().Err(err).Msgf("error loading config file %s", cfgFile)
|
||||
}
|
||||
} else {
|
||||
err := types.LoadConfig("", false)
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err).Msgf("Error loading config")
|
||||
log.Fatal().Caller().Err(err).Msgf("error loading config")
|
||||
}
|
||||
}
|
||||
|
||||
machineOutput := HasMachineOutputFlag()
|
||||
machineOutput := hasMachineOutputFlag()
|
||||
|
||||
// If the user has requested a "node" readable format,
|
||||
// then disable login so the output remains valid.
|
||||
@@ -80,6 +84,7 @@ func initConfig() {
|
||||
Repository: "headscale",
|
||||
TagFilterFunc: filterPreReleasesIfStable(func() string { return versionInfo.Version }),
|
||||
}
|
||||
|
||||
res, err := latest.Check(githubTag, versionInfo.Version)
|
||||
if err == nil && res.Outdated {
|
||||
//nolint
|
||||
@@ -101,6 +106,7 @@ func isPreReleaseVersion(version string) bool {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -137,11 +143,15 @@ var rootCmd = &cobra.Command{
|
||||
headscale is an open source implementation of the Tailscale control server
|
||||
|
||||
https://github.com/juanfont/headscale`,
|
||||
SilenceErrors: true,
|
||||
SilenceUsage: true,
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
cmd, err := rootCmd.ExecuteC()
|
||||
if err != nil {
|
||||
outputFormat, _ := cmd.Flags().GetString("output")
|
||||
printError(err, outputFormat)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/tailscale/squibble"
|
||||
)
|
||||
@@ -17,24 +16,22 @@ func init() {
|
||||
var serveCmd = &cobra.Command{
|
||||
Use: "serve",
|
||||
Short: "Launches the headscale server",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
app, err := newHeadscaleServerWithConfig()
|
||||
if err != nil {
|
||||
var squibbleErr squibble.ValidationError
|
||||
if errors.As(err, &squibbleErr) {
|
||||
if squibbleErr, ok := errors.AsType[squibble.ValidationError](err); ok {
|
||||
fmt.Printf("SQLite schema failed to validate:\n")
|
||||
fmt.Println(squibbleErr.Diff)
|
||||
}
|
||||
|
||||
log.Fatal().Caller().Err(err).Msg("Error initializing")
|
||||
return fmt.Errorf("initializing: %w", err)
|
||||
}
|
||||
|
||||
err = app.Serve()
|
||||
if err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
log.Fatal().Caller().Err(err).Msg("Headscale ran into an error and had to shut down.")
|
||||
return fmt.Errorf("headscale ran into an error and had to shut down: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
@@ -8,10 +9,16 @@ import (
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/juanfont/headscale/hscontrol/util/zlog/zf"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// CLI user errors.
|
||||
var (
|
||||
errFlagRequired = errors.New("--name or --identifier flag is required")
|
||||
errMultipleUsersMatch = errors.New("multiple users match query, specify an ID")
|
||||
)
|
||||
|
||||
func usernameAndIDFlag(cmd *cobra.Command) {
|
||||
@@ -20,20 +27,21 @@ func usernameAndIDFlag(cmd *cobra.Command) {
|
||||
}
|
||||
|
||||
// usernameAndIDFromFlag returns the username and ID from the flags of the command.
|
||||
// If both are empty, it will exit the program with an error.
|
||||
func usernameAndIDFromFlag(cmd *cobra.Command) (uint64, string) {
|
||||
func usernameAndIDFromFlag(cmd *cobra.Command) (uint64, string, error) {
|
||||
username, _ := cmd.Flags().GetString("name")
|
||||
|
||||
identifier, _ := cmd.Flags().GetInt64("identifier")
|
||||
if username == "" && identifier < 0 {
|
||||
err := errors.New("--name or --identifier flag is required")
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot rename user: "+status.Convert(err).Message(),
|
||||
"",
|
||||
)
|
||||
return 0, "", errFlagRequired
|
||||
}
|
||||
|
||||
return uint64(identifier), username
|
||||
// Normalise unset/negative identifiers to 0 so the uint64
|
||||
// conversion does not produce a bogus large value.
|
||||
if identifier < 0 {
|
||||
identifier = 0
|
||||
}
|
||||
|
||||
return uint64(identifier), username, nil //nolint:gosec // identifier is clamped to >= 0 above
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -50,15 +58,13 @@ func init() {
|
||||
userCmd.AddCommand(renameUserCmd)
|
||||
usernameAndIDFlag(renameUserCmd)
|
||||
renameUserCmd.Flags().StringP("new-name", "r", "", "New username")
|
||||
renameNodeCmd.MarkFlagRequired("new-name")
|
||||
mustMarkRequired(renameUserCmd, "new-name")
|
||||
}
|
||||
|
||||
var errMissingParameter = errors.New("missing parameters")
|
||||
|
||||
var userCmd = &cobra.Command{
|
||||
Use: "users",
|
||||
Short: "Manage the users of Headscale",
|
||||
Aliases: []string{"user", "namespace", "namespaces", "ns"},
|
||||
Aliases: []string{"user"},
|
||||
}
|
||||
|
||||
var createUserCmd = &cobra.Command{
|
||||
@@ -72,16 +78,10 @@ var createUserCmd = &cobra.Command{
|
||||
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
userName := args[0]
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
log.Trace().Interface("client", client).Msg("Obtained gRPC client")
|
||||
log.Trace().Interface(zf.Client, client).Msg("obtained gRPC client")
|
||||
|
||||
request := &v1.CreateUserRequest{Name: userName}
|
||||
|
||||
@@ -94,108 +94,73 @@ var createUserCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
if pictureURL, _ := cmd.Flags().GetString("picture-url"); pictureURL != "" {
|
||||
if _, err := url.Parse(pictureURL); err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Invalid Picture URL: %s",
|
||||
err,
|
||||
),
|
||||
output,
|
||||
)
|
||||
if _, err := url.Parse(pictureURL); err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("invalid picture URL: %w", err)
|
||||
}
|
||||
|
||||
request.PictureUrl = pictureURL
|
||||
}
|
||||
|
||||
log.Trace().Interface("request", request).Msg("Sending CreateUser request")
|
||||
log.Trace().Interface(zf.Request, request).Msg("sending CreateUser request")
|
||||
|
||||
response, err := client.CreateUser(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot create user: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("creating user: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetUser(), "User created", output)
|
||||
},
|
||||
return printOutput(cmd, response.GetUser(), "User created")
|
||||
}),
|
||||
}
|
||||
|
||||
var destroyUserCmd = &cobra.Command{
|
||||
Use: "destroy --identifier ID or --name NAME",
|
||||
Short: "Destroys a user",
|
||||
Aliases: []string{"delete"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
id, username, err := usernameAndIDFromFlag(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
id, username := usernameAndIDFromFlag(cmd)
|
||||
request := &v1.ListUsersRequest{
|
||||
Name: username,
|
||||
Id: id,
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
users, err := client.ListUsers(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("listing users: %w", err)
|
||||
}
|
||||
|
||||
if len(users.GetUsers()) != 1 {
|
||||
err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return errMultipleUsersMatch
|
||||
}
|
||||
|
||||
user := users.GetUsers()[0]
|
||||
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
confirm = util.YesNo(fmt.Sprintf(
|
||||
"Do you want to remove the user %q (%d) and any associated preauthkeys?",
|
||||
user.GetName(), user.GetId(),
|
||||
))
|
||||
if !confirmAction(cmd, fmt.Sprintf(
|
||||
"Do you want to remove the user %q (%d) and any associated preauthkeys?",
|
||||
user.GetName(), user.GetId(),
|
||||
)) {
|
||||
return printOutput(cmd, map[string]string{"Result": "User not destroyed"}, "User not destroyed")
|
||||
}
|
||||
|
||||
if confirm || force {
|
||||
request := &v1.DeleteUserRequest{Id: user.GetId()}
|
||||
deleteRequest := &v1.DeleteUserRequest{Id: user.GetId()}
|
||||
|
||||
response, err := client.DeleteUser(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot destroy user: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
SuccessOutput(response, "User destroyed", output)
|
||||
} else {
|
||||
SuccessOutput(map[string]string{"Result": "User not destroyed"}, "User not destroyed", output)
|
||||
response, err := client.DeleteUser(ctx, deleteRequest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("destroying user: %w", err)
|
||||
}
|
||||
},
|
||||
|
||||
return printOutput(cmd, response, "User destroyed")
|
||||
}),
|
||||
}
|
||||
|
||||
var listUsersCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all the users",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
request := &v1.ListUsersRequest{}
|
||||
|
||||
id, _ := cmd.Flags().GetInt64("identifier")
|
||||
@@ -214,53 +179,39 @@ var listUsersCmd = &cobra.Command{
|
||||
|
||||
response, err := client.ListUsers(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot get users: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("listing users: %w", err)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetUsers(), "", output)
|
||||
}
|
||||
return printListOutput(cmd, response.GetUsers(), func() error {
|
||||
tableData := pterm.TableData{{"ID", "Name", "Username", "Email", "Created"}}
|
||||
for _, user := range response.GetUsers() {
|
||||
tableData = append(
|
||||
tableData,
|
||||
[]string{
|
||||
strconv.FormatUint(user.GetId(), util.Base10),
|
||||
user.GetDisplayName(),
|
||||
user.GetName(),
|
||||
user.GetEmail(),
|
||||
user.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{{"ID", "Name", "Username", "Email", "Created"}}
|
||||
for _, user := range response.GetUsers() {
|
||||
tableData = append(
|
||||
tableData,
|
||||
[]string{
|
||||
strconv.FormatUint(user.GetId(), 10),
|
||||
user.GetDisplayName(),
|
||||
user.GetName(),
|
||||
user.GetEmail(),
|
||||
user.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
|
||||
},
|
||||
)
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
return pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
})
|
||||
}),
|
||||
}
|
||||
|
||||
var renameUserCmd = &cobra.Command{
|
||||
Use: "rename",
|
||||
Short: "Renames a user",
|
||||
Aliases: []string{"mv"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
RunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {
|
||||
id, username, err := usernameAndIDFromFlag(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
id, username := usernameAndIDFromFlag(cmd)
|
||||
listReq := &v1.ListUsersRequest{
|
||||
Name: username,
|
||||
Id: id,
|
||||
@@ -268,20 +219,11 @@ var renameUserCmd = &cobra.Command{
|
||||
|
||||
users, err := client.ListUsers(ctx, listReq)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("listing users: %w", err)
|
||||
}
|
||||
|
||||
if len(users.GetUsers()) != 1 {
|
||||
err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return errMultipleUsersMatch
|
||||
}
|
||||
|
||||
newName, _ := cmd.Flags().GetString("new-name")
|
||||
@@ -293,13 +235,9 @@ var renameUserCmd = &cobra.Command{
|
||||
|
||||
response, err := client.RenameUser(ctx, renameReq)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot rename user: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return fmt.Errorf("renaming user: %w", err)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetUser(), "User renamed", output)
|
||||
},
|
||||
return printOutput(cmd, response.GetUser(), "User renamed")
|
||||
}),
|
||||
}
|
||||
|
||||
@@ -4,25 +4,52 @@ import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/juanfont/headscale/hscontrol/util/zlog/zf"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const (
|
||||
HeadscaleDateTimeFormat = "2006-01-02 15:04:05"
|
||||
SocketWritePermissions = 0o666
|
||||
|
||||
outputFormatJSON = "json"
|
||||
outputFormatJSONLine = "json-line"
|
||||
outputFormatYAML = "yaml"
|
||||
)
|
||||
|
||||
var (
|
||||
errAPIKeyNotSet = errors.New("HEADSCALE_CLI_API_KEY environment variable needs to be set")
|
||||
errMissingParameter = errors.New("missing parameters")
|
||||
)
|
||||
|
||||
// mustMarkRequired marks the named flags as required on cmd, panicking
|
||||
// if any name does not match a registered flag. This is only called
|
||||
// from init() where a failure indicates a programming error.
|
||||
func mustMarkRequired(cmd *cobra.Command, names ...string) {
|
||||
for _, n := range names {
|
||||
err := cmd.MarkFlagRequired(n)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("marking flag %q required on %q: %v", n, cmd.Name(), err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newHeadscaleServerWithConfig() (*hscontrol.Headscale, error) {
|
||||
cfg, err := types.LoadServerConfig()
|
||||
if err != nil {
|
||||
@@ -40,14 +67,28 @@ func newHeadscaleServerWithConfig() (*hscontrol.Headscale, error) {
|
||||
return app, nil
|
||||
}
|
||||
|
||||
func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc) {
|
||||
// grpcRunE wraps a cobra RunE func, injecting a ready gRPC client and
|
||||
// context. Connection lifecycle is managed by the wrapper — callers
|
||||
// never see the underlying conn or cancel func.
|
||||
func grpcRunE(
|
||||
fn func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error,
|
||||
) func(*cobra.Command, []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
ctx, client, conn, cancel, err := newHeadscaleCLIWithConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to headscale: %w", err)
|
||||
}
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
return fn(ctx, client, cmd, args)
|
||||
}
|
||||
}
|
||||
|
||||
func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc, error) {
|
||||
cfg, err := types.LoadCLIConfig()
|
||||
if err != nil {
|
||||
log.Fatal().
|
||||
Err(err).
|
||||
Caller().
|
||||
Msgf("Failed to load configuration")
|
||||
os.Exit(-1) // we get here if logging is suppressed (i.e., json output)
|
||||
return nil, nil, nil, nil, fmt.Errorf("loading configuration: %w", err)
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
@@ -57,7 +98,7 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
|
||||
ctx, cancel := context.WithTimeout(context.Background(), cfg.CLI.Timeout)
|
||||
|
||||
grpcOptions := []grpc.DialOption{
|
||||
grpc.WithBlock(),
|
||||
grpc.WithBlock(), //nolint:staticcheck // SA1019: deprecated but supported in 1.x
|
||||
}
|
||||
|
||||
address := cfg.CLI.Address
|
||||
@@ -71,17 +112,23 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
|
||||
address = cfg.UnixSocket
|
||||
|
||||
// Try to give the user better feedback if we cannot write to the headscale
|
||||
// socket.
|
||||
socket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, SocketWritePermissions) // nolint
|
||||
// socket. Note: os.OpenFile on a Unix domain socket returns ENXIO on
|
||||
// Linux which is expected — only permission errors are actionable here.
|
||||
// The actual gRPC connection uses net.Dial which handles sockets properly.
|
||||
socket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, SocketWritePermissions) //nolint
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
log.Fatal().
|
||||
Err(err).
|
||||
Str("socket", cfg.UnixSocket).
|
||||
Msgf("Unable to read/write to headscale socket, do you have the correct permissions?")
|
||||
cancel()
|
||||
|
||||
return nil, nil, nil, nil, fmt.Errorf(
|
||||
"unable to read/write to headscale socket %q, do you have the correct permissions? %w",
|
||||
cfg.UnixSocket,
|
||||
err,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
socket.Close()
|
||||
}
|
||||
socket.Close()
|
||||
|
||||
grpcOptions = append(
|
||||
grpcOptions,
|
||||
@@ -92,8 +139,11 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
|
||||
// If we are not connecting to a local server, require an API key for authentication
|
||||
apiKey := cfg.CLI.APIKey
|
||||
if apiKey == "" {
|
||||
log.Fatal().Caller().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set.")
|
||||
cancel()
|
||||
|
||||
return nil, nil, nil, nil, errAPIKeyNotSet
|
||||
}
|
||||
|
||||
grpcOptions = append(grpcOptions,
|
||||
grpc.WithPerRPCCredentials(tokenAuth{
|
||||
token: apiKey,
|
||||
@@ -118,71 +168,136 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
|
||||
}
|
||||
}
|
||||
|
||||
log.Trace().Caller().Str("address", address).Msg("Connecting via gRPC")
|
||||
conn, err := grpc.DialContext(ctx, address, grpcOptions...)
|
||||
log.Trace().Caller().Str(zf.Address, address).Msg("connecting via gRPC")
|
||||
|
||||
conn, err := grpc.DialContext(ctx, address, grpcOptions...) //nolint:staticcheck // SA1019: deprecated but supported in 1.x
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err).Msgf("Could not connect: %v", err)
|
||||
os.Exit(-1) // we get here if logging is suppressed (i.e., json output)
|
||||
cancel()
|
||||
|
||||
return nil, nil, nil, nil, fmt.Errorf("connecting to %s: %w", address, err)
|
||||
}
|
||||
|
||||
client := v1.NewHeadscaleServiceClient(conn)
|
||||
|
||||
return ctx, client, conn, cancel
|
||||
return ctx, client, conn, cancel, nil
|
||||
}
|
||||
|
||||
func output(result any, override string, outputFormat string) string {
|
||||
var jsonBytes []byte
|
||||
var err error
|
||||
// formatOutput serialises result into the requested format. For the
|
||||
// default (empty) format the human-readable override string is returned.
|
||||
func formatOutput(result any, override string, outputFormat string) (string, error) {
|
||||
switch outputFormat {
|
||||
case "json":
|
||||
jsonBytes, err = json.MarshalIndent(result, "", "\t")
|
||||
case outputFormatJSON:
|
||||
b, err := json.MarshalIndent(result, "", "\t")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("failed to unmarshal output")
|
||||
return "", fmt.Errorf("marshalling JSON output: %w", err)
|
||||
}
|
||||
case "json-line":
|
||||
jsonBytes, err = json.Marshal(result)
|
||||
|
||||
return string(b), nil
|
||||
case outputFormatJSONLine:
|
||||
b, err := json.Marshal(result)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("failed to unmarshal output")
|
||||
return "", fmt.Errorf("marshalling JSON-line output: %w", err)
|
||||
}
|
||||
case "yaml":
|
||||
jsonBytes, err = yaml.Marshal(result)
|
||||
|
||||
return string(b), nil
|
||||
case outputFormatYAML:
|
||||
b, err := yaml.Marshal(result)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("failed to unmarshal output")
|
||||
return "", fmt.Errorf("marshalling YAML output: %w", err)
|
||||
}
|
||||
|
||||
return string(b), nil
|
||||
default:
|
||||
// nolint
|
||||
return override
|
||||
return override, nil
|
||||
}
|
||||
}
|
||||
|
||||
// printOutput formats result and writes it to stdout. It reads the --output
|
||||
// flag from cmd to decide the serialisation format.
|
||||
func printOutput(cmd *cobra.Command, result any, override string) error {
|
||||
format, _ := cmd.Flags().GetString("output")
|
||||
|
||||
out, err := formatOutput(result, override, format)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return string(jsonBytes)
|
||||
fmt.Println(out)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SuccessOutput prints the result to stdout and exits with status code 0.
|
||||
func SuccessOutput(result any, override string, outputFormat string) {
|
||||
fmt.Println(output(result, override, outputFormat))
|
||||
os.Exit(0)
|
||||
// expirationFromFlag parses the --expiration flag as a Prometheus-style
|
||||
// duration (e.g. "90d", "1h") and returns an absolute timestamp.
|
||||
func expirationFromFlag(cmd *cobra.Command) (*timestamppb.Timestamp, error) {
|
||||
durationStr, _ := cmd.Flags().GetString("expiration")
|
||||
|
||||
duration, err := model.ParseDuration(durationStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing duration: %w", err)
|
||||
}
|
||||
|
||||
return timestamppb.New(time.Now().UTC().Add(time.Duration(duration))), nil
|
||||
}
|
||||
|
||||
// ErrorOutput prints an error message to stderr and exits with status code 1.
|
||||
func ErrorOutput(errResult error, override string, outputFormat string) {
|
||||
// confirmAction returns true when the user confirms a prompt, or when
|
||||
// --force is set. Callers decide what to do when it returns false.
|
||||
func confirmAction(cmd *cobra.Command, prompt string) bool {
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if force {
|
||||
return true
|
||||
}
|
||||
|
||||
return util.YesNo(prompt)
|
||||
}
|
||||
|
||||
// printListOutput checks the --output flag: when a machine-readable format is
|
||||
// requested it serialises data as JSON/YAML; otherwise it calls renderTable
|
||||
// to produce the human-readable pterm table.
|
||||
func printListOutput(
|
||||
cmd *cobra.Command,
|
||||
data any,
|
||||
renderTable func() error,
|
||||
) error {
|
||||
format, _ := cmd.Flags().GetString("output")
|
||||
if format != "" {
|
||||
return printOutput(cmd, data, "")
|
||||
}
|
||||
|
||||
return renderTable()
|
||||
}
|
||||
|
||||
// printError writes err to stderr, formatting it as JSON/YAML when the
|
||||
// --output flag requests machine-readable output. Used exclusively by
|
||||
// Execute() so that every error surfaces in the format the caller asked for.
|
||||
func printError(err error, outputFormat string) {
|
||||
type errOutput struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
var errorMessage string
|
||||
if errResult != nil {
|
||||
errorMessage = errResult.Error()
|
||||
} else {
|
||||
errorMessage = override
|
||||
e := errOutput{Error: err.Error()}
|
||||
|
||||
var formatted []byte
|
||||
|
||||
switch outputFormat {
|
||||
case outputFormatJSON:
|
||||
formatted, _ = json.MarshalIndent(e, "", "\t") //nolint:errchkjson // errOutput contains only a string field
|
||||
case outputFormatJSONLine:
|
||||
formatted, _ = json.Marshal(e) //nolint:errchkjson // errOutput contains only a string field
|
||||
case outputFormatYAML:
|
||||
formatted, _ = yaml.Marshal(e)
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s\n", output(errOutput{errorMessage}, override, outputFormat))
|
||||
os.Exit(1)
|
||||
fmt.Fprintf(os.Stderr, "%s\n", formatted)
|
||||
}
|
||||
|
||||
func HasMachineOutputFlag() bool {
|
||||
func hasMachineOutputFlag() bool {
|
||||
for _, arg := range os.Args {
|
||||
if arg == "json" || arg == "json-line" || arg == "yaml" {
|
||||
if arg == outputFormatJSON || arg == outputFormatJSONLine || arg == outputFormatYAML {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,11 +14,9 @@ var versionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print the version.",
|
||||
Long: "The version of headscale.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
info := types.GetVersionInfo()
|
||||
|
||||
SuccessOutput(info, info.String(), output)
|
||||
return printOutput(cmd, info, info.String())
|
||||
},
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
|
||||
func main() {
|
||||
var colors bool
|
||||
|
||||
switch l := termcolor.SupportLevel(os.Stderr); l {
|
||||
case termcolor.Level16M:
|
||||
colors = true
|
||||
|
||||
@@ -14,9 +14,7 @@ import (
|
||||
)
|
||||
|
||||
func TestConfigFileLoading(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "headscale")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
path, err := os.Getwd()
|
||||
require.NoError(t, err)
|
||||
@@ -48,9 +46,7 @@ func TestConfigFileLoading(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestConfigLoading(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "headscale")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
path, err := os.Getwd()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -22,11 +22,11 @@ import (
|
||||
func cleanupBeforeTest(ctx context.Context) error {
|
||||
err := cleanupStaleTestContainers(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to clean stale test containers: %w", err)
|
||||
return fmt.Errorf("cleaning stale test containers: %w", err)
|
||||
}
|
||||
|
||||
if err := pruneDockerNetworks(ctx); err != nil {
|
||||
return fmt.Errorf("failed to prune networks: %w", err)
|
||||
if err := pruneDockerNetworks(ctx); err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("pruning networks: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -39,14 +39,14 @@ func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID, runI
|
||||
Force: true,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove test container: %w", err)
|
||||
return fmt.Errorf("removing test container: %w", err)
|
||||
}
|
||||
|
||||
// Clean up integration test containers for this run only
|
||||
if runID != "" {
|
||||
err := killTestContainersByRunID(ctx, runID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to clean up containers for run %s: %w", runID, err)
|
||||
return fmt.Errorf("cleaning up containers for run %s: %w", runID, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,9 +55,9 @@ func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID, runI
|
||||
|
||||
// killTestContainers terminates and removes all test containers.
|
||||
func killTestContainers(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
return fmt.Errorf("creating Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
@@ -65,12 +65,14 @@ func killTestContainers(ctx context.Context) error {
|
||||
All: true,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list containers: %w", err)
|
||||
return fmt.Errorf("listing containers: %w", err)
|
||||
}
|
||||
|
||||
removed := 0
|
||||
|
||||
for _, cont := range containers {
|
||||
shouldRemove := false
|
||||
|
||||
for _, name := range cont.Names {
|
||||
if strings.Contains(name, "headscale-test-suite") ||
|
||||
strings.Contains(name, "hs-") ||
|
||||
@@ -107,9 +109,9 @@ func killTestContainers(ctx context.Context) error {
|
||||
// This function filters containers by the hi.run-id label to only affect containers
|
||||
// belonging to the specified test run, leaving other concurrent test runs untouched.
|
||||
func killTestContainersByRunID(ctx context.Context, runID string) error {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
return fmt.Errorf("creating Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
@@ -121,7 +123,7 @@ func killTestContainersByRunID(ctx context.Context, runID string) error {
|
||||
),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list containers for run %s: %w", runID, err)
|
||||
return fmt.Errorf("listing containers for run %s: %w", runID, err)
|
||||
}
|
||||
|
||||
removed := 0
|
||||
@@ -149,9 +151,9 @@ func killTestContainersByRunID(ctx context.Context, runID string) error {
|
||||
// This is useful for cleaning up leftover containers from previous crashed or interrupted test runs
|
||||
// without interfering with currently running concurrent tests.
|
||||
func cleanupStaleTestContainers(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
return fmt.Errorf("creating Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
@@ -164,7 +166,7 @@ func cleanupStaleTestContainers(ctx context.Context) error {
|
||||
),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list stopped containers: %w", err)
|
||||
return fmt.Errorf("listing stopped containers: %w", err)
|
||||
}
|
||||
|
||||
removed := 0
|
||||
@@ -223,15 +225,15 @@ func removeContainerWithRetry(ctx context.Context, cli *client.Client, container
|
||||
|
||||
// pruneDockerNetworks removes unused Docker networks.
|
||||
func pruneDockerNetworks(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
return fmt.Errorf("creating Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
report, err := cli.NetworksPrune(ctx, filters.Args{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prune networks: %w", err)
|
||||
return fmt.Errorf("pruning networks: %w", err)
|
||||
}
|
||||
|
||||
if len(report.NetworksDeleted) > 0 {
|
||||
@@ -245,9 +247,9 @@ func pruneDockerNetworks(ctx context.Context) error {
|
||||
|
||||
// cleanOldImages removes test-related and old dangling Docker images.
|
||||
func cleanOldImages(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
return fmt.Errorf("creating Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
@@ -255,12 +257,14 @@ func cleanOldImages(ctx context.Context) error {
|
||||
All: true,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list images: %w", err)
|
||||
return fmt.Errorf("listing images: %w", err)
|
||||
}
|
||||
|
||||
removed := 0
|
||||
|
||||
for _, img := range images {
|
||||
shouldRemove := false
|
||||
|
||||
for _, tag := range img.RepoTags {
|
||||
if strings.Contains(tag, "hs-") ||
|
||||
strings.Contains(tag, "headscale-integration") ||
|
||||
@@ -295,18 +299,19 @@ func cleanOldImages(ctx context.Context) error {
|
||||
|
||||
// cleanCacheVolume removes the Docker volume used for Go module cache.
|
||||
func cleanCacheVolume(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
return fmt.Errorf("creating Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
volumeName := "hs-integration-go-cache"
|
||||
|
||||
err = cli.VolumeRemove(ctx, volumeName, true)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
if errdefs.IsNotFound(err) { //nolint:staticcheck // SA1019: deprecated but functional
|
||||
fmt.Printf("Go module cache volume not found: %s\n", volumeName)
|
||||
} else if errdefs.IsConflict(err) {
|
||||
} else if errdefs.IsConflict(err) { //nolint:staticcheck // SA1019: deprecated but functional
|
||||
fmt.Printf("Go module cache volume is in use and cannot be removed: %s\n", volumeName)
|
||||
} else {
|
||||
fmt.Printf("Failed to remove Go module cache volume %s: %v\n", volumeName, err)
|
||||
@@ -330,7 +335,7 @@ func cleanCacheVolume(ctx context.Context) error {
|
||||
func cleanupSuccessfulTestArtifacts(logsDir string, verbose bool) error {
|
||||
entries, err := os.ReadDir(logsDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read logs directory: %w", err)
|
||||
return fmt.Errorf("reading logs directory: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
148
cmd/hi/docker.go
148
cmd/hi/docker.go
@@ -22,17 +22,22 @@ import (
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
)
|
||||
|
||||
const defaultDirPerm = 0o755
|
||||
|
||||
var (
|
||||
ErrTestFailed = errors.New("test failed")
|
||||
ErrUnexpectedContainerWait = errors.New("unexpected end of container wait")
|
||||
ErrNoDockerContext = errors.New("no docker context found")
|
||||
ErrMemoryLimitViolations = errors.New("container(s) exceeded memory limits")
|
||||
)
|
||||
|
||||
// runTestContainer executes integration tests in a Docker container.
|
||||
//
|
||||
//nolint:gocyclo // complex test orchestration function
|
||||
func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
return fmt.Errorf("creating Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
@@ -48,19 +53,21 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
|
||||
absLogsDir, err := filepath.Abs(logsDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get absolute path for logs directory: %w", err)
|
||||
return fmt.Errorf("getting absolute path for logs directory: %w", err)
|
||||
}
|
||||
|
||||
const dirPerm = 0o755
|
||||
if err := os.MkdirAll(absLogsDir, dirPerm); err != nil {
|
||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
||||
if err := os.MkdirAll(absLogsDir, dirPerm); err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("creating logs directory: %w", err)
|
||||
}
|
||||
|
||||
if config.CleanBefore {
|
||||
if config.Verbose {
|
||||
log.Printf("Running pre-test cleanup...")
|
||||
}
|
||||
if err := cleanupBeforeTest(ctx); err != nil && config.Verbose {
|
||||
|
||||
err := cleanupBeforeTest(ctx)
|
||||
if err != nil && config.Verbose {
|
||||
log.Printf("Warning: pre-test cleanup failed: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -71,21 +78,21 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
}
|
||||
|
||||
imageName := "golang:" + config.GoVersion
|
||||
if err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil {
|
||||
return fmt.Errorf("failed to ensure image availability: %w", err)
|
||||
if err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("ensuring image availability: %w", err)
|
||||
}
|
||||
|
||||
resp, err := createGoTestContainer(ctx, cli, config, containerName, absLogsDir, goTestCmd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create container: %w", err)
|
||||
return fmt.Errorf("creating container: %w", err)
|
||||
}
|
||||
|
||||
if config.Verbose {
|
||||
log.Printf("Created container: %s", resp.ID)
|
||||
}
|
||||
|
||||
if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
|
||||
return fmt.Errorf("failed to start container: %w", err)
|
||||
if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("starting container: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("Starting test: %s", config.TestPattern)
|
||||
@@ -95,13 +102,16 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
|
||||
// Start stats collection for container resource monitoring (if enabled)
|
||||
var statsCollector *StatsCollector
|
||||
|
||||
if config.Stats {
|
||||
var err error
|
||||
statsCollector, err = NewStatsCollector()
|
||||
|
||||
statsCollector, err = NewStatsCollector(ctx)
|
||||
if err != nil {
|
||||
if config.Verbose {
|
||||
log.Printf("Warning: failed to create stats collector: %v", err)
|
||||
}
|
||||
|
||||
statsCollector = nil
|
||||
}
|
||||
|
||||
@@ -110,7 +120,8 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
|
||||
// Start stats collection immediately - no need for complex retry logic
|
||||
// The new implementation monitors Docker events and will catch containers as they start
|
||||
if err := statsCollector.StartCollection(ctx, runID, config.Verbose); err != nil {
|
||||
err := statsCollector.StartCollection(ctx, runID, config.Verbose)
|
||||
if err != nil {
|
||||
if config.Verbose {
|
||||
log.Printf("Warning: failed to start stats collection: %v", err)
|
||||
}
|
||||
@@ -122,12 +133,13 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
exitCode, err := streamAndWait(ctx, cli, resp.ID)
|
||||
|
||||
// Ensure all containers have finished and logs are flushed before extracting artifacts
|
||||
if waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose); waitErr != nil && config.Verbose {
|
||||
waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose)
|
||||
if waitErr != nil && config.Verbose {
|
||||
log.Printf("Warning: failed to wait for container finalization: %v", waitErr)
|
||||
}
|
||||
|
||||
// Extract artifacts from test containers before cleanup
|
||||
if err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose {
|
||||
if err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose { //nolint:noinlineerr
|
||||
log.Printf("Warning: failed to extract artifacts from containers: %v", err)
|
||||
}
|
||||
|
||||
@@ -140,12 +152,13 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
if len(violations) > 0 {
|
||||
log.Printf("MEMORY LIMIT VIOLATIONS DETECTED:")
|
||||
log.Printf("=================================")
|
||||
|
||||
for _, violation := range violations {
|
||||
log.Printf("Container %s exceeded memory limit: %.1f MB > %.1f MB",
|
||||
violation.ContainerName, violation.MaxMemoryMB, violation.LimitMB)
|
||||
}
|
||||
|
||||
return fmt.Errorf("test failed: %d container(s) exceeded memory limits", len(violations))
|
||||
return fmt.Errorf("test failed: %d %w", len(violations), ErrMemoryLimitViolations)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -176,7 +189,7 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("test execution failed: %w", err)
|
||||
return fmt.Errorf("executing test: %w", err)
|
||||
}
|
||||
|
||||
if exitCode != 0 {
|
||||
@@ -210,7 +223,7 @@ func buildGoTestCommand(config *RunConfig) []string {
|
||||
func createGoTestContainer(ctx context.Context, cli *client.Client, config *RunConfig, containerName, logsDir string, goTestCmd []string) (container.CreateResponse, error) {
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return container.CreateResponse{}, fmt.Errorf("failed to get working directory: %w", err)
|
||||
return container.CreateResponse{}, fmt.Errorf("getting working directory: %w", err)
|
||||
}
|
||||
|
||||
projectRoot := findProjectRoot(pwd)
|
||||
@@ -312,7 +325,7 @@ func streamAndWait(ctx context.Context, cli *client.Client, containerID string)
|
||||
Follow: true,
|
||||
})
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("failed to get container logs: %w", err)
|
||||
return -1, fmt.Errorf("getting container logs: %w", err)
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
@@ -324,7 +337,7 @@ func streamAndWait(ctx context.Context, cli *client.Client, containerID string)
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("error waiting for container: %w", err)
|
||||
return -1, fmt.Errorf("waiting for container: %w", err)
|
||||
}
|
||||
case status := <-statusCh:
|
||||
return int(status.StatusCode), nil
|
||||
@@ -338,7 +351,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
|
||||
// First, get all related test containers
|
||||
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list containers: %w", err)
|
||||
return fmt.Errorf("listing containers: %w", err)
|
||||
}
|
||||
|
||||
testContainers := getCurrentTestContainers(containers, testContainerID, verbose)
|
||||
@@ -347,6 +360,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
|
||||
maxWaitTime := 10 * time.Second
|
||||
checkInterval := 500 * time.Millisecond
|
||||
timeout := time.After(maxWaitTime)
|
||||
|
||||
ticker := time.NewTicker(checkInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
@@ -356,6 +370,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
|
||||
if verbose {
|
||||
log.Printf("Timeout waiting for container finalization, proceeding with artifact extraction")
|
||||
}
|
||||
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
allFinalized := true
|
||||
@@ -366,12 +381,14 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
|
||||
if verbose {
|
||||
log.Printf("Warning: failed to inspect container %s: %v", testCont.name, err)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if container is in a final state
|
||||
if !isContainerFinalized(inspect.State) {
|
||||
allFinalized = false
|
||||
|
||||
if verbose {
|
||||
log.Printf("Container %s still finalizing (state: %s)", testCont.name, inspect.State.Status)
|
||||
}
|
||||
@@ -384,6 +401,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
|
||||
if verbose {
|
||||
log.Printf("All test containers finalized, ready for artifact extraction")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -400,13 +418,15 @@ func isContainerFinalized(state *container.State) bool {
|
||||
func findProjectRoot(startPath string) string {
|
||||
current := startPath
|
||||
for {
|
||||
if _, err := os.Stat(filepath.Join(current, "go.mod")); err == nil {
|
||||
if _, err := os.Stat(filepath.Join(current, "go.mod")); err == nil { //nolint:noinlineerr
|
||||
return current
|
||||
}
|
||||
|
||||
parent := filepath.Dir(current)
|
||||
if parent == current {
|
||||
return startPath
|
||||
}
|
||||
|
||||
current = parent
|
||||
}
|
||||
}
|
||||
@@ -416,6 +436,7 @@ func boolToInt(b bool) int {
|
||||
if b {
|
||||
return 1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
@@ -428,13 +449,14 @@ type DockerContext struct {
|
||||
}
|
||||
|
||||
// createDockerClient creates a Docker client with context detection.
|
||||
func createDockerClient() (*client.Client, error) {
|
||||
contextInfo, err := getCurrentDockerContext()
|
||||
func createDockerClient(ctx context.Context) (*client.Client, error) {
|
||||
contextInfo, err := getCurrentDockerContext(ctx)
|
||||
if err != nil {
|
||||
return client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
}
|
||||
|
||||
var clientOpts []client.Opt
|
||||
|
||||
clientOpts = append(clientOpts, client.WithAPIVersionNegotiation())
|
||||
|
||||
if contextInfo != nil {
|
||||
@@ -444,6 +466,7 @@ func createDockerClient() (*client.Client, error) {
|
||||
if runConfig.Verbose {
|
||||
log.Printf("Using Docker host from context '%s': %s", contextInfo.Name, host)
|
||||
}
|
||||
|
||||
clientOpts = append(clientOpts, client.WithHost(host))
|
||||
}
|
||||
}
|
||||
@@ -458,16 +481,17 @@ func createDockerClient() (*client.Client, error) {
|
||||
}
|
||||
|
||||
// getCurrentDockerContext retrieves the current Docker context information.
|
||||
func getCurrentDockerContext() (*DockerContext, error) {
|
||||
cmd := exec.Command("docker", "context", "inspect")
|
||||
func getCurrentDockerContext(ctx context.Context) (*DockerContext, error) {
|
||||
cmd := exec.CommandContext(ctx, "docker", "context", "inspect")
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get docker context: %w", err)
|
||||
return nil, fmt.Errorf("getting docker context: %w", err)
|
||||
}
|
||||
|
||||
var contexts []DockerContext
|
||||
if err := json.Unmarshal(output, &contexts); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse docker context: %w", err)
|
||||
if err := json.Unmarshal(output, &contexts); err != nil { //nolint:noinlineerr
|
||||
return nil, fmt.Errorf("parsing docker context: %w", err)
|
||||
}
|
||||
|
||||
if len(contexts) > 0 {
|
||||
@@ -486,12 +510,13 @@ func getDockerSocketPath() string {
|
||||
|
||||
// checkImageAvailableLocally checks if the specified Docker image is available locally.
|
||||
func checkImageAvailableLocally(ctx context.Context, cli *client.Client, imageName string) (bool, error) {
|
||||
_, _, err := cli.ImageInspectWithRaw(ctx, imageName)
|
||||
_, _, err := cli.ImageInspectWithRaw(ctx, imageName) //nolint:staticcheck // SA1019: deprecated but functional
|
||||
if err != nil {
|
||||
if client.IsErrNotFound(err) {
|
||||
if client.IsErrNotFound(err) { //nolint:staticcheck // SA1019: deprecated but functional
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("failed to inspect image %s: %w", imageName, err)
|
||||
|
||||
return false, fmt.Errorf("inspecting image %s: %w", imageName, err)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
@@ -502,13 +527,14 @@ func ensureImageAvailable(ctx context.Context, cli *client.Client, imageName str
|
||||
// First check if image is available locally
|
||||
available, err := checkImageAvailableLocally(ctx, cli, imageName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check local image availability: %w", err)
|
||||
return fmt.Errorf("checking local image availability: %w", err)
|
||||
}
|
||||
|
||||
if available {
|
||||
if verbose {
|
||||
log.Printf("Image %s is available locally", imageName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -519,20 +545,21 @@ func ensureImageAvailable(ctx context.Context, cli *client.Client, imageName str
|
||||
|
||||
reader, err := cli.ImagePull(ctx, imageName, image.PullOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to pull image %s: %w", imageName, err)
|
||||
return fmt.Errorf("pulling image %s: %w", imageName, err)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
if verbose {
|
||||
_, err = io.Copy(os.Stdout, reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read pull output: %w", err)
|
||||
return fmt.Errorf("reading pull output: %w", err)
|
||||
}
|
||||
} else {
|
||||
_, err = io.Copy(io.Discard, reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read pull output: %w", err)
|
||||
return fmt.Errorf("reading pull output: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("Image %s pulled successfully", imageName)
|
||||
}
|
||||
|
||||
@@ -547,9 +574,11 @@ func listControlFiles(logsDir string) {
|
||||
return
|
||||
}
|
||||
|
||||
var logFiles []string
|
||||
var dataFiles []string
|
||||
var dataDirs []string
|
||||
var (
|
||||
logFiles []string
|
||||
dataFiles []string
|
||||
dataDirs []string
|
||||
)
|
||||
|
||||
for _, entry := range entries {
|
||||
name := entry.Name()
|
||||
@@ -578,6 +607,7 @@ func listControlFiles(logsDir string) {
|
||||
|
||||
if len(logFiles) > 0 {
|
||||
log.Printf("Headscale logs:")
|
||||
|
||||
for _, file := range logFiles {
|
||||
log.Printf(" %s", file)
|
||||
}
|
||||
@@ -585,9 +615,11 @@ func listControlFiles(logsDir string) {
|
||||
|
||||
if len(dataFiles) > 0 || len(dataDirs) > 0 {
|
||||
log.Printf("Headscale data:")
|
||||
|
||||
for _, file := range dataFiles {
|
||||
log.Printf(" %s", file)
|
||||
}
|
||||
|
||||
for _, dir := range dataDirs {
|
||||
log.Printf(" %s/", dir)
|
||||
}
|
||||
@@ -596,25 +628,27 @@ func listControlFiles(logsDir string) {
|
||||
|
||||
// extractArtifactsFromContainers collects container logs and files from the specific test run.
|
||||
func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDir string, verbose bool) error {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
return fmt.Errorf("creating Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
// List all containers
|
||||
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list containers: %w", err)
|
||||
return fmt.Errorf("listing containers: %w", err)
|
||||
}
|
||||
|
||||
// Get containers from the specific test run
|
||||
currentTestContainers := getCurrentTestContainers(containers, testContainerID, verbose)
|
||||
|
||||
extractedCount := 0
|
||||
|
||||
for _, cont := range currentTestContainers {
|
||||
// Extract container logs and tar files
|
||||
if err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose); err != nil {
|
||||
err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose)
|
||||
if err != nil {
|
||||
if verbose {
|
||||
log.Printf("Warning: failed to extract artifacts from container %s (%s): %v", cont.name, cont.ID[:12], err)
|
||||
}
|
||||
@@ -622,6 +656,7 @@ func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDi
|
||||
if verbose {
|
||||
log.Printf("Extracted artifacts from container %s (%s)", cont.name, cont.ID[:12])
|
||||
}
|
||||
|
||||
extractedCount++
|
||||
}
|
||||
}
|
||||
@@ -645,11 +680,13 @@ func getCurrentTestContainers(containers []container.Summary, testContainerID st
|
||||
|
||||
// Find the test container to get its run ID label
|
||||
var runID string
|
||||
|
||||
for _, cont := range containers {
|
||||
if cont.ID == testContainerID {
|
||||
if cont.Labels != nil {
|
||||
runID = cont.Labels["hi.run-id"]
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -690,18 +727,21 @@ func getCurrentTestContainers(containers []container.Summary, testContainerID st
|
||||
// extractContainerArtifacts saves logs and tar files from a container.
|
||||
func extractContainerArtifacts(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
|
||||
// Ensure the logs directory exists
|
||||
if err := os.MkdirAll(logsDir, 0o755); err != nil {
|
||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
||||
err := os.MkdirAll(logsDir, defaultDirPerm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating logs directory: %w", err)
|
||||
}
|
||||
|
||||
// Extract container logs
|
||||
if err := extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
|
||||
return fmt.Errorf("failed to extract logs: %w", err)
|
||||
err = extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose)
|
||||
if err != nil {
|
||||
return fmt.Errorf("extracting logs: %w", err)
|
||||
}
|
||||
|
||||
// Extract tar files for headscale containers only
|
||||
if strings.HasPrefix(containerName, "hs-") {
|
||||
if err := extractContainerFiles(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
|
||||
err := extractContainerFiles(ctx, cli, containerID, containerName, logsDir, verbose)
|
||||
if err != nil {
|
||||
if verbose {
|
||||
log.Printf("Warning: failed to extract files from %s: %v", containerName, err)
|
||||
}
|
||||
@@ -723,7 +763,7 @@ func extractContainerLogs(ctx context.Context, cli *client.Client, containerID,
|
||||
Tail: "all",
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get container logs: %w", err)
|
||||
return fmt.Errorf("getting container logs: %w", err)
|
||||
}
|
||||
defer logReader.Close()
|
||||
|
||||
@@ -737,17 +777,17 @@ func extractContainerLogs(ctx context.Context, cli *client.Client, containerID,
|
||||
// Demultiplex the Docker logs stream to separate stdout and stderr
|
||||
_, err = stdcopy.StdCopy(&stdoutBuf, &stderrBuf, logReader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to demultiplex container logs: %w", err)
|
||||
return fmt.Errorf("demultiplexing container logs: %w", err)
|
||||
}
|
||||
|
||||
// Write stdout logs
|
||||
if err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0o644); err != nil {
|
||||
return fmt.Errorf("failed to write stdout log: %w", err)
|
||||
if err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0o644); err != nil { //nolint:gosec,noinlineerr // log files should be readable
|
||||
return fmt.Errorf("writing stdout log: %w", err)
|
||||
}
|
||||
|
||||
// Write stderr logs
|
||||
if err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0o644); err != nil {
|
||||
return fmt.Errorf("failed to write stderr log: %w", err)
|
||||
if err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0o644); err != nil { //nolint:gosec,noinlineerr // log files should be readable
|
||||
return fmt.Errorf("writing stderr log: %w", err)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
|
||||
@@ -38,13 +38,13 @@ func runDoctorCheck(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// Check 3: Go installation
|
||||
results = append(results, checkGoInstallation())
|
||||
results = append(results, checkGoInstallation(ctx))
|
||||
|
||||
// Check 4: Git repository
|
||||
results = append(results, checkGitRepository())
|
||||
results = append(results, checkGitRepository(ctx))
|
||||
|
||||
// Check 5: Required files
|
||||
results = append(results, checkRequiredFiles())
|
||||
results = append(results, checkRequiredFiles(ctx))
|
||||
|
||||
// Display results
|
||||
displayDoctorResults(results)
|
||||
@@ -86,7 +86,7 @@ func checkDockerBinary() DoctorResult {
|
||||
|
||||
// checkDockerDaemon verifies Docker daemon is running and accessible.
|
||||
func checkDockerDaemon(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Daemon",
|
||||
@@ -124,8 +124,8 @@ func checkDockerDaemon(ctx context.Context) DoctorResult {
|
||||
}
|
||||
|
||||
// checkDockerContext verifies Docker context configuration.
|
||||
func checkDockerContext(_ context.Context) DoctorResult {
|
||||
contextInfo, err := getCurrentDockerContext()
|
||||
func checkDockerContext(ctx context.Context) DoctorResult {
|
||||
contextInfo, err := getCurrentDockerContext(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Context",
|
||||
@@ -155,7 +155,7 @@ func checkDockerContext(_ context.Context) DoctorResult {
|
||||
|
||||
// checkDockerSocket verifies Docker socket accessibility.
|
||||
func checkDockerSocket(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Socket",
|
||||
@@ -192,7 +192,7 @@ func checkDockerSocket(ctx context.Context) DoctorResult {
|
||||
|
||||
// checkGolangImage verifies the golang Docker image is available locally or can be pulled.
|
||||
func checkGolangImage(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Golang Image",
|
||||
@@ -251,7 +251,7 @@ func checkGolangImage(ctx context.Context) DoctorResult {
|
||||
}
|
||||
|
||||
// checkGoInstallation verifies Go is installed and working.
|
||||
func checkGoInstallation() DoctorResult {
|
||||
func checkGoInstallation(ctx context.Context) DoctorResult {
|
||||
_, err := exec.LookPath("go")
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
@@ -265,7 +265,8 @@ func checkGoInstallation() DoctorResult {
|
||||
}
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", "version")
|
||||
cmd := exec.CommandContext(ctx, "go", "version")
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
@@ -285,8 +286,9 @@ func checkGoInstallation() DoctorResult {
|
||||
}
|
||||
|
||||
// checkGitRepository verifies we're in a git repository.
|
||||
func checkGitRepository() DoctorResult {
|
||||
cmd := exec.Command("git", "rev-parse", "--git-dir")
|
||||
func checkGitRepository(ctx context.Context) DoctorResult {
|
||||
cmd := exec.CommandContext(ctx, "git", "rev-parse", "--git-dir")
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
@@ -308,7 +310,7 @@ func checkGitRepository() DoctorResult {
|
||||
}
|
||||
|
||||
// checkRequiredFiles verifies required files exist.
|
||||
func checkRequiredFiles() DoctorResult {
|
||||
func checkRequiredFiles(ctx context.Context) DoctorResult {
|
||||
requiredFiles := []string{
|
||||
"go.mod",
|
||||
"integration/",
|
||||
@@ -316,9 +318,12 @@ func checkRequiredFiles() DoctorResult {
|
||||
}
|
||||
|
||||
var missingFiles []string
|
||||
|
||||
for _, file := range requiredFiles {
|
||||
cmd := exec.Command("test", "-e", file)
|
||||
if err := cmd.Run(); err != nil {
|
||||
cmd := exec.CommandContext(ctx, "test", "-e", file)
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
missingFiles = append(missingFiles, file)
|
||||
}
|
||||
}
|
||||
@@ -350,6 +355,7 @@ func displayDoctorResults(results []DoctorResult) {
|
||||
|
||||
for _, result := range results {
|
||||
var icon string
|
||||
|
||||
switch result.Status {
|
||||
case "PASS":
|
||||
icon = "✅"
|
||||
|
||||
@@ -79,13 +79,18 @@ func main() {
|
||||
}
|
||||
|
||||
func cleanAll(ctx context.Context) error {
|
||||
if err := killTestContainers(ctx); err != nil {
|
||||
err := killTestContainers(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pruneDockerNetworks(ctx); err != nil {
|
||||
|
||||
err = pruneDockerNetworks(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cleanOldImages(ctx); err != nil {
|
||||
|
||||
err = cleanOldImages(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -48,7 +48,9 @@ func runIntegrationTest(env *command.Env) error {
|
||||
if runConfig.Verbose {
|
||||
log.Printf("Running pre-flight system checks...")
|
||||
}
|
||||
if err := runDoctorCheck(env.Context()); err != nil {
|
||||
|
||||
err := runDoctorCheck(env.Context())
|
||||
if err != nil {
|
||||
return fmt.Errorf("pre-flight checks failed: %w", err)
|
||||
}
|
||||
|
||||
@@ -66,15 +68,15 @@ func runIntegrationTest(env *command.Env) error {
|
||||
func detectGoVersion() string {
|
||||
goModPath := filepath.Join("..", "..", "go.mod")
|
||||
|
||||
if _, err := os.Stat("go.mod"); err == nil {
|
||||
if _, err := os.Stat("go.mod"); err == nil { //nolint:noinlineerr
|
||||
goModPath = "go.mod"
|
||||
} else if _, err := os.Stat("../../go.mod"); err == nil {
|
||||
} else if _, err := os.Stat("../../go.mod"); err == nil { //nolint:noinlineerr
|
||||
goModPath = "../../go.mod"
|
||||
}
|
||||
|
||||
content, err := os.ReadFile(goModPath)
|
||||
if err != nil {
|
||||
return "1.25"
|
||||
return "1.26.1"
|
||||
}
|
||||
|
||||
lines := splitLines(string(content))
|
||||
@@ -89,13 +91,15 @@ func detectGoVersion() string {
|
||||
}
|
||||
}
|
||||
|
||||
return "1.25"
|
||||
return "1.26.1"
|
||||
}
|
||||
|
||||
// splitLines splits a string into lines without using strings.Split.
|
||||
func splitLines(s string) []string {
|
||||
var lines []string
|
||||
var current string
|
||||
var (
|
||||
lines []string
|
||||
current string
|
||||
)
|
||||
|
||||
for _, char := range s {
|
||||
if char == '\n' {
|
||||
|
||||
@@ -18,6 +18,9 @@ import (
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// ErrStatsCollectionAlreadyStarted is returned when trying to start stats collection that is already running.
|
||||
var ErrStatsCollectionAlreadyStarted = errors.New("stats collection already started")
|
||||
|
||||
// ContainerStats represents statistics for a single container.
|
||||
type ContainerStats struct {
|
||||
ContainerID string
|
||||
@@ -44,10 +47,10 @@ type StatsCollector struct {
|
||||
}
|
||||
|
||||
// NewStatsCollector creates a new stats collector instance.
|
||||
func NewStatsCollector() (*StatsCollector, error) {
|
||||
cli, err := createDockerClient()
|
||||
func NewStatsCollector(ctx context.Context) (*StatsCollector, error) {
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Docker client: %w", err)
|
||||
return nil, fmt.Errorf("creating Docker client: %w", err)
|
||||
}
|
||||
|
||||
return &StatsCollector{
|
||||
@@ -63,17 +66,19 @@ func (sc *StatsCollector) StartCollection(ctx context.Context, runID string, ver
|
||||
defer sc.mutex.Unlock()
|
||||
|
||||
if sc.collectionStarted {
|
||||
return errors.New("stats collection already started")
|
||||
return ErrStatsCollectionAlreadyStarted
|
||||
}
|
||||
|
||||
sc.collectionStarted = true
|
||||
|
||||
// Start monitoring existing containers
|
||||
sc.wg.Add(1)
|
||||
|
||||
go sc.monitorExistingContainers(ctx, runID, verbose)
|
||||
|
||||
// Start Docker events monitoring for new containers
|
||||
sc.wg.Add(1)
|
||||
|
||||
go sc.monitorDockerEvents(ctx, runID, verbose)
|
||||
|
||||
if verbose {
|
||||
@@ -87,10 +92,12 @@ func (sc *StatsCollector) StartCollection(ctx context.Context, runID string, ver
|
||||
func (sc *StatsCollector) StopCollection() {
|
||||
// Check if already stopped without holding lock
|
||||
sc.mutex.RLock()
|
||||
|
||||
if !sc.collectionStarted {
|
||||
sc.mutex.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
sc.mutex.RUnlock()
|
||||
|
||||
// Signal stop to all goroutines
|
||||
@@ -114,6 +121,7 @@ func (sc *StatsCollector) monitorExistingContainers(ctx context.Context, runID s
|
||||
if verbose {
|
||||
log.Printf("Failed to list existing containers: %v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -147,13 +155,13 @@ func (sc *StatsCollector) monitorDockerEvents(ctx context.Context, runID string,
|
||||
case event := <-events:
|
||||
if event.Type == "container" && event.Action == "start" {
|
||||
// Get container details
|
||||
containerInfo, err := sc.client.ContainerInspect(ctx, event.ID)
|
||||
containerInfo, err := sc.client.ContainerInspect(ctx, event.ID) //nolint:staticcheck // SA1019: use Actor.ID
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Convert to types.Container format for consistency
|
||||
cont := types.Container{
|
||||
cont := types.Container{ //nolint:staticcheck // SA1019: use container.Summary
|
||||
ID: containerInfo.ID,
|
||||
Names: []string{containerInfo.Name},
|
||||
Labels: containerInfo.Config.Labels,
|
||||
@@ -167,13 +175,14 @@ func (sc *StatsCollector) monitorDockerEvents(ctx context.Context, runID string,
|
||||
if verbose {
|
||||
log.Printf("Error in Docker events stream: %v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// shouldMonitorContainer determines if a container should be monitored.
|
||||
func (sc *StatsCollector) shouldMonitorContainer(cont types.Container, runID string) bool {
|
||||
func (sc *StatsCollector) shouldMonitorContainer(cont types.Container, runID string) bool { //nolint:staticcheck // SA1019: use container.Summary
|
||||
// Check if it has the correct run ID label
|
||||
if cont.Labels == nil || cont.Labels["hi.run-id"] != runID {
|
||||
return false
|
||||
@@ -213,6 +222,7 @@ func (sc *StatsCollector) startStatsForContainer(ctx context.Context, containerI
|
||||
}
|
||||
|
||||
sc.wg.Add(1)
|
||||
|
||||
go sc.collectStatsForContainer(ctx, containerID, verbose)
|
||||
}
|
||||
|
||||
@@ -226,12 +236,14 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
|
||||
if verbose {
|
||||
log.Printf("Failed to get stats stream for container %s: %v", containerID[:12], err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
defer statsResponse.Body.Close()
|
||||
|
||||
decoder := json.NewDecoder(statsResponse.Body)
|
||||
var prevStats *container.Stats
|
||||
|
||||
var prevStats *container.Stats //nolint:staticcheck // SA1019: use StatsResponse
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -240,12 +252,15 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
var stats container.Stats
|
||||
if err := decoder.Decode(&stats); err != nil {
|
||||
var stats container.Stats //nolint:staticcheck // SA1019: use StatsResponse
|
||||
|
||||
err := decoder.Decode(&stats)
|
||||
if err != nil {
|
||||
// EOF is expected when container stops or stream ends
|
||||
if err.Error() != "EOF" && verbose {
|
||||
log.Printf("Failed to decode stats for container %s: %v", containerID[:12], err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -261,8 +276,10 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
|
||||
// Store the sample (skip first sample since CPU calculation needs previous stats)
|
||||
if prevStats != nil {
|
||||
// Get container stats reference without holding the main mutex
|
||||
var containerStats *ContainerStats
|
||||
var exists bool
|
||||
var (
|
||||
containerStats *ContainerStats
|
||||
exists bool
|
||||
)
|
||||
|
||||
sc.mutex.RLock()
|
||||
containerStats, exists = sc.containers[containerID]
|
||||
@@ -286,7 +303,7 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
|
||||
}
|
||||
|
||||
// calculateCPUPercent calculates CPU usage percentage from Docker stats.
|
||||
func calculateCPUPercent(prevStats, stats *container.Stats) float64 {
|
||||
func calculateCPUPercent(prevStats, stats *container.Stats) float64 { //nolint:staticcheck // SA1019: use StatsResponse
|
||||
// CPU calculation based on Docker's implementation
|
||||
cpuDelta := float64(stats.CPUStats.CPUUsage.TotalUsage) - float64(prevStats.CPUStats.CPUUsage.TotalUsage)
|
||||
systemDelta := float64(stats.CPUStats.SystemUsage) - float64(prevStats.CPUStats.SystemUsage)
|
||||
@@ -331,10 +348,12 @@ type StatsSummary struct {
|
||||
func (sc *StatsCollector) GetSummary() []ContainerStatsSummary {
|
||||
// Take snapshot of container references without holding main lock long
|
||||
sc.mutex.RLock()
|
||||
|
||||
containerRefs := make([]*ContainerStats, 0, len(sc.containers))
|
||||
for _, containerStats := range sc.containers {
|
||||
containerRefs = append(containerRefs, containerStats)
|
||||
}
|
||||
|
||||
sc.mutex.RUnlock()
|
||||
|
||||
summaries := make([]ContainerStatsSummary, 0, len(containerRefs))
|
||||
@@ -384,23 +403,25 @@ func calculateStatsSummary(values []float64) StatsSummary {
|
||||
return StatsSummary{}
|
||||
}
|
||||
|
||||
min := values[0]
|
||||
max := values[0]
|
||||
minVal := values[0]
|
||||
maxVal := values[0]
|
||||
sum := 0.0
|
||||
|
||||
for _, value := range values {
|
||||
if value < min {
|
||||
min = value
|
||||
if value < minVal {
|
||||
minVal = value
|
||||
}
|
||||
if value > max {
|
||||
max = value
|
||||
|
||||
if value > maxVal {
|
||||
maxVal = value
|
||||
}
|
||||
|
||||
sum += value
|
||||
}
|
||||
|
||||
return StatsSummary{
|
||||
Min: min,
|
||||
Max: max,
|
||||
Min: minVal,
|
||||
Max: maxVal,
|
||||
Average: sum / float64(len(values)),
|
||||
}
|
||||
}
|
||||
@@ -434,6 +455,7 @@ func (sc *StatsCollector) CheckMemoryLimits(hsLimitMB, tsLimitMB float64) []Memo
|
||||
}
|
||||
|
||||
summaries := sc.GetSummary()
|
||||
|
||||
var violations []MemoryViolation
|
||||
|
||||
for _, summary := range summaries {
|
||||
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
@@ -15,7 +16,10 @@ type MapConfig struct {
|
||||
Directory string `flag:"directory,Directory to read map responses from"`
|
||||
}
|
||||
|
||||
var mapConfig MapConfig
|
||||
var (
|
||||
mapConfig MapConfig
|
||||
errDirectoryRequired = errors.New("directory is required")
|
||||
)
|
||||
|
||||
func main() {
|
||||
root := command.C{
|
||||
@@ -40,7 +44,7 @@ func main() {
|
||||
// runIntegrationTest executes the integration test workflow.
|
||||
func runOnline(env *command.Env) error {
|
||||
if mapConfig.Directory == "" {
|
||||
return fmt.Errorf("directory is required")
|
||||
return errDirectoryRequired
|
||||
}
|
||||
|
||||
resps, err := mapper.ReadMapResponsesFromDirectory(mapConfig.Directory)
|
||||
@@ -57,5 +61,6 @@ func runOnline(env *command.Env) error {
|
||||
|
||||
os.Stderr.Write(out)
|
||||
os.Stderr.Write([]byte("\n"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
{%
|
||||
include-markdown "../../CONTRIBUTING.md"
|
||||
include-markdown "../../CONTRIBUTING.md"
|
||||
%}
|
||||
|
||||
@@ -24,9 +24,12 @@ We are more than happy to exchange emails, or to have dedicated calls before a P
|
||||
|
||||
## When/Why is Feature X going to be implemented?
|
||||
|
||||
We don't know. We might be working on it. If you're interested in contributing, please post a feature request about it.
|
||||
We use [GitHub Milestones to plan for upcoming Headscale releases](https://github.com/juanfont/headscale/milestones).
|
||||
Have a look at [our current plan](https://github.com/juanfont/headscale/milestones) to get an idea when a specific
|
||||
feature is about to be implemented. The release plan is subject to change at any time.
|
||||
|
||||
Please be aware that there are a number of reasons why we might not accept specific contributions:
|
||||
If you're interested in contributing, please post a feature request about it. Please be aware that there are a number of
|
||||
reasons why we might not accept specific contributions:
|
||||
|
||||
- It is not possible to implement the feature in a way that makes sense in a self-hosted environment.
|
||||
- Given that we are reverse-engineering Tailscale to satisfy our own curiosity, we might be interested in implementing the feature ourselves.
|
||||
@@ -47,8 +50,8 @@ we have a "docker-issues" channel where you can ask for Docker-specific help to
|
||||
## What is the recommended update path? Can I skip multiple versions while updating?
|
||||
|
||||
Please follow the steps outlined in the [upgrade guide](../setup/upgrade.md) to update your existing Headscale
|
||||
installation. Its best to update from one stable version to the next (e.g. 0.24.0 → 0.25.1 → 0.26.1) in case
|
||||
you are multiple releases behind. You should always pick the latest available patch release.
|
||||
installation. Its required to update from one stable version to the next (e.g. 0.26.0 → 0.27.1 → 0.28.0) without
|
||||
skipping minor versions in between. You should always pick the latest available patch release.
|
||||
|
||||
Be sure to check the [changelog](https://github.com/juanfont/headscale/blob/main/CHANGELOG.md) for version specific
|
||||
upgrade instructions and breaking changes.
|
||||
@@ -70,12 +73,12 @@ of Headscale:
|
||||
|
||||
1. An environment with 1000 servers
|
||||
|
||||
- they rarely "move" (change their endpoints)
|
||||
- new nodes are added rarely
|
||||
- they rarely "move" (change their endpoints)
|
||||
- new nodes are added rarely
|
||||
|
||||
2. An environment with 80 laptops/phones (end user devices)
|
||||
1. An environment with 80 laptops/phones (end user devices)
|
||||
|
||||
- nodes move often, e.g. switching from home to office
|
||||
- nodes move often, e.g. switching from home to office
|
||||
|
||||
Headscale calculates a map of all nodes that need to talk to each other,
|
||||
creating this "world map" requires a lot of CPU time. When an event that
|
||||
@@ -139,8 +142,8 @@ connect back to the administrator's node. Why do all nodes see the administrator
|
||||
`tailscale status`?
|
||||
|
||||
This is essentially how Tailscale works. If traffic is allowed to flow in one direction, then both nodes see each other
|
||||
in their output of `tailscale status`. Traffic is still filtered according to the ACL, with the exception of `tailscale
|
||||
ping` which is always allowed in either direction.
|
||||
in their output of `tailscale status`. Traffic is still filtered according to the ACL, with the exception of
|
||||
`tailscale ping` which is always allowed in either direction.
|
||||
|
||||
See also <https://tailscale.com/kb/1087/device-visibility>.
|
||||
|
||||
@@ -157,8 +160,41 @@ indicates which part of the policy is invalid. Follow these steps to fix your po
|
||||
!!! warning "Full server configuration required"
|
||||
|
||||
The above commands to get/set the policy require a complete server configuration file including database settings. A
|
||||
minimal config to [control Headscale via remote CLI](../ref/api.md#grpc) is not sufficient. You may use `headscale
|
||||
-c /path/to/config.yaml` to specify the path to an alternative configuration file.
|
||||
minimal config to [control Headscale via remote CLI](../ref/api.md#grpc) is not sufficient. You may use
|
||||
`headscale -c /path/to/config.yaml` to specify the path to an alternative configuration file.
|
||||
|
||||
## How can I migrate back to the recommended IP prefixes?
|
||||
|
||||
Tailscale only supports the IP prefixes `100.64.0.0/10` and `fd7a:115c:a1e0::/48` or smaller subnets thereof. The
|
||||
following steps can be used to migrate from unsupported IP prefixes back to the supported and recommended ones.
|
||||
|
||||
!!! warning "Backup and test in a demo environment required"
|
||||
|
||||
The commands below update the IP addresses of all nodes in your tailnet and this might have a severe impact in your
|
||||
specific environment. At a minimum:
|
||||
|
||||
- [Create a backup of your database](../setup/upgrade.md#backup)
|
||||
- Test the commands below in a representive demo environment. This allows to catch subsequent connectivity errors
|
||||
early and see how the tailnet behaves in your specific environment.
|
||||
|
||||
- Stop Headscale
|
||||
- Restore the default prefixes in the [configuration file](../ref/configuration.md):
|
||||
```yaml
|
||||
prefixes:
|
||||
v4: 100.64.0.0/10
|
||||
v6: fd7a:115c:a1e0::/48
|
||||
```
|
||||
- Update the `nodes.ipv4` and `nodes.ipv6` columns in the database and assign each node a unique IPv4 and IPv6 address.
|
||||
The following SQL statement assigns IP addresses based on the node ID:
|
||||
```sql
|
||||
UPDATE nodes
|
||||
SET ipv4=concat('100.64.', id/256, '.', id%256),
|
||||
ipv6=concat('fd7a:115c:a1e0::', format('%x', id));
|
||||
```
|
||||
- Update the [policy](../ref/acls.md) to reflect the IP address changes (if any)
|
||||
- Start Headscale
|
||||
|
||||
Nodes should reconnect within a few seconds and pickup their newly assigned IP addresses.
|
||||
|
||||
## How can I avoid to send logs to Tailscale Inc?
|
||||
|
||||
|
||||
@@ -5,16 +5,16 @@ to provide self-hosters and hobbyists with an open-source server they can use fo
|
||||
provides on overview of Headscale's feature and compatibility with the Tailscale control server:
|
||||
|
||||
- [x] Full "base" support of Tailscale's features
|
||||
- [x] Node registration
|
||||
- [x] Interactive
|
||||
- [x] Pre authenticated key
|
||||
- [x] [Node registration](../ref/registration.md)
|
||||
- [x] [Web authentication](../ref/registration.md#web-authentication)
|
||||
- [x] [Pre authenticated key](../ref/registration.md#pre-authenticated-key)
|
||||
- [x] [DNS](../ref/dns.md)
|
||||
- [x] [MagicDNS](https://tailscale.com/kb/1081/magicdns)
|
||||
- [x] [Global and restricted nameservers (split DNS)](https://tailscale.com/kb/1054/dns#nameservers)
|
||||
- [x] [search domains](https://tailscale.com/kb/1054/dns#search-domains)
|
||||
- [x] [Extra DNS records (Headscale only)](../ref/dns.md#setting-extra-dns-records)
|
||||
- [x] [Taildrop (File Sharing)](https://tailscale.com/kb/1106/taildrop)
|
||||
- [x] [Tags](https://tailscale.com/kb/1068/tags)
|
||||
- [x] [Tags](../ref/tags.md)
|
||||
- [x] [Routes](../ref/routes.md)
|
||||
- [x] [Subnet routers](../ref/routes.md#subnet-router)
|
||||
- [x] [Exit nodes](../ref/routes.md#exit-node)
|
||||
@@ -29,7 +29,7 @@ provides on overview of Headscale's feature and compatibility with the Tailscale
|
||||
routers](../ref/routes.md#automatically-approve-routes-of-a-subnet-router) and [exit
|
||||
nodes](../ref/routes.md#automatically-approve-an-exit-node-with-auto-approvers)
|
||||
- [x] [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh)
|
||||
* [x] [Node registration using Single-Sign-On (OpenID Connect)](../ref/oidc.md) ([GitHub label "OIDC"](https://github.com/juanfont/headscale/labels/OIDC))
|
||||
- [x] [Node registration using Single-Sign-On (OpenID Connect)](../ref/oidc.md) ([GitHub label "OIDC"](https://github.com/juanfont/headscale/labels/OIDC))
|
||||
- [x] Basic registration
|
||||
- [x] Update user profile from identity provider
|
||||
- [ ] OIDC groups cannot be used in ACLs
|
||||
|
||||
@@ -222,7 +222,7 @@ Allows access to the internet through [exit nodes](routes.md#exit-node). Can onl
|
||||
|
||||
### `autogroup:member`
|
||||
|
||||
Includes all untagged devices.
|
||||
Includes all [personal (untagged) devices](registration.md/#identity-model).
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -234,7 +234,7 @@ Includes all untagged devices.
|
||||
|
||||
### `autogroup:tagged`
|
||||
|
||||
Includes all devices that have at least one tag.
|
||||
Includes all devices that [have at least one tag](registration.md/#identity-model).
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -245,7 +245,6 @@ Includes all devices that have at least one tag.
|
||||
```
|
||||
|
||||
### `autogroup:self`
|
||||
**(EXPERIMENTAL)**
|
||||
|
||||
!!! warning "The current implementation of `autogroup:self` is inefficient"
|
||||
|
||||
@@ -258,9 +257,11 @@ Includes devices where the same user is authenticated on both the source and des
|
||||
"dst": ["autogroup:self:*"]
|
||||
}
|
||||
```
|
||||
|
||||
*Using `autogroup:self` may cause performance degradation on the Headscale coordinator server in large deployments, as filter rules must be compiled per-node rather than globally and the current implementation is not very efficient.*
|
||||
|
||||
If you experience performance issues, consider using more specific ACL rules or limiting the use of `autogroup:self`.
|
||||
|
||||
```json
|
||||
{
|
||||
// The following rules allow internal users to communicate with their
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
# API
|
||||
|
||||
Headscale provides a [HTTP REST API](#rest-api) and a [gRPC interface](#grpc) which may be used to integrate a [web
|
||||
interface](integration/web-ui.md), [remote control Headscale](#setup-remote-control) or provide a base for custom
|
||||
integration and tooling.
|
||||
@@ -30,8 +31,7 @@ headscale apikeys expire --prefix <PREFIX>
|
||||
- API endpoint: `/api/v1`, e.g. `https://headscale.example.com/api/v1`
|
||||
- Documentation: `/swagger`, e.g. `https://headscale.example.com/swagger`
|
||||
- Headscale Version: `/version`, e.g. `https://headscale.example.com/version`
|
||||
- Authenticate using HTTP Bearer authentication by sending the [API key](#api) with the HTTP `Authorization: Bearer
|
||||
<API_KEY>` header.
|
||||
- Authenticate using HTTP Bearer authentication by sending the [API key](#api) with the HTTP `Authorization: Bearer <API_KEY>` header.
|
||||
|
||||
Start by [creating an API key](#api) and test it with the examples below. Read the API documentation provided by your
|
||||
Headscale server at `/swagger` for details.
|
||||
@@ -54,8 +54,8 @@ Headscale server at `/swagger` for details.
|
||||
|
||||
```console
|
||||
curl -H "Authorization: Bearer <API_KEY>" \
|
||||
-d user=<USER> -d key=<KEY> \
|
||||
https://headscale.example.com/api/v1/node/register
|
||||
--json '{"user": "<USER>", "authId": "AUTH_ID>"}' \
|
||||
https://headscale.example.com/api/v1/auth/register
|
||||
```
|
||||
|
||||
## gRPC
|
||||
@@ -72,17 +72,17 @@ The gRPC interface can be used to control a Headscale instance from a remote mac
|
||||
|
||||
### Setup remote control
|
||||
|
||||
1. Download the [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases). Make
|
||||
sure to use the same version as on the server.
|
||||
1. Download the [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases). Make
|
||||
sure to use the same version as on the server.
|
||||
|
||||
1. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`
|
||||
1. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`
|
||||
|
||||
1. Make `headscale` executable: `chmod +x /usr/local/bin/headscale`
|
||||
1. Make `headscale` executable: `chmod +x /usr/local/bin/headscale`
|
||||
|
||||
1. [Create an API key](#api) on the Headscale server.
|
||||
1. [Create an API key](#api) on the Headscale server.
|
||||
|
||||
1. Provide the connection parameters for the remote Headscale server either via a minimal YAML configuration file or
|
||||
via environment variables:
|
||||
1. Provide the connection parameters for the remote Headscale server either via a minimal YAML configuration file or
|
||||
via environment variables:
|
||||
|
||||
=== "Minimal YAML configuration file"
|
||||
|
||||
@@ -102,7 +102,7 @@ The gRPC interface can be used to control a Headscale instance from a remote mac
|
||||
This instructs the `headscale` binary to connect to a remote instance at `<HEADSCALE_ADDRESS>:<PORT>`, instead of
|
||||
connecting to the local instance.
|
||||
|
||||
1. Test the connection by listing all nodes:
|
||||
1. Test the connection by listing all nodes:
|
||||
|
||||
```shell
|
||||
headscale nodes list
|
||||
|
||||
@@ -17,8 +17,8 @@
|
||||
|
||||
=== "View on GitHub"
|
||||
|
||||
* Development version: <https://github.com/juanfont/headscale/blob/main/config-example.yaml>
|
||||
* Version {{ headscale.version }}: <https://github.com/juanfont/headscale/blob/v{{ headscale.version }}/config-example.yaml>
|
||||
- Development version: <https://github.com/juanfont/headscale/blob/main/config-example.yaml>
|
||||
- Version {{ headscale.version }}: https://github.com/juanfont/headscale/blob/v{{ headscale.version }}/config-example.yaml
|
||||
|
||||
=== "Download with `wget`"
|
||||
|
||||
|
||||
@@ -63,10 +63,10 @@ maps fetched via URL or to offer your own, custom DERP servers to nodes.
|
||||
ID. You can explicitly disable a specific region by setting its region ID to `null`. The following sample
|
||||
`derp.yaml` disables the New York DERP region (which has the region ID 1):
|
||||
|
||||
```yaml title="derp.yaml"
|
||||
regions:
|
||||
1: null
|
||||
```
|
||||
```yaml title="derp.yaml"
|
||||
regions:
|
||||
1: null
|
||||
```
|
||||
|
||||
Use the following configuration to serve the default DERP map (excluding New York) to nodes:
|
||||
|
||||
@@ -165,11 +165,10 @@ Any Tailscale client may be used to introspect the DERP map and to check for con
|
||||
Additional DERP related metrics and information is available via the [metrics and debug
|
||||
endpoint](./debug.md#metrics-and-debug-endpoint).
|
||||
|
||||
[^1]:
|
||||
This assumes that the default region code of the [configuration file](./configuration.md) is used.
|
||||
|
||||
## Limitations
|
||||
|
||||
- The embedded DERP server can't be used for Tailscale's captive portal checks as it doesn't support the `/generate_204`
|
||||
endpoint via HTTP on port tcp/80.
|
||||
- There are no speed or throughput optimisations, the main purpose is to assist in node connectivity.
|
||||
|
||||
[^1]: This assumes that the default region code of the [configuration file](./configuration.md) is used.
|
||||
|
||||
@@ -25,7 +25,7 @@ hostname and port combination "http://hostname-in-magic-dns.myvpn.example.com:30
|
||||
|
||||
Currently, [only A and AAAA records are processed by Tailscale](https://github.com/tailscale/tailscale/blob/v1.86.5/ipn/ipnlocal/node_backend.go#L662).
|
||||
|
||||
1. Configure extra DNS records using one of the available configuration options:
|
||||
1. Configure extra DNS records using one of the available configuration options:
|
||||
|
||||
=== "Static entries, via `dns.extra_records`"
|
||||
|
||||
@@ -66,12 +66,12 @@ hostname and port combination "http://hostname-in-magic-dns.myvpn.example.com:30
|
||||
|
||||
!!! tip "Good to know"
|
||||
|
||||
* The `dns.extra_records_path` option in the [configuration file](./configuration.md) needs to reference the
|
||||
- The `dns.extra_records_path` option in the [configuration file](./configuration.md) needs to reference the
|
||||
JSON file containing extra DNS records.
|
||||
* Be sure to "sort keys" and produce a stable output in case you generate the JSON file with a script.
|
||||
- Be sure to "sort keys" and produce a stable output in case you generate the JSON file with a script.
|
||||
Headscale uses a checksum to detect changes to the file and a stable output avoids unnecessary processing.
|
||||
|
||||
1. Verify that DNS records are properly set using the DNS querying tool of your choice:
|
||||
1. Verify that DNS records are properly set using the DNS querying tool of your choice:
|
||||
|
||||
=== "Query with dig"
|
||||
|
||||
@@ -87,7 +87,7 @@ hostname and port combination "http://hostname-in-magic-dns.myvpn.example.com:30
|
||||
100.64.0.3
|
||||
```
|
||||
|
||||
1. Optional: Setup the reverse proxy
|
||||
1. Optional: Setup the reverse proxy
|
||||
|
||||
The motivating example here was to be able to access internal monitoring services on the same host without
|
||||
specifying a port, depicted as NGINX configuration snippet:
|
||||
|
||||
@@ -20,5 +20,7 @@ Headscale doesn't provide a built-in web interface but users may pick one from t
|
||||
- [headscale-console](https://github.com/rickli-cloud/headscale-console) - WebAssembly-based client supporting SSH, VNC
|
||||
and RDP with optional self-service capabilities
|
||||
- [headscale-piying](https://github.com/wszgrcy/headscale-piying) - Headscale web UI, supports visual ACL configuration
|
||||
- [HeadControl](https://github.com/ahmadzip/HeadControl) - Minimal Headscale admin dashboard, built with Go and HTMX
|
||||
- [Headscale Manager](https://github.com/hkdone/headscalemanager) - Headscale UI for Android
|
||||
|
||||
You can ask for support on our [Discord server](https://discord.gg/c84AZQhmpx) in the "web-interfaces" channel.
|
||||
|
||||
102
docs/ref/oidc.md
102
docs/ref/oidc.md
@@ -40,9 +40,9 @@ A basic configuration connects Headscale to an identity provider and typically r
|
||||
|
||||
=== "Identity provider"
|
||||
|
||||
* Create a new confidential client (`Client ID`, `Client secret`)
|
||||
* Add Headscale's OIDC callback URL as valid redirect URL: `https://headscale.example.com/oidc/callback`
|
||||
* Configure additional parameters to improve user experience such as: name, description, logo, …
|
||||
- Create a new confidential client (`Client ID`, `Client secret`)
|
||||
- Add Headscale's OIDC callback URL as valid redirect URL: `https://headscale.example.com/oidc/callback`
|
||||
- Configure additional parameters to improve user experience such as: name, description, logo, …
|
||||
|
||||
### Enable PKCE (recommended)
|
||||
|
||||
@@ -63,8 +63,8 @@ recommended and needs to be configured for Headscale and the identity provider a
|
||||
|
||||
=== "Identity provider"
|
||||
|
||||
* Enable PKCE for the headscale client
|
||||
* Set the PKCE challenge method to "S256"
|
||||
- Enable PKCE for the headscale client
|
||||
- Set the PKCE challenge method to "S256"
|
||||
|
||||
### Authorize users with filters
|
||||
|
||||
@@ -75,11 +75,11 @@ are configured, a user needs to pass all of them.
|
||||
|
||||
=== "Allowed domains"
|
||||
|
||||
* Check the email domain of each authenticating user against the list of allowed domains and only authorize users
|
||||
- Check the email domain of each authenticating user against the list of allowed domains and only authorize users
|
||||
whose email domain matches `example.com`.
|
||||
* A verified email address is required [unless email verification is disabled](#control-email-verification).
|
||||
* Access allowed: `alice@example.com`
|
||||
* Access denied: `bob@example.net`
|
||||
- A verified email address is required [unless email verification is disabled](#control-email-verification).
|
||||
- Access allowed: `alice@example.com`
|
||||
- Access denied: `bob@example.net`
|
||||
|
||||
```yaml hl_lines="5-6"
|
||||
oidc:
|
||||
@@ -92,11 +92,11 @@ are configured, a user needs to pass all of them.
|
||||
|
||||
=== "Allowed users/emails"
|
||||
|
||||
* Check the email address of each authenticating user against the list of allowed email addresses and only authorize
|
||||
- Check the email address of each authenticating user against the list of allowed email addresses and only authorize
|
||||
users whose email is part of the `allowed_users` list.
|
||||
* A verified email address is required [unless email verification is disabled](#control-email-verification).
|
||||
* Access allowed: `alice@example.com`, `bob@example.net`
|
||||
* Access denied: `mallory@example.net`
|
||||
- A verified email address is required [unless email verification is disabled](#control-email-verification).
|
||||
- Access allowed: `alice@example.com`, `bob@example.net`
|
||||
- Access denied: `mallory@example.net`
|
||||
|
||||
```yaml hl_lines="5-7"
|
||||
oidc:
|
||||
@@ -110,10 +110,10 @@ are configured, a user needs to pass all of them.
|
||||
|
||||
=== "Allowed groups"
|
||||
|
||||
* Use the OIDC `groups` claim of each authenticating user to get their group membership and only authorize users
|
||||
- Use the OIDC `groups` claim of each authenticating user to get their group membership and only authorize users
|
||||
which are members in at least one of the referenced groups.
|
||||
* Access allowed: users in the `headscale_users` group
|
||||
* Access denied: users without groups, users with other groups
|
||||
- Access allowed: users in the `headscale_users` group
|
||||
- Access denied: users without groups, users with other groups
|
||||
|
||||
```yaml hl_lines="5-7"
|
||||
oidc:
|
||||
@@ -163,7 +163,6 @@ Access Token.
|
||||
Please keep in mind that the Access Token is typically a short-lived token that expires within a few minutes. You
|
||||
will have to configure token expiration in your identity provider to avoid frequent re-authentication.
|
||||
|
||||
|
||||
```yaml hl_lines="5"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
@@ -175,6 +174,7 @@ Access Token.
|
||||
!!! tip "Expire a node and force re-authentication"
|
||||
|
||||
A node can be expired immediately via:
|
||||
|
||||
```console
|
||||
headscale node expire -i <NODE_ID>
|
||||
```
|
||||
@@ -185,7 +185,8 @@ You may refer to users in the Headscale policy via:
|
||||
|
||||
- Email address
|
||||
- Username
|
||||
- Provider identifier (only available in the database or from your identity provider)
|
||||
- Provider identifier (this value is currently only available from the [API](api.md), database or directly from your
|
||||
identity provider)
|
||||
|
||||
!!! note "A user identifier in the policy must contain a single `@`"
|
||||
|
||||
@@ -200,6 +201,34 @@ You may refer to users in the Headscale policy via:
|
||||
consequences for Headscale where a policy might no longer work or a user might obtain more access by hijacking an
|
||||
existing username or email address.
|
||||
|
||||
!!! tip "How to use the provider identifier in the policy"
|
||||
|
||||
The provider identifier uniquely identifies an OIDC user and a well-behaving identity provider guarantees that this
|
||||
value never changes for a particular user. It is usually an opaque and long string and its value is currently only
|
||||
available from the [API](api.md), database or directly from your identity provider).
|
||||
|
||||
Use the [API](api.md) with the `/api/v1/user` endpoint to fetch the provider identifier (`providerId`). The value
|
||||
(be sure to append an `@` in case the provider identifier doesn't already contain an `@` somewhere) can be used
|
||||
directly to reference a user in the policy. To improve readability of the policy, one may use the `groups` section
|
||||
as an alias:
|
||||
|
||||
```json
|
||||
{
|
||||
"groups": {
|
||||
"group:alice": [
|
||||
"https://soo.example.com/oauth2/openid/59ac9125-c31b-46c5-814e-06242908cf57@"
|
||||
]
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:alice"],
|
||||
"dst": ["*:*"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Supported OIDC claims
|
||||
|
||||
Headscale uses [the standard OIDC claims](https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims) to
|
||||
@@ -250,8 +279,9 @@ Authelia is fully supported by Headscale.
|
||||
### Authentik
|
||||
|
||||
- Authentik is fully supported by Headscale.
|
||||
- [Headscale does not JSON Web Encryption](https://github.com/juanfont/headscale/issues/2446). Leave the field
|
||||
- [Headscale does not support JSON Web Encryption](https://github.com/juanfont/headscale/issues/2446). Leave the field
|
||||
`Encryption Key` in the providers section unset.
|
||||
- See Authentik's [Integrate with Headscale](https://integrations.goauthentik.io/networking/headscale/)
|
||||
|
||||
### Google OAuth
|
||||
|
||||
@@ -273,22 +303,30 @@ Console.
|
||||
#### Steps
|
||||
|
||||
1. Go to [Google Console](https://console.cloud.google.com) and login or create an account if you don't have one.
|
||||
2. Create a project (if you don't already have one).
|
||||
3. On the left hand menu, go to `APIs and services` -> `Credentials`
|
||||
4. Click `Create Credentials` -> `OAuth client ID`
|
||||
5. Under `Application Type`, choose `Web Application`
|
||||
6. For `Name`, enter whatever you like
|
||||
7. Under `Authorised redirect URIs`, add Headscale's OIDC callback URL: `https://headscale.example.com/oidc/callback`
|
||||
8. Click `Save` at the bottom of the form
|
||||
9. Take note of the `Client ID` and `Client secret`, you can also download it for reference if you need it.
|
||||
10. [Configure Headscale following the "Basic configuration" steps](#basic-configuration). The issuer URL for Google
|
||||
OAuth is: `https://accounts.google.com`.
|
||||
1. Create a project (if you don't already have one).
|
||||
1. On the left hand menu, go to `APIs and services` -> `Credentials`
|
||||
1. Click `Create Credentials` -> `OAuth client ID`
|
||||
1. Under `Application Type`, choose `Web Application`
|
||||
1. For `Name`, enter whatever you like
|
||||
1. Under `Authorised redirect URIs`, add Headscale's OIDC callback URL: `https://headscale.example.com/oidc/callback`
|
||||
1. Click `Save` at the bottom of the form
|
||||
1. Take note of the `Client ID` and `Client secret`, you can also download it for reference if you need it.
|
||||
1. [Configure Headscale following the "Basic configuration" steps](#basic-configuration). The issuer URL for Google
|
||||
OAuth is: `https://accounts.google.com`.
|
||||
|
||||
### Kanidm
|
||||
|
||||
- Kanidm is fully supported by Headscale.
|
||||
- Groups for the [allowed groups filter](#authorize-users-with-filters) need to be specified with their full SPN, for
|
||||
example: `headscale_users@sso.example.com`.
|
||||
- Kanidm sends the full SPN (`alice@sso.example.com`) as `preferred_username` by default. Headscale stores this value as
|
||||
username which might be confusing as the username and email fields now contain values that look like an email address.
|
||||
[Kanidm can be configured to send the short username as `preferred_username` attribute
|
||||
instead](https://kanidm.github.io/kanidm/stable/integrations/oauth2.html#short-names):
|
||||
```console
|
||||
kanidm system oauth2 prefer-short-username <client name>
|
||||
```
|
||||
Once configured, the short username in Headscale will be `alice` and can be referred to as `alice@` in the policy.
|
||||
|
||||
### Keycloak
|
||||
|
||||
@@ -332,3 +370,9 @@ oidc:
|
||||
|
||||
Groups for the [allowed groups filter](#authorize-users-with-filters) need to be specified with their group ID (UUID) instead
|
||||
of the group name.
|
||||
|
||||
## Switching OIDC providers
|
||||
|
||||
Headscale only supports a single OIDC provider in its configuration, but it does store the provider identifier of each user. When switching providers, this might lead to issues with existing users: all user details (name, email, groups) might be identical with the new provider, but the identifier will differ. Headscale will be unable to create a new user as the name and email will already be in use for the existing users.
|
||||
|
||||
At this time, you will need to manually update the `provider_identifier` column in the `users` table for each user with the appropriate value for the new provider. The identifier is built from the `iss` and `sub` claims of the OIDC ID token, for example `https://id.example.com/12340987`.
|
||||
|
||||
141
docs/ref/registration.md
Normal file
141
docs/ref/registration.md
Normal file
@@ -0,0 +1,141 @@
|
||||
# Registration methods
|
||||
|
||||
Headscale supports multiple ways to register a node. The preferred registration method depends on the identity of a node
|
||||
and your use case.
|
||||
|
||||
## Identity model
|
||||
|
||||
Tailscale's identity model distinguishes between personal and tagged nodes:
|
||||
|
||||
- A personal node (or user-owned node) is owned by a human and typically refers to end-user devices such as laptops,
|
||||
workstations or mobile phones. End-user devices are managed by a single user.
|
||||
- A tagged node (or service-based node or non-human node) provides services to the network. Common examples include web-
|
||||
and database servers. Those nodes are typically managed by a team of users. Some additional restrictions apply for
|
||||
tagged nodes, e.g. a tagged node is not allowed to [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh) into a
|
||||
personal node.
|
||||
|
||||
Headscale implements Tailscale's identity model and distinguishes between personal and tagged nodes where a personal
|
||||
node is owned by a Headscale user and a tagged node is owned by a tag. Tagged devices are grouped under the special user
|
||||
`tagged-devices`.
|
||||
|
||||
## Registration methods
|
||||
|
||||
There are two main ways to register new nodes, [web authentication](#web-authentication) and [registration with a pre
|
||||
authenticated key](#pre-authenticated-key). Both methods can be used to register personal and tagged nodes.
|
||||
|
||||
### Web authentication
|
||||
|
||||
Web authentication is the default method to register a new node. It's interactive, where the client initiates the
|
||||
registration and the Headscale administrator needs to approve the new node before it is allowed to join the network. A
|
||||
node can be approved with:
|
||||
|
||||
- Headscale CLI (described in this documentation)
|
||||
- [Headscale API](api.md)
|
||||
- Or delegated to an identity provider via [OpenID Connect](oidc.md)
|
||||
|
||||
Web authentication relies on the presence of a Headscale user. Use the `headscale users` command to create a new user:
|
||||
|
||||
```console
|
||||
headscale users create <USER>
|
||||
```
|
||||
|
||||
=== "Personal devices"
|
||||
|
||||
Run `tailscale up` to login your personal device:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL>
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration
|
||||
on your Headscale server and it also prints the Auth ID required to approve the node:
|
||||
|
||||
```console
|
||||
headscale auth register --user <USER> --auth-id <AUTH_ID>
|
||||
```
|
||||
|
||||
Congratulations, the registration of your personal node is complete and it should be listed as "online" in the output of
|
||||
`headscale nodes list`. The "User" column displays `<USER>` as the owner of the node.
|
||||
|
||||
=== "Tagged devices"
|
||||
|
||||
Your Headscale user needs to be authorized to register tagged devices. This authorization is specified in the
|
||||
[`tagOwners`](https://tailscale.com/kb/1337/policy-syntax#tag-owners) section of the [ACL](acls.md). A simple
|
||||
example looks like this:
|
||||
|
||||
```json title="The user alice can register nodes tagged with tag:server"
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:server": ["alice@"]
|
||||
},
|
||||
// more rules
|
||||
}
|
||||
```
|
||||
|
||||
Run `tailscale up` and provide at least one tag to login a tagged device:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:<TAG>
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration
|
||||
on your Headscale server and it also prints the Auth ID required to approve the node:
|
||||
|
||||
```console
|
||||
headscale auth register --user <USER> --auth-id <AUTH_ID>
|
||||
```
|
||||
|
||||
Headscale checks that `<USER>` is allowed to register a node with the specified tag(s) and then transfers ownership
|
||||
of the new node to the special user `tagged-devices`. The registration of a tagged node is complete and it should be
|
||||
listed as "online" in the output of `headscale nodes list`. The "User" column displays `tagged-devices` as the owner
|
||||
of the node. See the "Tags" column for the list of assigned tags.
|
||||
|
||||
### Pre authenticated key
|
||||
|
||||
Registration with a pre authenticated key (or auth key) is a non-interactive way to register a new node. The Headscale
|
||||
administrator creates a preauthkey upfront and this preauthkey can then be used to register a node non-interactively.
|
||||
It's best suited for automation.
|
||||
|
||||
=== "Personal devices"
|
||||
|
||||
A personal node is always assigned to a Headscale user. Use the `headscale users` command to create a new user:
|
||||
|
||||
```console
|
||||
headscale users create <USER>
|
||||
```
|
||||
|
||||
Use the `headscale user list` command to learn its `<USER_ID>` and create a new pre authenticated key for your user:
|
||||
|
||||
```console
|
||||
headscale preauthkeys create --user <USER_ID>
|
||||
```
|
||||
|
||||
The above prints a pre authenticated key with the default settings (can be used once and is valid for one hour). Use
|
||||
this auth key to register a node non-interactively:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
|
||||
Congratulations, the registration of your personal node is complete and it should be listed as "online" in the output of
|
||||
`headscale nodes list`. The "User" column displays `<USER>` as the owner of the node.
|
||||
|
||||
=== "Tagged devices"
|
||||
|
||||
Create a new pre authenticated key and provide at least one tag:
|
||||
|
||||
```console
|
||||
headscale preauthkeys create --tags tag:<TAG>
|
||||
```
|
||||
|
||||
The above prints a pre authenticated key with the default settings (can be used once and is valid for one hour). Use
|
||||
this auth key to register a node non-interactively. You don't need to provide the `--advertise-tags` parameter as
|
||||
the tags are automatically read from the pre authenticated key:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
|
||||
The registration of a tagged node is complete and it should be listed as "online" in the output of
|
||||
`headscale nodes list`. The "User" column displays `tagged-devices` as the owner of the node. See the "Tags" column for the list of
|
||||
assigned tags.
|
||||
54
docs/ref/tags.md
Normal file
54
docs/ref/tags.md
Normal file
@@ -0,0 +1,54 @@
|
||||
# Tags
|
||||
|
||||
Headscale supports Tailscale tags. Please read [Tailscale's tag documentation](https://tailscale.com/kb/1068/tags) to
|
||||
learn how tags work and how to use them.
|
||||
|
||||
Tags can be applied during [node registration](registration.md):
|
||||
|
||||
- using the `--advertise-tags` flag, see [web authentication for tagged devices](registration.md#__tabbed_1_2)
|
||||
- using a tagged pre authenticated key, see [how to create and use it](registration.md#__tabbed_2_2)
|
||||
|
||||
Administrators can manage tags with:
|
||||
|
||||
- Headscale CLI
|
||||
- [Headscale API](api.md)
|
||||
|
||||
## Common operations
|
||||
|
||||
### Manage tags for a node
|
||||
|
||||
Run `headscale nodes list` to list the tags for a node.
|
||||
|
||||
Use the `headscale nodes tag` command to modify the tags for a node. At least one tag is required and multiple tags can
|
||||
be provided as comma separated list. The following command sets the tags `tag:server` and `tag:prod` on node with ID 1:
|
||||
|
||||
```console
|
||||
headscale nodes tag -i 1 -t tag:server,tag:prod
|
||||
```
|
||||
|
||||
### Convert from personal to tagged node
|
||||
|
||||
Use the `headscale nodes tag` command to convert a personal (user-owned) node to a tagged node:
|
||||
|
||||
```console
|
||||
headscale nodes tag -i <NODE_ID> -t <TAG>
|
||||
```
|
||||
|
||||
The node is now owned by the special user `tagged-devices` and has the specified tags assigned to it.
|
||||
|
||||
### Convert from tagged to personal node
|
||||
|
||||
Tagged nodes can return to personal (user-owned) nodes by re-authenticating with:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags= --force-reauth
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration on
|
||||
your Headscale server and it also prints the Auth ID required to approve the node:
|
||||
|
||||
```console
|
||||
headscale auth register --user <USER> --auth-id <AUTH_ID>
|
||||
```
|
||||
|
||||
All previously assigned tags get removed and the node is now owned by the user specified in the above command.
|
||||
@@ -50,7 +50,7 @@ Headscale uses [autocert](https://pkg.go.dev/golang.org/x/crypto/acme/autocert),
|
||||
If you want to validate that certificate renewal completed successfully, this can be done either manually, or through external monitoring software. Two examples of doing this manually:
|
||||
|
||||
1. Open the URL for your headscale server in your browser of choice, and manually inspecting the expiry date of the certificate you receive.
|
||||
2. Or, check remotely from CLI using `openssl`:
|
||||
1. Or, check remotely from CLI using `openssl`:
|
||||
|
||||
```console
|
||||
$ openssl s_client -servername [hostname] -connect [hostname]:443 | openssl x509 -noout -dates
|
||||
|
||||
@@ -18,17 +18,17 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
|
||||
|
||||
## Configure and run headscale
|
||||
|
||||
1. Create a directory on the container host to store headscale's [configuration](../../ref/configuration.md) and the [SQLite](https://www.sqlite.org/) database:
|
||||
1. Create a directory on the container host to store headscale's [configuration](../../ref/configuration.md) and the [SQLite](https://www.sqlite.org/) database:
|
||||
|
||||
```shell
|
||||
mkdir -p ./headscale/{config,lib}
|
||||
cd ./headscale
|
||||
```
|
||||
|
||||
1. Download the example configuration for your chosen version and save it as: `$(pwd)/config/config.yaml`. Adjust the
|
||||
configuration to suit your local environment. See [Configuration](../../ref/configuration.md) for details.
|
||||
1. Download the example configuration for your chosen version and save it as: `$(pwd)/config/config.yaml`. Adjust the
|
||||
configuration to suit your local environment. See [Configuration](../../ref/configuration.md) for details.
|
||||
|
||||
1. Start headscale from within the previously created `./headscale` directory:
|
||||
1. Start headscale from within the previously created `./headscale` directory:
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
@@ -74,7 +74,7 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
|
||||
test: ["CMD", "headscale", "health"]
|
||||
```
|
||||
|
||||
1. Verify headscale is running:
|
||||
1. Verify headscale is running:
|
||||
|
||||
Follow the container logs:
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ It is recommended to use our DEB packages to install headscale on a Debian based
|
||||
local user to run headscale, provide a default configuration and ship with a systemd service file. Supported
|
||||
distributions are Ubuntu 22.04 or newer, Debian 12 or newer.
|
||||
|
||||
1. Download the [latest headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).
|
||||
1. Download the [latest headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).
|
||||
|
||||
```shell
|
||||
HEADSCALE_VERSION="" # See above URL for latest version, e.g. "X.Y.Z" (NOTE: do not add the "v" prefix!)
|
||||
@@ -18,25 +18,25 @@ distributions are Ubuntu 22.04 or newer, Debian 12 or newer.
|
||||
"https://github.com/juanfont/headscale/releases/download/v${HEADSCALE_VERSION}/headscale_${HEADSCALE_VERSION}_linux_${HEADSCALE_ARCH}.deb"
|
||||
```
|
||||
|
||||
1. Install headscale:
|
||||
1. Install headscale:
|
||||
|
||||
```shell
|
||||
sudo apt install ./headscale.deb
|
||||
```
|
||||
|
||||
1. [Configure headscale by editing the configuration file](../../ref/configuration.md):
|
||||
1. [Configure headscale by editing the configuration file](../../ref/configuration.md):
|
||||
|
||||
```shell
|
||||
sudo nano /etc/headscale/config.yaml
|
||||
```
|
||||
|
||||
1. Enable and start the headscale service:
|
||||
1. Enable and start the headscale service:
|
||||
|
||||
```shell
|
||||
sudo systemctl enable --now headscale
|
||||
```
|
||||
|
||||
1. Verify that headscale is running as intended:
|
||||
1. Verify that headscale is running as intended:
|
||||
|
||||
```shell
|
||||
sudo systemctl status headscale
|
||||
@@ -56,20 +56,20 @@ This section describes the installation of headscale according to the [Requireme
|
||||
assumptions](../requirements.md#assumptions). Headscale is run by a dedicated local user and the service itself is
|
||||
managed by systemd.
|
||||
|
||||
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
|
||||
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
|
||||
|
||||
```shell
|
||||
sudo wget --output-document=/usr/bin/headscale \
|
||||
https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>
|
||||
```
|
||||
|
||||
1. Make `headscale` executable:
|
||||
1. Make `headscale` executable:
|
||||
|
||||
```shell
|
||||
sudo chmod +x /usr/bin/headscale
|
||||
```
|
||||
|
||||
1. Add a dedicated local user to run headscale:
|
||||
1. Add a dedicated local user to run headscale:
|
||||
|
||||
```shell
|
||||
sudo useradd \
|
||||
@@ -81,38 +81,38 @@ managed by systemd.
|
||||
headscale
|
||||
```
|
||||
|
||||
1. Download the example configuration for your chosen version and save it as: `/etc/headscale/config.yaml`. Adjust the
|
||||
configuration to suit your local environment. See [Configuration](../../ref/configuration.md) for details.
|
||||
1. Download the example configuration for your chosen version and save it as: `/etc/headscale/config.yaml`. Adjust the
|
||||
configuration to suit your local environment. See [Configuration](../../ref/configuration.md) for details.
|
||||
|
||||
```shell
|
||||
sudo mkdir -p /etc/headscale
|
||||
sudo nano /etc/headscale/config.yaml
|
||||
```
|
||||
|
||||
1. Copy [headscale's systemd service file](https://github.com/juanfont/headscale/blob/main/packaging/systemd/headscale.service)
|
||||
to `/etc/systemd/system/headscale.service` and adjust it to suit your local setup. The following parameters likely need
|
||||
to be modified: `ExecStart`, `WorkingDirectory`, `ReadWritePaths`.
|
||||
1. Copy [headscale's systemd service file](https://github.com/juanfont/headscale/blob/main/packaging/systemd/headscale.service)
|
||||
to `/etc/systemd/system/headscale.service` and adjust it to suit your local setup. The following parameters likely need
|
||||
to be modified: `ExecStart`, `WorkingDirectory`, `ReadWritePaths`.
|
||||
|
||||
1. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the
|
||||
`headscale` user or group:
|
||||
1. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the
|
||||
`headscale` user or group:
|
||||
|
||||
```yaml title="config.yaml"
|
||||
unix_socket: /var/run/headscale/headscale.sock
|
||||
```
|
||||
|
||||
1. Reload systemd to load the new configuration file:
|
||||
1. Reload systemd to load the new configuration file:
|
||||
|
||||
```shell
|
||||
systemctl daemon-reload
|
||||
```
|
||||
|
||||
1. Enable and start the new headscale service:
|
||||
1. Enable and start the new headscale service:
|
||||
|
||||
```shell
|
||||
systemctl enable --now headscale
|
||||
```
|
||||
|
||||
1. Verify that headscale is running as intended:
|
||||
1. Verify that headscale is running as intended:
|
||||
|
||||
```shell
|
||||
systemctl status headscale
|
||||
|
||||
@@ -46,7 +46,6 @@ The headscale documentation and the provided examples are written with a few ass
|
||||
|
||||
Please adjust to your local environment accordingly.
|
||||
|
||||
[^1]:
|
||||
The Tailscale client assumes HTTPS on port 443 in certain situations. Serving headscale either via HTTP or via HTTPS
|
||||
on a port other than 443 is possible but sticking with HTTPS on port 443 is strongly recommended for production
|
||||
setups. See [issue 2164](https://github.com/juanfont/headscale/issues/2164) for more information.
|
||||
[^1]: The Tailscale client assumes HTTPS on port 443 in certain situations. Serving headscale either via HTTP or via
|
||||
HTTPS on a port other than 443 is possible but sticking with HTTPS on port 443 is strongly recommended for
|
||||
production setups. See [issue 2164](https://github.com/juanfont/headscale/issues/2164) for more information.
|
||||
|
||||
@@ -1,10 +1,50 @@
|
||||
# Upgrade an existing installation
|
||||
|
||||
Update an existing headscale installation to a new version:
|
||||
!!! tip "Required update path"
|
||||
|
||||
Its required to update from one stable version to the next (e.g. 0.26.0 → 0.27.1 → 0.28.0) without skipping minor
|
||||
versions in between. You should always pick the latest available patch release.
|
||||
|
||||
Update an existing Headscale installation to a new version:
|
||||
|
||||
- Read the announcement on the [GitHub releases](https://github.com/juanfont/headscale/releases) page for the new
|
||||
version. It lists the changes of the release along with possible breaking changes.
|
||||
- **Create a backup of your database.**
|
||||
- Update headscale to the new version, preferably by following the same installation method.
|
||||
version. It lists the changes of the release along with possible breaking changes and version-specific upgrade
|
||||
instructions.
|
||||
- Stop Headscale
|
||||
- **[Create a backup of your installation](#backup)**
|
||||
- Update Headscale to the new version, preferably by following the same installation method.
|
||||
- Compare and update the [configuration](../ref/configuration.md) file.
|
||||
- Restart headscale.
|
||||
- Start Headscale
|
||||
|
||||
## Backup
|
||||
|
||||
Headscale applies database migrations during upgrades and we highly recommend to create a backup of your database before
|
||||
upgrading. A full backup of Headscale depends on your individual setup, but below are some typical setup scenarios.
|
||||
|
||||
=== "Standard installation"
|
||||
|
||||
A installation that follows our [official releases](install/official.md) setup guide uses the following paths:
|
||||
|
||||
- [Configuration file](../ref/configuration.md): `/etc/headscale/config.yaml`
|
||||
- Data directory: `/var/lib/headscale`
|
||||
- SQLite as database: `/var/lib/headscale/db.sqlite`
|
||||
|
||||
```console
|
||||
TIMESTAMP=$(date +%Y%m%d%H%M%S)
|
||||
cp -aR /etc/headscale /etc/headscale.backup-$TIMESTAMP
|
||||
cp -aR /var/lib/headscale /var/lib/headscale.backup-$TIMESTAMP
|
||||
```
|
||||
|
||||
=== "Container"
|
||||
|
||||
A installation that follows our [container](install/container.md) setup guide uses a single source volume directory
|
||||
that contains the configuration file, data directory and the SQLite database.
|
||||
|
||||
```console
|
||||
cp -aR /path/to/headscale /path/to/headscale.backup-$(date +%Y%m%d%H%M%S)
|
||||
```
|
||||
|
||||
=== "PostgreSQL"
|
||||
|
||||
Please follow PostgreSQL's [Backup and Restore](https://www.postgresql.org/docs/current/backup.html) documentation
|
||||
to create a backup of your PostgreSQL database.
|
||||
|
||||
@@ -6,7 +6,7 @@ This documentation has the goal of showing how a user can use the official Andro
|
||||
|
||||
Install the official Tailscale Android client from the [Google Play Store](https://play.google.com/store/apps/details?id=com.tailscale.ipn) or [F-Droid](https://f-droid.org/packages/com.tailscale.ipn/).
|
||||
|
||||
## Connect via normal, interactive login
|
||||
## Connect via web authentication
|
||||
|
||||
- Open the app and select the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
@@ -15,7 +15,7 @@ Install the official Tailscale Android client from the [Google Play Store](https
|
||||
- The client connects automatically as soon as the node registration is complete on headscale. Until then, nothing is
|
||||
visible in the server logs.
|
||||
|
||||
## Connect using a preauthkey
|
||||
## Connect using a pre authenticated key
|
||||
|
||||
- Open the app and select the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
@@ -24,5 +24,5 @@ Install the official Tailscale Android client from the [Google Play Store](https
|
||||
- Open the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
- In the kebab menu icon (three dots) in the upper-right corner select `Use an auth key`
|
||||
- Enter your [preauthkey generated from headscale](../getting-started.md#using-a-preauthkey)
|
||||
- Enter your [preauthkey generated from headscale](../../ref/registration.md#pre-authenticated-key)
|
||||
- If needed, tap `Log in` on the main screen. You should now be connected to your headscale.
|
||||
|
||||
@@ -54,6 +54,6 @@ This typically means that the registry keys above was not set appropriately.
|
||||
To reset and try again, it is important to do the following:
|
||||
|
||||
1. Shut down the Tailscale service (or the client running in the tray)
|
||||
2. Delete Tailscale Application data folder, located at `C:\Users\<USERNAME>\AppData\Local\Tailscale` and try to connect again.
|
||||
3. Ensure the Windows node is deleted from headscale (to ensure fresh setup)
|
||||
4. Start Tailscale on the Windows machine and retry the login.
|
||||
1. Delete Tailscale Application data folder, located at `C:\Users\<USERNAME>\AppData\Local\Tailscale` and try to connect again.
|
||||
1. Ensure the Windows node is deleted from headscale (to ensure fresh setup)
|
||||
1. Start Tailscale on the Windows machine and retry the login.
|
||||
|
||||
@@ -5,13 +5,13 @@ This page helps you get started with headscale and provides a few usage examples
|
||||
|
||||
!!! note "Prerequisites"
|
||||
|
||||
* Headscale is installed and running as system service. Read the [setup section](../setup/requirements.md) for
|
||||
- Headscale is installed and running as system service. Read the [setup section](../setup/requirements.md) for
|
||||
installation instructions.
|
||||
* The configuration file exists and is adjusted to suit your environment, see
|
||||
- The configuration file exists and is adjusted to suit your environment, see
|
||||
[Configuration](../ref/configuration.md) for details.
|
||||
* Headscale is reachable from the Internet. Verify this by visiting the health endpoint:
|
||||
- Headscale is reachable from the Internet. Verify this by visiting the health endpoint:
|
||||
https://headscale.example.com/health
|
||||
* The Tailscale client is installed, see [Client and operating system support](../about/clients.md) for more
|
||||
- The Tailscale client is installed, see [Client and operating system support](../about/clients.md) for more
|
||||
information.
|
||||
|
||||
## Getting help
|
||||
@@ -48,9 +48,9 @@ options, run:
|
||||
communicate with the headscale service you have to make sure the unix socket is accessible by the user that runs
|
||||
the commands. In general you can achieve this by any of the following methods:
|
||||
|
||||
* using `sudo`
|
||||
* run the commands as user `headscale`
|
||||
* add your user to the `headscale` group
|
||||
- using `sudo`
|
||||
- run the commands as user `headscale`
|
||||
- add your user to the `headscale` group
|
||||
|
||||
To verify you can run the following command using your preferred method:
|
||||
|
||||
@@ -60,10 +60,9 @@ options, run:
|
||||
|
||||
## Manage headscale users
|
||||
|
||||
In headscale, a node (also known as machine or device) is always assigned to a
|
||||
headscale user. Such a headscale user may have many nodes assigned to them and
|
||||
can be managed with the `headscale users` command. Invoke the built-in help for
|
||||
more information: `headscale users --help`.
|
||||
In headscale, a node (also known as machine or device) is [typically assigned to a headscale
|
||||
user](../ref/registration.md#identity-model). Such a headscale user may have many nodes assigned to them and can be
|
||||
managed with the `headscale users` command. Invoke the built-in help for more information: `headscale users --help`.
|
||||
|
||||
### Create a headscale user
|
||||
|
||||
@@ -97,11 +96,12 @@ more information: `headscale users --help`.
|
||||
|
||||
## Register a node
|
||||
|
||||
One has to register a node first to use headscale as coordination with Tailscale. The following examples work for the
|
||||
Tailscale client on Linux/BSD operating systems. Alternatively, follow the instructions to connect
|
||||
[Android](connect/android.md), [Apple](connect/apple.md) or [Windows](connect/windows.md) devices.
|
||||
One has to [register a node](../ref/registration.md) first to use headscale as coordination server with Tailscale. The
|
||||
following examples work for the Tailscale client on Linux/BSD operating systems. Alternatively, follow the instructions
|
||||
to connect [Android](connect/android.md), [Apple](connect/apple.md) or [Windows](connect/windows.md) devices. Read
|
||||
[registration methods](../ref/registration.md) for an overview of available registration methods.
|
||||
|
||||
### Normal, interactive login
|
||||
### [Web authentication](../ref/registration.md#web-authentication)
|
||||
|
||||
On a client machine, run the `tailscale up` command and provide the FQDN of your headscale instance as argument:
|
||||
|
||||
@@ -109,27 +109,26 @@ On a client machine, run the `tailscale up` command and provide the FQDN of your
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL>
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened and contains the value for `<YOUR_MACHINE_KEY>`. Approve
|
||||
and register the node on your headscale server:
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration on
|
||||
your headscale server and it also prints the Auth ID required to approve the node:
|
||||
|
||||
=== "Native"
|
||||
|
||||
```shell
|
||||
headscale nodes register --user <USER> --key <YOUR_MACHINE_KEY>
|
||||
headscale auth register --user <USER> --auth-id <AUTH_ID>
|
||||
```
|
||||
|
||||
=== "Container"
|
||||
|
||||
```shell
|
||||
docker exec -it headscale \
|
||||
headscale nodes register --user <USER> --key <YOUR_MACHINE_KEY>
|
||||
headscale auth register --user <USER> --auth-id <AUTH_ID>
|
||||
```
|
||||
|
||||
### Using a preauthkey
|
||||
### [Pre authenticated key](../ref/registration.md#pre-authenticated-key)
|
||||
|
||||
It is also possible to generate a preauthkey and register a node non-interactively. First, generate a preauthkey on the
|
||||
headscale instance. By default, the key is valid for one hour and can only be used once (see `headscale preauthkeys
|
||||
--help` for other options):
|
||||
headscale instance. By default, the key is valid for one hour and can only be used once (see `headscale preauthkeys --help` for other options):
|
||||
|
||||
=== "Native"
|
||||
|
||||
|
||||
6
flake.lock
generated
6
flake.lock
generated
@@ -20,11 +20,11 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1768875095,
|
||||
"narHash": "sha256-dYP3DjiL7oIiiq3H65tGIXXIT1Waiadmv93JS0sS+8A=",
|
||||
"lastModified": 1772956932,
|
||||
"narHash": "sha256-M0yS4AafhKxPPmOHGqIV0iKxgNO8bHDWdl1kOwGBwRY=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "ed142ab1b3a092c4d149245d0c4126a5d7ea00b0",
|
||||
"rev": "608d0cadfed240589a7eea422407a547ad626a14",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
73
flake.nix
73
flake.nix
@@ -26,8 +26,8 @@
|
||||
overlays.default = _: prev:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.stdenv.hostPlatform.system};
|
||||
buildGo = pkgs.buildGo125Module;
|
||||
vendorHash = "sha256-dWsDgI5K+8mFw4PA5gfFBPCSqBJp5RcZzm0ML1+HsWw=";
|
||||
buildGo = pkgs.buildGo126Module;
|
||||
vendorHash = "sha256-oUN53ELb3+xn4yA7lEfXyT2c7NxbQC6RtbkGVq6+RLU=";
|
||||
in
|
||||
{
|
||||
headscale = buildGo {
|
||||
@@ -62,16 +62,16 @@
|
||||
|
||||
protoc-gen-grpc-gateway = buildGo rec {
|
||||
pname = "grpc-gateway";
|
||||
version = "2.27.4";
|
||||
version = "2.27.7";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "grpc-ecosystem";
|
||||
repo = "grpc-gateway";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-4bhEQTVV04EyX/qJGNMIAQDcMWcDVr1tFkEjBHpc2CA=";
|
||||
sha256 = "sha256-6R0EhNnOBEISJddjkbVTcBvUuU5U3r9Hu2UPfAZDep4=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-ohZW/uPdt08Y2EpIQ2yeyGSjV9O58+QbQQqYrs6O8/g=";
|
||||
vendorHash = "sha256-SOAbRrzMf2rbKaG9PGSnPSLY/qZVgbHcNjOLmVonycY=";
|
||||
|
||||
nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
|
||||
@@ -94,19 +94,46 @@
|
||||
subPackages = [ "." ];
|
||||
};
|
||||
|
||||
# Upstream does not override buildGoModule properly,
|
||||
# importing a specific module, so comment out for now.
|
||||
# golangci-lint = prev.golangci-lint.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
# golangci-lint-langserver = prev.golangci-lint.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
# Build golangci-lint with Go 1.26 (upstream uses hardcoded Go version)
|
||||
golangci-lint = buildGo rec {
|
||||
pname = "golangci-lint";
|
||||
version = "2.9.0";
|
||||
|
||||
# The package uses buildGo125Module, not the convention.
|
||||
# goreleaser = prev.goreleaser.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "golangci";
|
||||
repo = "golangci-lint";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-8LEtm1v0slKwdLBtS41OilKJLXytSxcI9fUlZbj5Gfw=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-w8JfF6n1ylrU652HEv/cYdsOdDZz9J2uRQDqxObyhkY=";
|
||||
|
||||
subPackages = [ "cmd/golangci-lint" ];
|
||||
|
||||
nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
|
||||
ldflags = [
|
||||
"-s"
|
||||
"-w"
|
||||
"-X main.version=${version}"
|
||||
"-X main.commit=v${version}"
|
||||
"-X main.date=1970-01-01T00:00:00Z"
|
||||
];
|
||||
|
||||
postInstall = ''
|
||||
for shell in bash zsh fish; do
|
||||
HOME=$TMPDIR $out/bin/golangci-lint completion $shell > golangci-lint.$shell
|
||||
installShellCompletion golangci-lint.$shell
|
||||
done
|
||||
'';
|
||||
|
||||
meta = {
|
||||
description = "Fast linters runner for Go";
|
||||
homepage = "https://golangci-lint.run/";
|
||||
changelog = "https://github.com/golangci/golangci-lint/blob/v${version}/CHANGELOG.md";
|
||||
mainProgram = "golangci-lint";
|
||||
};
|
||||
};
|
||||
|
||||
gotestsum = prev.gotestsum.override {
|
||||
buildGoModule = buildGo;
|
||||
@@ -120,9 +147,9 @@
|
||||
buildGoModule = buildGo;
|
||||
};
|
||||
|
||||
# gopls = prev.gopls.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
gopls = prev.gopls.override {
|
||||
buildGoLatestModule = buildGo;
|
||||
};
|
||||
};
|
||||
}
|
||||
// flake-utils.lib.eachDefaultSystem
|
||||
@@ -132,7 +159,7 @@
|
||||
overlays = [ self.overlays.default ];
|
||||
inherit system;
|
||||
};
|
||||
buildDeps = with pkgs; [ git go_1_25 gnumake ];
|
||||
buildDeps = with pkgs; [ git go_1_26 gnumake ];
|
||||
devDeps = with pkgs;
|
||||
buildDeps
|
||||
++ [
|
||||
@@ -152,6 +179,10 @@
|
||||
yq-go
|
||||
ripgrep
|
||||
postgresql
|
||||
python314Packages.mdformat
|
||||
python314Packages.mdformat-footnote
|
||||
python314Packages.mdformat-frontmatter
|
||||
python314Packages.mdformat-mkdocs
|
||||
prek
|
||||
|
||||
# 'dot' is needed for pprof graphs
|
||||
|
||||
351
gen/go/headscale/v1/auth.pb.go
Normal file
351
gen/go/headscale/v1/auth.pb.go
Normal file
@@ -0,0 +1,351 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.11
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/auth.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type AuthRegisterRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
AuthId string `protobuf:"bytes,2,opt,name=auth_id,json=authId,proto3" json:"auth_id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *AuthRegisterRequest) Reset() {
|
||||
*x = AuthRegisterRequest{}
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *AuthRegisterRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*AuthRegisterRequest) ProtoMessage() {}
|
||||
|
||||
func (x *AuthRegisterRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AuthRegisterRequest.ProtoReflect.Descriptor instead.
|
||||
func (*AuthRegisterRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_auth_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *AuthRegisterRequest) GetUser() string {
|
||||
if x != nil {
|
||||
return x.User
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *AuthRegisterRequest) GetAuthId() string {
|
||||
if x != nil {
|
||||
return x.AuthId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type AuthRegisterResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *AuthRegisterResponse) Reset() {
|
||||
*x = AuthRegisterResponse{}
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *AuthRegisterResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*AuthRegisterResponse) ProtoMessage() {}
|
||||
|
||||
func (x *AuthRegisterResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AuthRegisterResponse.ProtoReflect.Descriptor instead.
|
||||
func (*AuthRegisterResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_auth_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *AuthRegisterResponse) GetNode() *Node {
|
||||
if x != nil {
|
||||
return x.Node
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type AuthApproveRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
AuthId string `protobuf:"bytes,1,opt,name=auth_id,json=authId,proto3" json:"auth_id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *AuthApproveRequest) Reset() {
|
||||
*x = AuthApproveRequest{}
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *AuthApproveRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*AuthApproveRequest) ProtoMessage() {}
|
||||
|
||||
func (x *AuthApproveRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[2]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AuthApproveRequest.ProtoReflect.Descriptor instead.
|
||||
func (*AuthApproveRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_auth_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *AuthApproveRequest) GetAuthId() string {
|
||||
if x != nil {
|
||||
return x.AuthId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type AuthApproveResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *AuthApproveResponse) Reset() {
|
||||
*x = AuthApproveResponse{}
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *AuthApproveResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*AuthApproveResponse) ProtoMessage() {}
|
||||
|
||||
func (x *AuthApproveResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[3]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AuthApproveResponse.ProtoReflect.Descriptor instead.
|
||||
func (*AuthApproveResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_auth_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
type AuthRejectRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
AuthId string `protobuf:"bytes,1,opt,name=auth_id,json=authId,proto3" json:"auth_id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *AuthRejectRequest) Reset() {
|
||||
*x = AuthRejectRequest{}
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *AuthRejectRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*AuthRejectRequest) ProtoMessage() {}
|
||||
|
||||
func (x *AuthRejectRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[4]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AuthRejectRequest.ProtoReflect.Descriptor instead.
|
||||
func (*AuthRejectRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_auth_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *AuthRejectRequest) GetAuthId() string {
|
||||
if x != nil {
|
||||
return x.AuthId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type AuthRejectResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *AuthRejectResponse) Reset() {
|
||||
*x = AuthRejectResponse{}
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *AuthRejectResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*AuthRejectResponse) ProtoMessage() {}
|
||||
|
||||
func (x *AuthRejectResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_auth_proto_msgTypes[5]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AuthRejectResponse.ProtoReflect.Descriptor instead.
|
||||
func (*AuthRejectResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_auth_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
var File_headscale_v1_auth_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_headscale_v1_auth_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x17headscale/v1/auth.proto\x12\fheadscale.v1\x1a\x17headscale/v1/node.proto\"B\n" +
|
||||
"\x13AuthRegisterRequest\x12\x12\n" +
|
||||
"\x04user\x18\x01 \x01(\tR\x04user\x12\x17\n" +
|
||||
"\aauth_id\x18\x02 \x01(\tR\x06authId\">\n" +
|
||||
"\x14AuthRegisterResponse\x12&\n" +
|
||||
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\"-\n" +
|
||||
"\x12AuthApproveRequest\x12\x17\n" +
|
||||
"\aauth_id\x18\x01 \x01(\tR\x06authId\"\x15\n" +
|
||||
"\x13AuthApproveResponse\",\n" +
|
||||
"\x11AuthRejectRequest\x12\x17\n" +
|
||||
"\aauth_id\x18\x01 \x01(\tR\x06authId\"\x14\n" +
|
||||
"\x12AuthRejectResponseB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
|
||||
var (
|
||||
file_headscale_v1_auth_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_auth_proto_rawDescData []byte
|
||||
)
|
||||
|
||||
func file_headscale_v1_auth_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_auth_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_auth_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_auth_proto_rawDesc), len(file_headscale_v1_auth_proto_rawDesc)))
|
||||
})
|
||||
return file_headscale_v1_auth_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_headscale_v1_auth_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||
var file_headscale_v1_auth_proto_goTypes = []any{
|
||||
(*AuthRegisterRequest)(nil), // 0: headscale.v1.AuthRegisterRequest
|
||||
(*AuthRegisterResponse)(nil), // 1: headscale.v1.AuthRegisterResponse
|
||||
(*AuthApproveRequest)(nil), // 2: headscale.v1.AuthApproveRequest
|
||||
(*AuthApproveResponse)(nil), // 3: headscale.v1.AuthApproveResponse
|
||||
(*AuthRejectRequest)(nil), // 4: headscale.v1.AuthRejectRequest
|
||||
(*AuthRejectResponse)(nil), // 5: headscale.v1.AuthRejectResponse
|
||||
(*Node)(nil), // 6: headscale.v1.Node
|
||||
}
|
||||
var file_headscale_v1_auth_proto_depIdxs = []int32{
|
||||
6, // 0: headscale.v1.AuthRegisterResponse.node:type_name -> headscale.v1.Node
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_headscale_v1_auth_proto_init() }
|
||||
func file_headscale_v1_auth_proto_init() {
|
||||
if File_headscale_v1_auth_proto != nil {
|
||||
return
|
||||
}
|
||||
file_headscale_v1_node_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_auth_proto_rawDesc), len(file_headscale_v1_auth_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 6,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_headscale_v1_auth_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_auth_proto_depIdxs,
|
||||
MessageInfos: file_headscale_v1_auth_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_auth_proto = out.File
|
||||
file_headscale_v1_auth_proto_goTypes = nil
|
||||
file_headscale_v1_auth_proto_depIdxs = nil
|
||||
}
|
||||
@@ -106,10 +106,10 @@ var File_headscale_v1_headscale_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_headscale_v1_headscale_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto\"\x0f\n" +
|
||||
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x17headscale/v1/auth.proto\x1a\x19headscale/v1/policy.proto\"\x0f\n" +
|
||||
"\rHealthRequest\"E\n" +
|
||||
"\x0eHealthResponse\x123\n" +
|
||||
"\x15database_connectivity\x18\x01 \x01(\bR\x14databaseConnectivity2\x8c\x17\n" +
|
||||
"\x15database_connectivity\x18\x01 \x01(\bR\x14databaseConnectivity2\xeb\x19\n" +
|
||||
"\x10HeadscaleService\x12h\n" +
|
||||
"\n" +
|
||||
"CreateUser\x12\x1f.headscale.v1.CreateUserRequest\x1a .headscale.v1.CreateUserResponse\"\x17\x82\xd3\xe4\x93\x02\x11:\x01*\"\f/api/v1/user\x12\x80\x01\n" +
|
||||
@@ -134,7 +134,11 @@ const file_headscale_v1_headscale_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"RenameNode\x12\x1f.headscale.v1.RenameNodeRequest\x1a .headscale.v1.RenameNodeResponse\"0\x82\xd3\xe4\x93\x02*\"(/api/v1/node/{node_id}/rename/{new_name}\x12b\n" +
|
||||
"\tListNodes\x12\x1e.headscale.v1.ListNodesRequest\x1a\x1f.headscale.v1.ListNodesResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/node\x12\x80\x01\n" +
|
||||
"\x0fBackfillNodeIPs\x12$.headscale.v1.BackfillNodeIPsRequest\x1a%.headscale.v1.BackfillNodeIPsResponse\" \x82\xd3\xe4\x93\x02\x1a\"\x18/api/v1/node/backfillips\x12p\n" +
|
||||
"\x0fBackfillNodeIPs\x12$.headscale.v1.BackfillNodeIPsRequest\x1a%.headscale.v1.BackfillNodeIPsResponse\" \x82\xd3\xe4\x93\x02\x1a\"\x18/api/v1/node/backfillips\x12w\n" +
|
||||
"\fAuthRegister\x12!.headscale.v1.AuthRegisterRequest\x1a\".headscale.v1.AuthRegisterResponse\" \x82\xd3\xe4\x93\x02\x1a:\x01*\"\x15/api/v1/auth/register\x12s\n" +
|
||||
"\vAuthApprove\x12 .headscale.v1.AuthApproveRequest\x1a!.headscale.v1.AuthApproveResponse\"\x1f\x82\xd3\xe4\x93\x02\x19:\x01*\"\x14/api/v1/auth/approve\x12o\n" +
|
||||
"\n" +
|
||||
"AuthReject\x12\x1f.headscale.v1.AuthRejectRequest\x1a .headscale.v1.AuthRejectResponse\"\x1e\x82\xd3\xe4\x93\x02\x18:\x01*\"\x13/api/v1/auth/reject\x12p\n" +
|
||||
"\fCreateApiKey\x12!.headscale.v1.CreateApiKeyRequest\x1a\".headscale.v1.CreateApiKeyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\"\x0e/api/v1/apikey\x12w\n" +
|
||||
"\fExpireApiKey\x12!.headscale.v1.ExpireApiKeyRequest\x1a\".headscale.v1.ExpireApiKeyResponse\" \x82\xd3\xe4\x93\x02\x1a:\x01*\"\x15/api/v1/apikey/expire\x12j\n" +
|
||||
"\vListApiKeys\x12 .headscale.v1.ListApiKeysRequest\x1a!.headscale.v1.ListApiKeysResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/apikey\x12v\n" +
|
||||
@@ -177,36 +181,42 @@ var file_headscale_v1_headscale_proto_goTypes = []any{
|
||||
(*RenameNodeRequest)(nil), // 17: headscale.v1.RenameNodeRequest
|
||||
(*ListNodesRequest)(nil), // 18: headscale.v1.ListNodesRequest
|
||||
(*BackfillNodeIPsRequest)(nil), // 19: headscale.v1.BackfillNodeIPsRequest
|
||||
(*CreateApiKeyRequest)(nil), // 20: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 21: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 22: headscale.v1.ListApiKeysRequest
|
||||
(*DeleteApiKeyRequest)(nil), // 23: headscale.v1.DeleteApiKeyRequest
|
||||
(*GetPolicyRequest)(nil), // 24: headscale.v1.GetPolicyRequest
|
||||
(*SetPolicyRequest)(nil), // 25: headscale.v1.SetPolicyRequest
|
||||
(*CreateUserResponse)(nil), // 26: headscale.v1.CreateUserResponse
|
||||
(*RenameUserResponse)(nil), // 27: headscale.v1.RenameUserResponse
|
||||
(*DeleteUserResponse)(nil), // 28: headscale.v1.DeleteUserResponse
|
||||
(*ListUsersResponse)(nil), // 29: headscale.v1.ListUsersResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 30: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 31: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*DeletePreAuthKeyResponse)(nil), // 32: headscale.v1.DeletePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 33: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 34: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 35: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 36: headscale.v1.SetTagsResponse
|
||||
(*SetApprovedRoutesResponse)(nil), // 37: headscale.v1.SetApprovedRoutesResponse
|
||||
(*RegisterNodeResponse)(nil), // 38: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 39: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 40: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 41: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 42: headscale.v1.ListNodesResponse
|
||||
(*BackfillNodeIPsResponse)(nil), // 43: headscale.v1.BackfillNodeIPsResponse
|
||||
(*CreateApiKeyResponse)(nil), // 44: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 45: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 46: headscale.v1.ListApiKeysResponse
|
||||
(*DeleteApiKeyResponse)(nil), // 47: headscale.v1.DeleteApiKeyResponse
|
||||
(*GetPolicyResponse)(nil), // 48: headscale.v1.GetPolicyResponse
|
||||
(*SetPolicyResponse)(nil), // 49: headscale.v1.SetPolicyResponse
|
||||
(*AuthRegisterRequest)(nil), // 20: headscale.v1.AuthRegisterRequest
|
||||
(*AuthApproveRequest)(nil), // 21: headscale.v1.AuthApproveRequest
|
||||
(*AuthRejectRequest)(nil), // 22: headscale.v1.AuthRejectRequest
|
||||
(*CreateApiKeyRequest)(nil), // 23: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 24: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 25: headscale.v1.ListApiKeysRequest
|
||||
(*DeleteApiKeyRequest)(nil), // 26: headscale.v1.DeleteApiKeyRequest
|
||||
(*GetPolicyRequest)(nil), // 27: headscale.v1.GetPolicyRequest
|
||||
(*SetPolicyRequest)(nil), // 28: headscale.v1.SetPolicyRequest
|
||||
(*CreateUserResponse)(nil), // 29: headscale.v1.CreateUserResponse
|
||||
(*RenameUserResponse)(nil), // 30: headscale.v1.RenameUserResponse
|
||||
(*DeleteUserResponse)(nil), // 31: headscale.v1.DeleteUserResponse
|
||||
(*ListUsersResponse)(nil), // 32: headscale.v1.ListUsersResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 33: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 34: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*DeletePreAuthKeyResponse)(nil), // 35: headscale.v1.DeletePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 36: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 37: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 38: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 39: headscale.v1.SetTagsResponse
|
||||
(*SetApprovedRoutesResponse)(nil), // 40: headscale.v1.SetApprovedRoutesResponse
|
||||
(*RegisterNodeResponse)(nil), // 41: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 42: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 43: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 44: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 45: headscale.v1.ListNodesResponse
|
||||
(*BackfillNodeIPsResponse)(nil), // 46: headscale.v1.BackfillNodeIPsResponse
|
||||
(*AuthRegisterResponse)(nil), // 47: headscale.v1.AuthRegisterResponse
|
||||
(*AuthApproveResponse)(nil), // 48: headscale.v1.AuthApproveResponse
|
||||
(*AuthRejectResponse)(nil), // 49: headscale.v1.AuthRejectResponse
|
||||
(*CreateApiKeyResponse)(nil), // 50: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 51: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 52: headscale.v1.ListApiKeysResponse
|
||||
(*DeleteApiKeyResponse)(nil), // 53: headscale.v1.DeleteApiKeyResponse
|
||||
(*GetPolicyResponse)(nil), // 54: headscale.v1.GetPolicyResponse
|
||||
(*SetPolicyResponse)(nil), // 55: headscale.v1.SetPolicyResponse
|
||||
}
|
||||
var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
2, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
|
||||
@@ -227,40 +237,46 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
17, // 15: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
|
||||
18, // 16: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
|
||||
19, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
|
||||
20, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
21, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
22, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
23, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
|
||||
24, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
|
||||
25, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
|
||||
0, // 24: headscale.v1.HeadscaleService.Health:input_type -> headscale.v1.HealthRequest
|
||||
26, // 25: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
|
||||
27, // 26: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
|
||||
28, // 27: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
|
||||
29, // 28: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
|
||||
30, // 29: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
31, // 30: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
32, // 31: headscale.v1.HeadscaleService.DeletePreAuthKey:output_type -> headscale.v1.DeletePreAuthKeyResponse
|
||||
33, // 32: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
34, // 33: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
35, // 34: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
36, // 35: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
37, // 36: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
|
||||
38, // 37: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
39, // 38: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
40, // 39: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
41, // 40: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
42, // 41: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
43, // 42: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
|
||||
44, // 43: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
45, // 44: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
46, // 45: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
47, // 46: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
|
||||
48, // 47: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
|
||||
49, // 48: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
|
||||
1, // 49: headscale.v1.HeadscaleService.Health:output_type -> headscale.v1.HealthResponse
|
||||
25, // [25:50] is the sub-list for method output_type
|
||||
0, // [0:25] is the sub-list for method input_type
|
||||
20, // 18: headscale.v1.HeadscaleService.AuthRegister:input_type -> headscale.v1.AuthRegisterRequest
|
||||
21, // 19: headscale.v1.HeadscaleService.AuthApprove:input_type -> headscale.v1.AuthApproveRequest
|
||||
22, // 20: headscale.v1.HeadscaleService.AuthReject:input_type -> headscale.v1.AuthRejectRequest
|
||||
23, // 21: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
24, // 22: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
25, // 23: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
26, // 24: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
|
||||
27, // 25: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
|
||||
28, // 26: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
|
||||
0, // 27: headscale.v1.HeadscaleService.Health:input_type -> headscale.v1.HealthRequest
|
||||
29, // 28: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
|
||||
30, // 29: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
|
||||
31, // 30: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
|
||||
32, // 31: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
|
||||
33, // 32: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
34, // 33: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
35, // 34: headscale.v1.HeadscaleService.DeletePreAuthKey:output_type -> headscale.v1.DeletePreAuthKeyResponse
|
||||
36, // 35: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
37, // 36: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
38, // 37: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
39, // 38: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
40, // 39: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
|
||||
41, // 40: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
42, // 41: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
43, // 42: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
44, // 43: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
45, // 44: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
46, // 45: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
|
||||
47, // 46: headscale.v1.HeadscaleService.AuthRegister:output_type -> headscale.v1.AuthRegisterResponse
|
||||
48, // 47: headscale.v1.HeadscaleService.AuthApprove:output_type -> headscale.v1.AuthApproveResponse
|
||||
49, // 48: headscale.v1.HeadscaleService.AuthReject:output_type -> headscale.v1.AuthRejectResponse
|
||||
50, // 49: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
51, // 50: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
52, // 51: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
53, // 52: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
|
||||
54, // 53: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
|
||||
55, // 54: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
|
||||
1, // 55: headscale.v1.HeadscaleService.Health:output_type -> headscale.v1.HealthResponse
|
||||
28, // [28:56] is the sub-list for method output_type
|
||||
0, // [0:28] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
@@ -275,6 +291,7 @@ func file_headscale_v1_headscale_proto_init() {
|
||||
file_headscale_v1_preauthkey_proto_init()
|
||||
file_headscale_v1_node_proto_init()
|
||||
file_headscale_v1_apikey_proto_init()
|
||||
file_headscale_v1_auth_proto_init()
|
||||
file_headscale_v1_policy_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
|
||||
@@ -709,6 +709,87 @@ func local_request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marsh
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_AuthRegister_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq AuthRegisterRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.AuthRegister(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_AuthRegister_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq AuthRegisterRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := server.AuthRegister(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_AuthApprove_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq AuthApproveRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.AuthApprove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_AuthApprove_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq AuthApproveRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := server.AuthApprove(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_AuthReject_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq AuthRejectRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.AuthReject(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_AuthReject_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq AuthRejectRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := server.AuthReject(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_CreateApiKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq CreateApiKeyRequest
|
||||
@@ -1272,6 +1353,66 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_AuthRegister_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/AuthRegister", runtime.WithHTTPPathPattern("/api/v1/auth/register"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_AuthRegister_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_AuthRegister_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_AuthApprove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/AuthApprove", runtime.WithHTTPPathPattern("/api/v1/auth/approve"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_AuthApprove_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_AuthApprove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_AuthReject_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/AuthReject", runtime.WithHTTPPathPattern("/api/v1/auth/reject"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_AuthReject_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_AuthReject_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_CreateApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1758,6 +1899,57 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_AuthRegister_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/AuthRegister", runtime.WithHTTPPathPattern("/api/v1/auth/register"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_AuthRegister_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_AuthRegister_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_AuthApprove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/AuthApprove", runtime.WithHTTPPathPattern("/api/v1/auth/approve"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_AuthApprove_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_AuthApprove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_AuthReject_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/AuthReject", runtime.WithHTTPPathPattern("/api/v1/auth/reject"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_AuthReject_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_AuthReject_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_CreateApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1899,6 +2091,9 @@ var (
|
||||
pattern_HeadscaleService_RenameNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "node", "node_id", "rename", "new_name"}, ""))
|
||||
pattern_HeadscaleService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "node"}, ""))
|
||||
pattern_HeadscaleService_BackfillNodeIPs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "backfillips"}, ""))
|
||||
pattern_HeadscaleService_AuthRegister_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "auth", "register"}, ""))
|
||||
pattern_HeadscaleService_AuthApprove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "auth", "approve"}, ""))
|
||||
pattern_HeadscaleService_AuthReject_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "auth", "reject"}, ""))
|
||||
pattern_HeadscaleService_CreateApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
|
||||
pattern_HeadscaleService_ExpireApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "apikey", "expire"}, ""))
|
||||
pattern_HeadscaleService_ListApiKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
|
||||
@@ -1927,6 +2122,9 @@ var (
|
||||
forward_HeadscaleService_RenameNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ListNodes_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_BackfillNodeIPs_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_AuthRegister_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_AuthApprove_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_AuthReject_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_CreateApiKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ExpireApiKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ListApiKeys_0 = runtime.ForwardResponseMessage
|
||||
|
||||
@@ -37,6 +37,9 @@ const (
|
||||
HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode"
|
||||
HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes"
|
||||
HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs"
|
||||
HeadscaleService_AuthRegister_FullMethodName = "/headscale.v1.HeadscaleService/AuthRegister"
|
||||
HeadscaleService_AuthApprove_FullMethodName = "/headscale.v1.HeadscaleService/AuthApprove"
|
||||
HeadscaleService_AuthReject_FullMethodName = "/headscale.v1.HeadscaleService/AuthReject"
|
||||
HeadscaleService_CreateApiKey_FullMethodName = "/headscale.v1.HeadscaleService/CreateApiKey"
|
||||
HeadscaleService_ExpireApiKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpireApiKey"
|
||||
HeadscaleService_ListApiKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListApiKeys"
|
||||
@@ -71,6 +74,10 @@ type HeadscaleServiceClient interface {
|
||||
RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error)
|
||||
ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error)
|
||||
BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error)
|
||||
// --- Auth start ---
|
||||
AuthRegister(ctx context.Context, in *AuthRegisterRequest, opts ...grpc.CallOption) (*AuthRegisterResponse, error)
|
||||
AuthApprove(ctx context.Context, in *AuthApproveRequest, opts ...grpc.CallOption) (*AuthApproveResponse, error)
|
||||
AuthReject(ctx context.Context, in *AuthRejectRequest, opts ...grpc.CallOption) (*AuthRejectResponse, error)
|
||||
// --- ApiKeys start ---
|
||||
CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error)
|
||||
ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error)
|
||||
@@ -271,6 +278,36 @@ func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *Backfi
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) AuthRegister(ctx context.Context, in *AuthRegisterRequest, opts ...grpc.CallOption) (*AuthRegisterResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(AuthRegisterResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_AuthRegister_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) AuthApprove(ctx context.Context, in *AuthApproveRequest, opts ...grpc.CallOption) (*AuthApproveResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(AuthApproveResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_AuthApprove_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) AuthReject(ctx context.Context, in *AuthRejectRequest, opts ...grpc.CallOption) (*AuthRejectResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(AuthRejectResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_AuthReject_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CreateApiKeyResponse)
|
||||
@@ -366,6 +403,10 @@ type HeadscaleServiceServer interface {
|
||||
RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error)
|
||||
ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error)
|
||||
BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error)
|
||||
// --- Auth start ---
|
||||
AuthRegister(context.Context, *AuthRegisterRequest) (*AuthRegisterResponse, error)
|
||||
AuthApprove(context.Context, *AuthApproveRequest) (*AuthApproveResponse, error)
|
||||
AuthReject(context.Context, *AuthRejectRequest) (*AuthRejectResponse, error)
|
||||
// --- ApiKeys start ---
|
||||
CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error)
|
||||
ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error)
|
||||
@@ -440,6 +481,15 @@ func (UnimplementedHeadscaleServiceServer) ListNodes(context.Context, *ListNodes
|
||||
func (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method BackfillNodeIPs not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) AuthRegister(context.Context, *AuthRegisterRequest) (*AuthRegisterResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method AuthRegister not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) AuthApprove(context.Context, *AuthApproveRequest) (*AuthApproveResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method AuthApprove not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) AuthReject(context.Context, *AuthRejectRequest) (*AuthRejectResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method AuthReject not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method CreateApiKey not implemented")
|
||||
}
|
||||
@@ -806,6 +856,60 @@ func _HeadscaleService_BackfillNodeIPs_Handler(srv interface{}, ctx context.Cont
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_AuthRegister_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AuthRegisterRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).AuthRegister(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_AuthRegister_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).AuthRegister(ctx, req.(*AuthRegisterRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_AuthApprove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AuthApproveRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).AuthApprove(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_AuthApprove_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).AuthApprove(ctx, req.(*AuthApproveRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_AuthReject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AuthRejectRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).AuthReject(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_AuthReject_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).AuthReject(ctx, req.(*AuthRejectRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_CreateApiKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateApiKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
@@ -1011,6 +1115,18 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "BackfillNodeIPs",
|
||||
Handler: _HeadscaleService_BackfillNodeIPs_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "AuthRegister",
|
||||
Handler: _HeadscaleService_AuthRegister_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "AuthApprove",
|
||||
Handler: _HeadscaleService_AuthApprove_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "AuthReject",
|
||||
Handler: _HeadscaleService_AuthReject_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreateApiKey",
|
||||
Handler: _HeadscaleService_CreateApiKey_Handler,
|
||||
|
||||
@@ -715,9 +715,11 @@ func (*DeleteNodeResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type ExpireNodeRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
|
||||
Expiry *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiry,proto3" json:"expiry,omitempty"`
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
|
||||
Expiry *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiry,proto3" json:"expiry,omitempty"`
|
||||
// When true, sets expiry to null (node will never expire).
|
||||
DisableExpiry bool `protobuf:"varint,3,opt,name=disable_expiry,json=disableExpiry,proto3" json:"disable_expiry,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -766,6 +768,13 @@ func (x *ExpireNodeRequest) GetExpiry() *timestamppb.Timestamp {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExpireNodeRequest) GetDisableExpiry() bool {
|
||||
if x != nil {
|
||||
return x.DisableExpiry
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type ExpireNodeResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
|
||||
@@ -1245,10 +1254,11 @@ const file_headscale_v1_node_proto_rawDesc = "" +
|
||||
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\",\n" +
|
||||
"\x11DeleteNodeRequest\x12\x17\n" +
|
||||
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\"\x14\n" +
|
||||
"\x12DeleteNodeResponse\"`\n" +
|
||||
"\x12DeleteNodeResponse\"\x87\x01\n" +
|
||||
"\x11ExpireNodeRequest\x12\x17\n" +
|
||||
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\x122\n" +
|
||||
"\x06expiry\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x06expiry\"<\n" +
|
||||
"\x06expiry\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x06expiry\x12%\n" +
|
||||
"\x0edisable_expiry\x18\x03 \x01(\bR\rdisableExpiry\"<\n" +
|
||||
"\x12ExpireNodeResponse\x12&\n" +
|
||||
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\"G\n" +
|
||||
"\x11RenameNodeRequest\x12\x17\n" +
|
||||
|
||||
44
gen/openapiv2/headscale/v1/auth.swagger.json
Normal file
44
gen/openapiv2/headscale/v1/auth.swagger.json
Normal file
@@ -0,0 +1,44 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "headscale/v1/auth.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"@type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -138,6 +138,103 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/auth/approve": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_AuthApprove",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1AuthApproveResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1AuthApproveRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/auth/register": {
|
||||
"post": {
|
||||
"summary": "--- Auth start ---",
|
||||
"operationId": "HeadscaleService_AuthRegister",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1AuthRegisterResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1AuthRegisterRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/auth/reject": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_AuthReject",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1AuthRejectResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1AuthRejectRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/debug/node": {
|
||||
"post": {
|
||||
"summary": "--- Node start ---",
|
||||
@@ -420,6 +517,13 @@
|
||||
"required": false,
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
{
|
||||
"name": "disableExpiry",
|
||||
"description": "When true, sets expiry to null (node will never expire).",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "boolean"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
@@ -888,6 +992,47 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1AuthApproveRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"authId": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1AuthApproveResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1AuthRegisterRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "string"
|
||||
},
|
||||
"authId": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1AuthRegisterResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"node": {
|
||||
"$ref": "#/definitions/v1Node"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1AuthRejectRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"authId": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1AuthRejectResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1BackfillNodeIPsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
||||
140
go.mod
140
go.mod
@@ -1,25 +1,27 @@
|
||||
module github.com/juanfont/headscale
|
||||
|
||||
go 1.25.5
|
||||
go 1.26.1
|
||||
|
||||
require (
|
||||
github.com/arl/statsviz v0.8.0
|
||||
github.com/cenkalti/backoff/v5 v5.0.3
|
||||
github.com/chasefleming/elem-go v0.31.0
|
||||
github.com/coder/websocket v1.8.14
|
||||
github.com/coreos/go-oidc/v3 v3.16.0
|
||||
github.com/coreos/go-oidc/v3 v3.17.0
|
||||
github.com/creachadair/command v0.2.0
|
||||
github.com/creachadair/flax v0.0.5
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
||||
github.com/docker/docker v28.5.2+incompatible
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/glebarez/sqlite v1.11.0
|
||||
github.com/go-chi/chi/v5 v5.2.5
|
||||
github.com/go-chi/metrics v0.1.1
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.5
|
||||
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced
|
||||
github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e
|
||||
github.com/gofrs/uuid/v5 v5.4.0
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7
|
||||
github.com/jagottsicher/termcolor v1.0.2
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25
|
||||
github.com/ory/dockertest/v3 v3.12.0
|
||||
@@ -28,7 +30,7 @@ require (
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/prometheus/common v0.67.5
|
||||
github.com/pterm/pterm v0.12.82
|
||||
github.com/puzpuzpuz/xsync/v4 v4.3.0
|
||||
github.com/puzpuzpuz/xsync/v4 v4.4.0
|
||||
github.com/rs/zerolog v1.34.0
|
||||
github.com/samber/lo v1.52.0
|
||||
github.com/sasha-s/go-deadlock v0.3.6
|
||||
@@ -40,18 +42,18 @@ require (
|
||||
github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
|
||||
golang.org/x/crypto v0.46.0
|
||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546
|
||||
golang.org/x/net v0.48.0
|
||||
golang.org/x/crypto v0.47.0
|
||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96
|
||||
golang.org/x/net v0.49.0
|
||||
golang.org/x/oauth2 v0.34.0
|
||||
golang.org/x/sync v0.19.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20
|
||||
google.golang.org/grpc v1.78.0
|
||||
google.golang.org/protobuf v1.36.11
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gorm.io/driver/postgres v1.6.0
|
||||
gorm.io/gorm v1.31.1
|
||||
tailscale.com v1.94.0
|
||||
tailscale.com v1.94.1
|
||||
zgo.at/zcache/v2 v2.4.1
|
||||
zombiezen.com/go/postgrestest v1.0.1
|
||||
)
|
||||
@@ -80,6 +82,14 @@ require (
|
||||
modernc.org/sqlite v1.44.3
|
||||
)
|
||||
|
||||
// NOTE: gvisor must be updated in lockstep with
|
||||
// tailscale.com. The version used here should match
|
||||
// the version required by the tailscale.com dependency.
|
||||
// To find the correct version, check tailscale.com's
|
||||
// go.mod file for the gvisor.dev/gvisor version:
|
||||
// https://github.com/tailscale/tailscale/blob/main/go.mod
|
||||
require gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
|
||||
|
||||
require (
|
||||
atomicgo.dev/cursor v0.2.0 // indirect
|
||||
atomicgo.dev/keyboard v0.2.9 // indirect
|
||||
@@ -90,102 +100,107 @@ require (
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
||||
github.com/akutz/memconn v0.1.0 // indirect
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
|
||||
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
|
||||
github.com/aws/smithy-go v1.24.0 // indirect
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 // indirect
|
||||
github.com/axiomhq/hyperloglog v0.2.6 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0 // indirect
|
||||
github.com/clipperhouse/stringish v0.1.1 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.5.0 // indirect
|
||||
github.com/containerd/console v1.0.5 // indirect
|
||||
github.com/containerd/continuity v0.4.5 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/creachadair/mds v0.25.10 // indirect
|
||||
github.com/creachadair/msync v0.7.1 // indirect
|
||||
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
|
||||
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect
|
||||
github.com/creachadair/mds v0.25.15 // indirect
|
||||
github.com/creachadair/msync v0.8.2 // indirect
|
||||
github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d // indirect
|
||||
github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v28.5.1+incompatible // indirect
|
||||
github.com/docker/cli v29.2.1+incompatible // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/gaissmai/bart v0.18.0 // indirect
|
||||
github.com/gaissmai/bart v0.26.1 // indirect
|
||||
github.com/glebarez/go-sqlite v1.22.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
|
||||
github.com/godbus/dbus/v5 v5.2.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/go-github v17.0.0+incompatible // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
|
||||
github.com/google/go-querystring v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gookit/color v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/hashicorp/go-version v1.7.0 // indirect
|
||||
github.com/hashicorp/go-version v1.8.0 // indirect
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jackc/pgx/v5 v5.7.6 // indirect
|
||||
github.com/jackc/pgx/v5 v5.8.0 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.4.1 // indirect
|
||||
github.com/klauspost/compress v1.18.2 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.4.2 // indirect
|
||||
github.com/kamstrup/intmap v0.5.2 // indirect
|
||||
github.com/klauspost/compress v1.18.3 // indirect
|
||||
github.com/lib/pq v1.11.1 // indirect
|
||||
github.com/lithammer/fuzzysearch v1.1.8 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.19 // indirect
|
||||
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect
|
||||
github.com/mdlayher/socket v0.5.0 // indirect
|
||||
github.com/mdlayher/netlink v1.8.0 // indirect
|
||||
github.com/mdlayher/socket v0.5.1 // indirect
|
||||
github.com/mitchellh/go-ps v1.0.0 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/moby/api v1.53.0 // indirect
|
||||
github.com/moby/moby/client v0.2.2 // indirect
|
||||
github.com/moby/sys/atomicwriter v0.1.0 // indirect
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/morikuni/aec v1.1.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/opencontainers/runc v1.3.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 // indirect
|
||||
github.com/pires/go-proxyproto v0.8.1 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 // indirect
|
||||
github.com/pires/go-proxyproto v0.9.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus-community/pro-bing v0.4.0 // indirect
|
||||
github.com/prometheus-community/pro-bing v0.7.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/prometheus/procfs v0.19.2 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/safchain/ethtool v0.3.0 // indirect
|
||||
github.com/safchain/ethtool v0.7.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.12.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sirupsen/logrus v1.9.4 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
@@ -193,8 +208,8 @@ require (
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
|
||||
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
|
||||
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc // indirect
|
||||
github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a // indirect
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect
|
||||
github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d // indirect
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368 // indirect
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
@@ -202,24 +217,23 @@ require (
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect
|
||||
go.opentelemetry.io/otel v1.39.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.39.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.39.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect
|
||||
go.opentelemetry.io/otel v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.40.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect
|
||||
golang.org/x/mod v0.30.0 // indirect
|
||||
golang.org/x/mod v0.32.0 // indirect
|
||||
golang.org/x/sys v0.40.0 // indirect
|
||||
golang.org/x/term v0.38.0 // indirect
|
||||
golang.org/x/text v0.32.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/tools v0.39.0 // indirect
|
||||
golang.org/x/term v0.39.0 // indirect
|
||||
golang.org/x/text v0.33.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
golang.org/x/tools v0.41.0 // indirect
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect
|
||||
)
|
||||
|
||||
tool (
|
||||
|
||||
313
go.sum
313
go.sum
@@ -33,53 +33,55 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEV
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=
|
||||
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=
|
||||
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/arl/statsviz v0.8.0 h1:O6GjjVxEDxcByAucOSl29HaGYLXsuwA3ujJw8H9E7/U=
|
||||
github.com/arl/statsviz v0.8.0/go.mod h1:XlrbiT7xYT03xaW9JMMfD8KFUhBOESJwfyNJu83PbB0=
|
||||
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 h1:JBod0SnNqcWQ0+uAyzeRFG1zCHotW8DukumYYyNy0zo=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.93.2 h1:U3ygWUhCpiSPYSHOrRhb3gOl9T5Y3kB8k5Vjs//57bE=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.93.2/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 h1:IOdss+igJDFdic9w3WKwxGCmHqUxydvIhJOm9LJ32Dk=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
|
||||
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
|
||||
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 h1:bXAPYSbdYbS5VTy92NIUbeDI1qyggi+JYh5op9IFlcQ=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
|
||||
github.com/axiomhq/hyperloglog v0.2.6 h1:sRhvvF3RIXWQgAXaTphLp4yJiX4S0IN3MWTaAgZoRJw=
|
||||
github.com/axiomhq/hyperloglog v0.2.6/go.mod h1:YjX/dQqCR/7QYX0g8mu8UZAjpIenz1FKM71UEsjFoTo=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
@@ -101,8 +103,10 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
|
||||
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY=
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
|
||||
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
|
||||
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
|
||||
github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U=
|
||||
github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
|
||||
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
|
||||
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
|
||||
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||
@@ -118,38 +122,38 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
|
||||
github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
|
||||
github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
|
||||
github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
|
||||
github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creachadair/command v0.2.0 h1:qTA9cMMhZePAxFoNdnk6F6nn94s1qPndIg9hJbqI9cA=
|
||||
github.com/creachadair/command v0.2.0/go.mod h1:j+Ar+uYnFsHpkMeV9kGj6lJ45y9u2xqtg8FYy6cm+0o=
|
||||
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
|
||||
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
|
||||
github.com/creachadair/mds v0.25.10 h1:9k9JB35D1xhOCFl0liBhagBBp8fWWkKZrA7UXsfoHtA=
|
||||
github.com/creachadair/mds v0.25.10/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs=
|
||||
github.com/creachadair/msync v0.7.1 h1:SeZmuEBXQPe5GqV/C94ER7QIZPwtvFbeQiykzt/7uho=
|
||||
github.com/creachadair/msync v0.7.1/go.mod h1:8CcFlLsSujfHE5wWm19uUBLHIPDAUr6LXDwneVMO008=
|
||||
github.com/creachadair/mds v0.25.15 h1:i8CUqtfgbCqbvZ++L7lm8No3cOeic9YKF4vHEvEoj+Y=
|
||||
github.com/creachadair/mds v0.25.15/go.mod h1:XtMfRW15sjd1iOi1Z1k+dq0pRsR5xPbulpoTrpyhk8w=
|
||||
github.com/creachadair/msync v0.8.2 h1:ujvc/SVJPn+bFwmjUHucXNTTn3opVe2YbQ46mBCnP08=
|
||||
github.com/creachadair/msync v0.8.2/go.mod h1:LzxqD9kfIl/O3DczkwOgJplLPqwrTbIhINlf9bHIsEY=
|
||||
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
|
||||
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
|
||||
github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0=
|
||||
github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
|
||||
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
|
||||
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 h1:vrC07UZcgPzu/OjWsmQKMGg3LoPSz9jh/pQXIrHjUj4=
|
||||
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ=
|
||||
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8=
|
||||
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
|
||||
github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d h1:QRKpU+9ZBDs62LyBfwhZkJdB5DJX2Sm3p4kUh7l1aA0=
|
||||
github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d/go.mod h1:SUxUaAK/0UG5lYyZR1L1nC4AaYYvSSYTWQSH3FPcxKU=
|
||||
github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 h1:ucRHb6/lvW/+mTEIGbvhcYU3S8+uSNkuMjx/qZFfhtM=
|
||||
github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q=
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
|
||||
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
|
||||
github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY=
|
||||
github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v29.2.1+incompatible h1:n3Jt0QVCN65eiVBoUTZQM9mcQICCJt3akW4pKAbKdJg=
|
||||
github.com/docker/cli v29.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
|
||||
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
@@ -169,22 +173,26 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/gaissmai/bart v0.18.0 h1:jQLBT/RduJu0pv/tLwXE+xKPgtWJejbxuXAR+wLJafo=
|
||||
github.com/gaissmai/bart v0.18.0/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY=
|
||||
github.com/gaissmai/bart v0.26.1 h1:+w4rnLGNlA2GDVn382Tfe3jOsK5vOr5n4KmigJ9lbTo=
|
||||
github.com/gaissmai/bart v0.26.1/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=
|
||||
github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I=
|
||||
github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo=
|
||||
github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
|
||||
github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
|
||||
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
|
||||
github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
|
||||
github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug=
|
||||
github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0=
|
||||
github.com/go-chi/metrics v0.1.1 h1:CXhbnkAVVjb0k73EBRQ6Z2YdWFnbXZgNtg1Mboguibk=
|
||||
github.com/go-chi/metrics v0.1.1/go.mod h1:mcGTM1pPalP7WCtb+akNYFO/lwNwBBLCuedepqjoPn4=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.5 h1:1OyorA5LtdQw12cyJDEHuTrEV3GiXiIhS4/QTTa/SM8=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.5/go.mod h1:mj9ekk/7CPF3VjopaFvWKN2v7fN3D9d3eEOAXRhi/+M=
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
|
||||
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
|
||||
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I=
|
||||
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
|
||||
github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e h1:Lf/gRkoycfOBPa42vU2bbgPurFong6zXeFtPoxholzU=
|
||||
github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e/go.mod h1:uNVvRXArCGbZ508SxYYTC5v1JWoz2voff5pm25jU1Ok=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -194,42 +202,42 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo=
|
||||
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
|
||||
github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ=
|
||||
github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
|
||||
github.com/gofrs/uuid/v5 v5.4.0 h1:EfbpCTjqMuGyq5ZJwxqzn3Cbr2d0rUZU7v5ycAk/e/0=
|
||||
github.com/gofrs/uuid/v5 v5.4.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0=
|
||||
github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU=
|
||||
github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I=
|
||||
github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI=
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
|
||||
github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef h1:xpF9fUHpoIrrjX24DURVKiwHcFpw19ndIs+FwTSMbno=
|
||||
github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
@@ -244,10 +252,10 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI=
|
||||
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
|
||||
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
|
||||
github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
|
||||
github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
@@ -267,8 +275,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
|
||||
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
|
||||
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
|
||||
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=
|
||||
@@ -282,10 +290,12 @@ github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jsimonetti/rtnetlink v1.4.1 h1:JfD4jthWBqZMEffc5RjgmlzpYttAVw1sdnmiNaPO3hE=
|
||||
github.com/jsimonetti/rtnetlink v1.4.1/go.mod h1:xJjT7t59UIZ62GLZbv6PLLo8VFrostJMPBAheR6OM8w=
|
||||
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
|
||||
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/jsimonetti/rtnetlink v1.4.2 h1:Df9w9TZ3npHTyDn0Ev9e1uzmN2odmXd0QX+J5GTEn90=
|
||||
github.com/jsimonetti/rtnetlink v1.4.2/go.mod h1:92s6LJdE+1iOrw+F2/RO7LYI2Qd8pPpFNNUYW06gcoM=
|
||||
github.com/kamstrup/intmap v0.5.2 h1:qnwBm1mh4XAnW9W9Ue9tZtTff8pS6+s6iKF6JRIV2Dk=
|
||||
github.com/kamstrup/intmap v0.5.2/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4=
|
||||
github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
|
||||
github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
@@ -306,8 +316,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
|
||||
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.11.1 h1:wuChtj2hfsGmmx3nf1m7xC2XpK6OtelS2shMY+bGMtI=
|
||||
github.com/lib/pq v1.11.1/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
|
||||
github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4=
|
||||
github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
@@ -323,18 +333,22 @@ github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byF
|
||||
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
||||
github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw=
|
||||
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
|
||||
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg=
|
||||
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o=
|
||||
github.com/mdlayher/netlink v1.8.0 h1:e7XNIYJKD7hUct3Px04RuIGJbBxy1/c4nX7D5YyvvlM=
|
||||
github.com/mdlayher/netlink v1.8.0/go.mod h1:UhgKXUlDQhzb09DrCl2GuRNEglHmhYoWAHid9HK3594=
|
||||
github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c=
|
||||
github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE=
|
||||
github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI=
|
||||
github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI=
|
||||
github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos=
|
||||
github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ=
|
||||
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
|
||||
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
|
||||
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
|
||||
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/moby/api v1.53.0 h1:PihqG1ncw4W+8mZs69jlwGXdaYBeb5brF6BL7mPIS/w=
|
||||
github.com/moby/moby/api v1.53.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc=
|
||||
github.com/moby/moby/client v0.2.2 h1:Pt4hRMCAIlyjL3cr8M5TrXCwKzguebPAc2do2ur7dEM=
|
||||
github.com/moby/moby/client v0.2.2/go.mod h1:2EkIPVNCqR05CMIzL1mfA07t0HvVUUOl85pasRz/GmQ=
|
||||
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
|
||||
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
|
||||
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
||||
@@ -343,8 +357,8 @@ github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
|
||||
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
|
||||
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ=
|
||||
github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
@@ -365,14 +379,14 @@ github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXR
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 h1:QTvNkZ5ylY0PGgA+Lih+GdboMLY/G9SEGLMEGVjTVA4=
|
||||
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 h1:KPpdlQLZcHfTMQRi6bFQ7ogNO0ltFT4PmtwTLW4W+14=
|
||||
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0=
|
||||
github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
|
||||
github.com/pires/go-proxyproto v0.9.2 h1:H1UdHn695zUVVmB0lQ354lOWHOy6TZSpzBl3tgN0s1U=
|
||||
github.com/pires/go-proxyproto v0.9.2/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
@@ -382,16 +396,16 @@ github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Q
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
|
||||
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
|
||||
github.com/prometheus-community/pro-bing v0.7.0 h1:KFYFbxC2f2Fp6c+TyxbCOEarf7rbnzr9Gw8eIb0RfZA=
|
||||
github.com/prometheus-community/pro-bing v0.7.0/go.mod h1:Moob9dvlY50Bfq6i88xIwfyw7xLFHH69LUgx9n5zqCE=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
|
||||
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
|
||||
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
|
||||
github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
|
||||
github.com/pterm/pterm v0.12.29/go.mod h1:WI3qxgvoQFFGKGjGnJR849gU0TsEOvKn5Q8LlY1U7lg=
|
||||
github.com/pterm/pterm v0.12.30/go.mod h1:MOqLIyMOgmTDz9yorcYbcw+HsgoZo3BQfg2wtl3HEFE=
|
||||
@@ -401,8 +415,8 @@ github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5b
|
||||
github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
|
||||
github.com/pterm/pterm v0.12.82 h1:+D9wYhCaeaK0FIQoZtqbNQuNpe2lB2tajKKsTd5paVQ=
|
||||
github.com/pterm/pterm v0.12.82/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.3.0 h1:w/bWkEJdYuRNYhHn5eXnIT8LzDM1O629X1I9MJSkD7Q=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.3.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.4.0 h1:vlSN6/CkEY0pY8KaB0yqo/pCLZvp9nhdbBdjipT4gWo=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.4.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
@@ -412,8 +426,8 @@ github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
|
||||
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
|
||||
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
|
||||
github.com/safchain/ethtool v0.7.0 h1:rlJzfDetsVvT61uz8x1YIcFn12akMfuPulHtZjtb7Is=
|
||||
github.com/safchain/ethtool v0.7.0/go.mod h1:MenQKEjXdfkjD3mp2QdCk8B/hwvkrlOTm/FD4gTpFxQ=
|
||||
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
|
||||
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
|
||||
github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=
|
||||
@@ -423,8 +437,8 @@ github.com/sasha-s/go-deadlock v0.3.6/go.mod h1:CUqNyyvMxTyjFqDT7MRg9mb4Dv/btmGT
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
|
||||
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
|
||||
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
|
||||
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
|
||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||
@@ -462,14 +476,14 @@ github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4
|
||||
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
|
||||
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA=
|
||||
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc=
|
||||
github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a h1:TApskGPim53XY5WRt5hX4DnO8V6CmVoimSklryIoGMM=
|
||||
github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a/go.mod h1:+6WyG6kub5/5uPsMdYQuSti8i6F5WuKpFWLQnZt/Mms=
|
||||
github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d h1:N+TtzIaGYREbLbKZB0WU0vVnMSfaqUkSf3qMEi03hwE=
|
||||
github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d/go.mod h1:6NU8H/GLPVX2TnXAY1duyy9ylLaHwFpr0X93UPiYmNI=
|
||||
github.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f h1:CL6gu95Y1o2ko4XiWPvWkJka0QmQWcUyPywWVWDPQbQ=
|
||||
github.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=
|
||||
github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09 h1:Fc9lE2cDYJbBLpCqnVmoLdf7McPqoHZiDxDPPpkJM04=
|
||||
github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09/go.mod h1:QMNhC4XGFiXKngHVLXE+ERDmQoH0s5fD7AUxupykocQ=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368 h1:0tpDdAj9sSfSZg4gMwNTdqMP592sBrq2Sm0w6ipnh7k=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
|
||||
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=
|
||||
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da h1:jVRUZPRs9sqyKlYHHzHjAqKN+6e/Vog6NpHYeNPJqOw=
|
||||
@@ -480,8 +494,8 @@ github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=
|
||||
github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk=
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2oeKxpIUmtiDV5sn71VgeQgg6vcE7k=
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM=
|
||||
github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok=
|
||||
github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI=
|
||||
github.com/tink-crypto/tink-go/v2 v2.6.0 h1:+KHNBHhWH33Vn+igZWcsgdEPUxKwBMEe0QC60t388v4=
|
||||
github.com/tink-crypto/tink-go/v2 v2.6.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8=
|
||||
github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg=
|
||||
github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE=
|
||||
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=
|
||||
@@ -503,24 +517,24 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJu
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
|
||||
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
|
||||
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
|
||||
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
|
||||
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
|
||||
go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
|
||||
go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
|
||||
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
|
||||
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0=
|
||||
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
|
||||
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40=
|
||||
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
|
||||
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
|
||||
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
|
||||
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
@@ -534,25 +548,25 @@ go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/W
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
|
||||
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
|
||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
|
||||
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
||||
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
|
||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=
|
||||
golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
|
||||
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
||||
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -572,10 +586,8 @@ golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -591,36 +603,35 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
|
||||
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
|
||||
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
|
||||
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 h1:7ei4lp52gK1uSejlA8AZl5AJjeLUOHBQscRQZUgAcu0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20/go.mod h1:ZdbssH/1SOVnjnDlXzxDHK2MCidiqXtbYccJNzNYPEE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
|
||||
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
@@ -639,8 +650,8 @@ gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
|
||||
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
|
||||
gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
|
||||
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM=
|
||||
honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 h1:5SXjd4ET5dYijLaf0O3aOenC0Z4ZafIWSpjUzsQaNho=
|
||||
@@ -675,10 +686,12 @@ modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
|
||||
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
|
||||
tailscale.com v1.94.0 h1:5oW3SF35aU9ekHDhP2J4CHewnA2NxE7SRilDB2pVjaA=
|
||||
tailscale.com v1.94.0/go.mod h1:gLnVrEOP32GWvroaAHHGhjSGMPJ1i4DvqNwEg+Yuov4=
|
||||
tailscale.com v1.94.1 h1:0dAst/ozTuFkgmxZULc3oNwR9+qPIt5ucvzH7kaM0Jw=
|
||||
tailscale.com v1.94.1/go.mod h1:gLnVrEOP32GWvroaAHHGhjSGMPJ1i4DvqNwEg+Yuov4=
|
||||
zgo.at/zcache/v2 v2.4.1 h1:Dfjoi8yI0Uq7NCc4lo2kaQJJmp9Mijo21gef+oJstbY=
|
||||
zgo.at/zcache/v2 v2.4.1/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
|
||||
zombiezen.com/go/postgrestest v1.0.1 h1:aXoADQAJmZDU3+xilYVut0pHhgc0sF8ZspPW9gFNwP4=
|
||||
|
||||
258
hscontrol/app.go
258
hscontrol/app.go
@@ -20,7 +20,9 @@ import (
|
||||
|
||||
"github.com/cenkalti/backoff/v5"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/go-chi/metrics"
|
||||
grpcRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
@@ -115,13 +117,14 @@ var (
|
||||
|
||||
func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
var err error
|
||||
|
||||
if profilingEnabled {
|
||||
runtime.SetBlockProfileRate(1)
|
||||
}
|
||||
|
||||
noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
|
||||
return nil, fmt.Errorf("reading or creating Noise protocol private key: %w", err)
|
||||
}
|
||||
|
||||
s, err := state.NewState(cfg)
|
||||
@@ -140,27 +143,30 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
ephemeralGC := db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
|
||||
node, ok := app.state.GetNodeByID(ni)
|
||||
if !ok {
|
||||
log.Error().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed")
|
||||
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed because node not found in NodeStore")
|
||||
log.Error().Uint64("node.id", ni.Uint64()).Msg("ephemeral node deletion failed")
|
||||
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Msg("ephemeral node deletion failed because node not found in NodeStore")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
policyChanged, err := app.state.DeleteNode(node)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Uint64("node.id", ni.Uint64()).Str("node.name", node.Hostname()).Msg("Ephemeral node deletion failed")
|
||||
log.Error().Err(err).EmbedObject(node).Msg("ephemeral node deletion failed")
|
||||
return
|
||||
}
|
||||
|
||||
app.Change(policyChanged)
|
||||
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Str("node.name", node.Hostname()).Msg("Ephemeral node deleted because garbage collection timeout reached")
|
||||
log.Debug().Caller().EmbedObject(node).Msg("ephemeral node deleted because garbage collection timeout reached")
|
||||
})
|
||||
app.ephemeralGC = ephemeralGC
|
||||
|
||||
var authProvider AuthProvider
|
||||
|
||||
authProvider = NewAuthProviderWeb(cfg.ServerURL)
|
||||
if cfg.OIDC.Issuer != "" {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
oidcProvider, err := NewAuthProviderOIDC(
|
||||
ctx,
|
||||
&app,
|
||||
@@ -177,17 +183,18 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
authProvider = oidcProvider
|
||||
}
|
||||
}
|
||||
|
||||
app.authProvider = authProvider
|
||||
|
||||
if app.cfg.TailcfgDNSConfig != nil && app.cfg.TailcfgDNSConfig.Proxied { // if MagicDNS
|
||||
// TODO(kradalby): revisit why this takes a list.
|
||||
|
||||
var magicDNSDomains []dnsname.FQDN
|
||||
if cfg.PrefixV4 != nil {
|
||||
magicDNSDomains = append(
|
||||
magicDNSDomains,
|
||||
util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
|
||||
}
|
||||
|
||||
if cfg.PrefixV6 != nil {
|
||||
magicDNSDomains = append(
|
||||
magicDNSDomains,
|
||||
@@ -198,6 +205,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
if app.cfg.TailcfgDNSConfig.Routes == nil {
|
||||
app.cfg.TailcfgDNSConfig.Routes = make(map[string][]*dnstype.Resolver)
|
||||
}
|
||||
|
||||
for _, d := range magicDNSDomains {
|
||||
app.cfg.TailcfgDNSConfig.Routes[d.WithoutTrailingDot()] = nil
|
||||
}
|
||||
@@ -206,7 +214,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
if cfg.DERP.ServerEnabled {
|
||||
derpServerKey, err := readOrCreatePrivateKey(cfg.DERP.ServerPrivateKeyPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read or create DERP server private key: %w", err)
|
||||
return nil, fmt.Errorf("reading or creating DERP server private key: %w", err)
|
||||
}
|
||||
|
||||
if derpServerKey.Equal(*noisePrivateKey) {
|
||||
@@ -232,6 +240,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
app.DERPServer = embeddedDERPServer
|
||||
}
|
||||
|
||||
@@ -251,9 +260,11 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
lastExpiryCheck := time.Unix(0, 0)
|
||||
|
||||
derpTickerChan := make(<-chan time.Time)
|
||||
|
||||
if h.cfg.DERP.AutoUpdate && h.cfg.DERP.UpdateFrequency != 0 {
|
||||
derpTicker := time.NewTicker(h.cfg.DERP.UpdateFrequency)
|
||||
defer derpTicker.Stop()
|
||||
|
||||
derpTickerChan = derpTicker.C
|
||||
}
|
||||
|
||||
@@ -271,8 +282,10 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
return
|
||||
|
||||
case <-expireTicker.C:
|
||||
var expiredNodeChanges []change.Change
|
||||
var changed bool
|
||||
var (
|
||||
expiredNodeChanges []change.Change
|
||||
changed bool
|
||||
)
|
||||
|
||||
lastExpiryCheck, expiredNodeChanges, changed = h.state.ExpireExpiredNodes(lastExpiryCheck)
|
||||
|
||||
@@ -286,12 +299,14 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
}
|
||||
|
||||
case <-derpTickerChan:
|
||||
log.Info().Msg("Fetching DERPMap updates")
|
||||
derpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) {
|
||||
log.Info().Msg("fetching DERPMap updates")
|
||||
|
||||
derpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) { //nolint:contextcheck
|
||||
derpMap, err := derp.GetDERPMap(h.cfg.DERP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
|
||||
region, _ := h.DERPServer.GenerateRegion()
|
||||
derpMap.Regions[region.RegionID] = ®ion
|
||||
@@ -303,6 +318,7 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
log.Error().Err(err).Msg("failed to build new DERPMap, retrying later")
|
||||
continue
|
||||
}
|
||||
|
||||
h.state.SetDERPMap(derpMap)
|
||||
|
||||
h.Change(change.DERPMap())
|
||||
@@ -311,6 +327,7 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
h.cfg.TailcfgDNSConfig.ExtraRecords = records
|
||||
|
||||
h.Change(change.ExtraRecords())
|
||||
@@ -339,7 +356,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
if !ok {
|
||||
return ctx, status.Errorf(
|
||||
codes.InvalidArgument,
|
||||
"Retrieving metadata is failed",
|
||||
"retrieving metadata",
|
||||
)
|
||||
}
|
||||
|
||||
@@ -347,7 +364,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
if !ok {
|
||||
return ctx, status.Errorf(
|
||||
codes.Unauthenticated,
|
||||
"Authorization token is not supplied",
|
||||
"authorization token not supplied",
|
||||
)
|
||||
}
|
||||
|
||||
@@ -362,7 +379,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
|
||||
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
|
||||
if err != nil {
|
||||
return ctx, status.Error(codes.Internal, "failed to validate token")
|
||||
return ctx, status.Error(codes.Internal, "validating token")
|
||||
}
|
||||
|
||||
if !valid {
|
||||
@@ -390,7 +407,8 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
|
||||
writeUnauthorized := func(statusCode int) {
|
||||
writer.WriteHeader(statusCode)
|
||||
if _, err := writer.Write([]byte("Unauthorized")); err != nil {
|
||||
|
||||
if _, err := writer.Write([]byte("Unauthorized")); err != nil { //nolint:noinlineerr
|
||||
log.Error().Err(err).Msg("writing HTTP response failed")
|
||||
}
|
||||
}
|
||||
@@ -401,6 +419,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg(`missing "Bearer " prefix in "Authorization" header`)
|
||||
writeUnauthorized(http.StatusUnauthorized)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -412,6 +431,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("failed to validate token")
|
||||
writeUnauthorized(http.StatusUnauthorized)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -420,6 +440,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("invalid token")
|
||||
writeUnauthorized(http.StatusUnauthorized)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -431,61 +452,73 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
// and will remove it if it is not.
|
||||
func (h *Headscale) ensureUnixSocketIsAbsent() error {
|
||||
// File does not exist, all fine
|
||||
if _, err := os.Stat(h.cfg.UnixSocket); errors.Is(err, os.ErrNotExist) {
|
||||
if _, err := os.Stat(h.cfg.UnixSocket); errors.Is(err, os.ErrNotExist) { //nolint:noinlineerr
|
||||
return nil
|
||||
}
|
||||
|
||||
return os.Remove(h.cfg.UnixSocket)
|
||||
}
|
||||
|
||||
func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
router := mux.NewRouter()
|
||||
router.Use(prometheusMiddleware)
|
||||
func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *chi.Mux {
|
||||
r := chi.NewRouter()
|
||||
r.Use(metrics.Collector(metrics.CollectorOpts{
|
||||
Host: false,
|
||||
Proto: true,
|
||||
Skip: func(r *http.Request) bool {
|
||||
return r.Method != http.MethodOptions
|
||||
},
|
||||
}))
|
||||
r.Use(middleware.RequestID)
|
||||
r.Use(middleware.RealIP)
|
||||
r.Use(middleware.RequestLogger(&zerologRequestLogger{}))
|
||||
r.Use(middleware.Recoverer)
|
||||
|
||||
router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).
|
||||
Methods(http.MethodPost, http.MethodGet)
|
||||
r.Post(ts2021UpgradePath, h.NoiseUpgradeHandler)
|
||||
|
||||
router.HandleFunc("/robots.txt", h.RobotsHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/version", h.VersionHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).
|
||||
Methods(http.MethodGet)
|
||||
r.Get("/robots.txt", h.RobotsHandler)
|
||||
r.Get("/health", h.HealthHandler)
|
||||
r.Get("/version", h.VersionHandler)
|
||||
r.Get("/key", h.KeyHandler)
|
||||
r.Get("/register/{auth_id}", h.authProvider.RegisterHandler)
|
||||
r.Get("/auth/{auth_id}", h.authProvider.AuthHandler)
|
||||
|
||||
if provider, ok := h.authProvider.(*AuthProviderOIDC); ok {
|
||||
router.HandleFunc("/oidc/callback", provider.OIDCCallbackHandler).Methods(http.MethodGet)
|
||||
r.Get("/oidc/callback", provider.OIDCCallbackHandler)
|
||||
}
|
||||
router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet)
|
||||
router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).
|
||||
Methods(http.MethodGet)
|
||||
router.HandleFunc("/windows", h.WindowsConfigMessage).Methods(http.MethodGet)
|
||||
|
||||
r.Get("/apple", h.AppleConfigMessage)
|
||||
r.Get("/apple/{platform}", h.ApplePlatformConfig)
|
||||
r.Get("/windows", h.WindowsConfigMessage)
|
||||
|
||||
// TODO(kristoffer): move swagger into a package
|
||||
router.HandleFunc("/swagger", headscale.SwaggerUI).Methods(http.MethodGet)
|
||||
router.HandleFunc("/swagger/v1/openapiv2.json", headscale.SwaggerAPIv1).
|
||||
Methods(http.MethodGet)
|
||||
r.Get("/swagger", headscale.SwaggerUI)
|
||||
r.Get("/swagger/v1/openapiv2.json", headscale.SwaggerAPIv1)
|
||||
|
||||
router.HandleFunc("/verify", h.VerifyHandler).Methods(http.MethodPost)
|
||||
r.Post("/verify", h.VerifyHandler)
|
||||
|
||||
if h.cfg.DERP.ServerEnabled {
|
||||
router.HandleFunc("/derp", h.DERPServer.DERPHandler)
|
||||
router.HandleFunc("/derp/probe", derpServer.DERPProbeHandler)
|
||||
router.HandleFunc("/derp/latency-check", derpServer.DERPProbeHandler)
|
||||
router.HandleFunc("/bootstrap-dns", derpServer.DERPBootstrapDNSHandler(h.state.DERPMap()))
|
||||
r.HandleFunc("/derp", h.DERPServer.DERPHandler)
|
||||
r.HandleFunc("/derp/probe", derpServer.DERPProbeHandler)
|
||||
r.HandleFunc("/derp/latency-check", derpServer.DERPProbeHandler)
|
||||
r.HandleFunc("/bootstrap-dns", derpServer.DERPBootstrapDNSHandler(h.state.DERPMap()))
|
||||
}
|
||||
|
||||
apiRouter := router.PathPrefix("/api").Subrouter()
|
||||
apiRouter.Use(h.httpAuthenticationMiddleware)
|
||||
apiRouter.PathPrefix("/v1/").HandlerFunc(grpcMux.ServeHTTP)
|
||||
router.HandleFunc("/favicon.ico", FaviconHandler)
|
||||
router.PathPrefix("/").HandlerFunc(BlankHandler)
|
||||
r.Route("/api", func(r chi.Router) {
|
||||
r.Use(h.httpAuthenticationMiddleware)
|
||||
r.HandleFunc("/v1/*", grpcMux.ServeHTTP)
|
||||
})
|
||||
r.Get("/favicon.ico", FaviconHandler)
|
||||
r.Get("/", BlankHandler)
|
||||
|
||||
return router
|
||||
return r
|
||||
}
|
||||
|
||||
// Serve launches the HTTP and gRPC server service Headscale and the API.
|
||||
//
|
||||
//nolint:gocyclo // complex server startup function
|
||||
func (h *Headscale) Serve() error {
|
||||
var err error
|
||||
|
||||
capver.CanOldCodeBeCleanedUp()
|
||||
|
||||
if profilingEnabled {
|
||||
@@ -506,12 +539,13 @@ func (h *Headscale) Serve() error {
|
||||
}
|
||||
|
||||
versionInfo := types.GetVersionInfo()
|
||||
log.Info().Str("version", versionInfo.Version).Str("commit", versionInfo.Commit).Msg("Starting Headscale")
|
||||
log.Info().Str("version", versionInfo.Version).Str("commit", versionInfo.Commit).Msg("starting headscale")
|
||||
log.Info().
|
||||
Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).
|
||||
Msg("Clients with a lower minimum version will be rejected")
|
||||
|
||||
h.mapBatcher = mapper.NewBatcherAndMapper(h.cfg, h.state)
|
||||
|
||||
h.mapBatcher.Start()
|
||||
defer h.mapBatcher.Close()
|
||||
|
||||
@@ -526,7 +560,7 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
derpMap, err := derp.GetDERPMap(h.cfg.DERP)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get DERPMap: %w", err)
|
||||
return fmt.Errorf("getting DERPMap: %w", err)
|
||||
}
|
||||
|
||||
if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
|
||||
@@ -545,6 +579,7 @@ func (h *Headscale) Serve() error {
|
||||
// around between restarts, they will reconnect and the GC will
|
||||
// be cancelled.
|
||||
go h.ephemeralGC.Start()
|
||||
|
||||
ephmNodes := h.state.ListEphemeralNodes()
|
||||
for _, node := range ephmNodes.All() {
|
||||
h.ephemeralGC.Schedule(node.ID(), h.cfg.EphemeralNodeInactivityTimeout)
|
||||
@@ -555,7 +590,9 @@ func (h *Headscale) Serve() error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting up extrarecord manager: %w", err)
|
||||
}
|
||||
|
||||
h.cfg.TailcfgDNSConfig.ExtraRecords = h.extraRecordMan.Records()
|
||||
|
||||
go h.extraRecordMan.Run()
|
||||
defer h.extraRecordMan.Close()
|
||||
}
|
||||
@@ -564,6 +601,7 @@ func (h *Headscale) Serve() error {
|
||||
// records updates
|
||||
scheduleCtx, scheduleCancel := context.WithCancel(context.Background())
|
||||
defer scheduleCancel()
|
||||
|
||||
go h.scheduledTasks(scheduleCtx)
|
||||
|
||||
if zl.GlobalLevel() == zl.TraceLevel {
|
||||
@@ -576,6 +614,7 @@ func (h *Headscale) Serve() error {
|
||||
errorGroup := new(errgroup.Group)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
@@ -586,29 +625,30 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
err = h.ensureUnixSocketIsAbsent()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to remove old socket file: %w", err)
|
||||
return fmt.Errorf("removing old socket file: %w", err)
|
||||
}
|
||||
|
||||
socketDir := filepath.Dir(h.cfg.UnixSocket)
|
||||
|
||||
err = util.EnsureDir(socketDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting up unix socket: %w", err)
|
||||
}
|
||||
|
||||
socketListener, err := net.Listen("unix", h.cfg.UnixSocket)
|
||||
socketListener, err := new(net.ListenConfig).Listen(context.Background(), "unix", h.cfg.UnixSocket)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set up gRPC socket: %w", err)
|
||||
return fmt.Errorf("setting up gRPC socket: %w", err)
|
||||
}
|
||||
|
||||
// Change socket permissions
|
||||
if err := os.Chmod(h.cfg.UnixSocket, h.cfg.UnixSocketPermission); err != nil {
|
||||
return fmt.Errorf("failed change permission of gRPC socket: %w", err)
|
||||
if err := os.Chmod(h.cfg.UnixSocket, h.cfg.UnixSocketPermission); err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("changing gRPC socket permission: %w", err)
|
||||
}
|
||||
|
||||
grpcGatewayMux := grpcRuntime.NewServeMux()
|
||||
|
||||
// Make the grpc-gateway connect to grpc over socket
|
||||
grpcGatewayConn, err := grpc.Dial(
|
||||
grpcGatewayConn, err := grpc.Dial( //nolint:staticcheck // SA1019: deprecated but supported in 1.x
|
||||
h.cfg.UnixSocket,
|
||||
[]grpc.DialOption{
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
@@ -659,10 +699,13 @@ func (h *Headscale) Serve() error {
|
||||
// https://github.com/soheilhy/cmux/issues/68
|
||||
// https://github.com/soheilhy/cmux/issues/91
|
||||
|
||||
var grpcServer *grpc.Server
|
||||
var grpcListener net.Listener
|
||||
var (
|
||||
grpcServer *grpc.Server
|
||||
grpcListener net.Listener
|
||||
)
|
||||
|
||||
if tlsConfig != nil || h.cfg.GRPCAllowInsecure {
|
||||
log.Info().Msgf("Enabling remote gRPC at %s", h.cfg.GRPCAddr)
|
||||
log.Info().Msgf("enabling remote gRPC at %s", h.cfg.GRPCAddr)
|
||||
|
||||
grpcOptions := []grpc.ServerOption{
|
||||
grpc.ChainUnaryInterceptor(
|
||||
@@ -685,9 +728,9 @@ func (h *Headscale) Serve() error {
|
||||
v1.RegisterHeadscaleServiceServer(grpcServer, newHeadscaleV1APIServer(h))
|
||||
reflection.Register(grpcServer)
|
||||
|
||||
grpcListener, err = net.Listen("tcp", h.cfg.GRPCAddr)
|
||||
grpcListener, err = new(net.ListenConfig).Listen(context.Background(), "tcp", h.cfg.GRPCAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
return fmt.Errorf("binding to TCP address: %w", err)
|
||||
}
|
||||
|
||||
errorGroup.Go(func() error { return grpcServer.Serve(grpcListener) })
|
||||
@@ -715,14 +758,16 @@ func (h *Headscale) Serve() error {
|
||||
}
|
||||
|
||||
var httpListener net.Listener
|
||||
|
||||
if tlsConfig != nil {
|
||||
httpServer.TLSConfig = tlsConfig
|
||||
httpListener, err = tls.Listen("tcp", h.cfg.Addr, tlsConfig)
|
||||
} else {
|
||||
httpListener, err = net.Listen("tcp", h.cfg.Addr)
|
||||
httpListener, err = new(net.ListenConfig).Listen(context.Background(), "tcp", h.cfg.Addr)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
return fmt.Errorf("binding to TCP address: %w", err)
|
||||
}
|
||||
|
||||
errorGroup.Go(func() error { return httpServer.Serve(httpListener) })
|
||||
@@ -738,7 +783,7 @@ func (h *Headscale) Serve() error {
|
||||
if h.cfg.MetricsAddr != "" {
|
||||
debugHTTPListener, err = (&net.ListenConfig{}).Listen(ctx, "tcp", h.cfg.MetricsAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
return fmt.Errorf("binding to TCP address: %w", err)
|
||||
}
|
||||
|
||||
debugHTTPServer = h.debugHTTPServer()
|
||||
@@ -751,19 +796,24 @@ func (h *Headscale) Serve() error {
|
||||
log.Info().Msg("metrics server disabled (metrics_listen_addr is empty)")
|
||||
}
|
||||
|
||||
|
||||
var tailsqlContext context.Context
|
||||
|
||||
if tailsqlEnabled {
|
||||
if h.cfg.Database.Type != types.DatabaseSqlite {
|
||||
//nolint:gocritic // exitAfterDefer: Fatal exits during initialization before servers start
|
||||
log.Fatal().
|
||||
Str("type", h.cfg.Database.Type).
|
||||
Msgf("tailsql only support %q", types.DatabaseSqlite)
|
||||
}
|
||||
|
||||
if tailsqlTSKey == "" {
|
||||
//nolint:gocritic // exitAfterDefer: Fatal exits during initialization before servers start
|
||||
log.Fatal().Msg("tailsql requires TS_AUTHKEY to be set")
|
||||
}
|
||||
|
||||
tailsqlContext = context.Background()
|
||||
go runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.Database.Sqlite.Path)
|
||||
|
||||
go runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.Database.Sqlite.Path) //nolint:errcheck
|
||||
}
|
||||
|
||||
// Handle common process-killing signals so we can gracefully shut down:
|
||||
@@ -774,6 +824,7 @@ func (h *Headscale) Serve() error {
|
||||
syscall.SIGTERM,
|
||||
syscall.SIGQUIT,
|
||||
syscall.SIGHUP)
|
||||
|
||||
sigFunc := func(c chan os.Signal) {
|
||||
// Wait for a SIGINT or SIGKILL:
|
||||
for {
|
||||
@@ -798,6 +849,7 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
default:
|
||||
info := func(msg string) { log.Info().Msg(msg) }
|
||||
|
||||
log.Info().
|
||||
Str("signal", sig.String()).
|
||||
Msg("Received signal to stop, shutting down gracefully")
|
||||
@@ -854,6 +906,7 @@ func (h *Headscale) Serve() error {
|
||||
if debugHTTPListener != nil {
|
||||
debugHTTPListener.Close()
|
||||
}
|
||||
|
||||
httpListener.Close()
|
||||
grpcGatewayConn.Close()
|
||||
|
||||
@@ -863,6 +916,7 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
// Close state connections
|
||||
info("closing state and database")
|
||||
|
||||
err = h.state.Close()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to close state")
|
||||
@@ -875,6 +929,7 @@ func (h *Headscale) Serve() error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
errorGroup.Go(func() error {
|
||||
sigFunc(sigc)
|
||||
|
||||
@@ -886,6 +941,7 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
var err error
|
||||
|
||||
if h.cfg.TLS.LetsEncrypt.Hostname != "" {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
|
||||
log.Warn().
|
||||
@@ -918,7 +974,6 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
// Configuration via autocert with HTTP-01. This requires listening on
|
||||
// port 80 for the certificate validation in addition to the headscale
|
||||
// service, which can be configured to run on any other port.
|
||||
|
||||
server := &http.Server{
|
||||
Addr: h.cfg.TLS.LetsEncrypt.Listen,
|
||||
Handler: certManager.HTTPHandler(http.HandlerFunc(h.redirect)),
|
||||
@@ -940,13 +995,13 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
}
|
||||
} else if h.cfg.TLS.CertPath == "" {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "http://") {
|
||||
log.Warn().Msg("Listening without TLS but ServerURL does not start with http://")
|
||||
log.Warn().Msg("listening without TLS but ServerURL does not start with http://")
|
||||
}
|
||||
|
||||
return nil, err
|
||||
} else {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
|
||||
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
|
||||
log.Warn().Msg("listening with TLS but ServerURL does not start with https://")
|
||||
}
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
@@ -963,6 +1018,7 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
|
||||
func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
|
||||
dir := filepath.Dir(path)
|
||||
|
||||
err := util.EnsureDir(dir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ensuring private key directory: %w", err)
|
||||
@@ -970,21 +1026,22 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
|
||||
|
||||
privateKey, err := os.ReadFile(path)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
log.Info().Str("path", path).Msg("No private key file at path, creating...")
|
||||
log.Info().Str("path", path).Msg("no private key file at path, creating...")
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
|
||||
machineKeyStr, err := machineKey.MarshalText()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to convert private key to string for saving: %w",
|
||||
"converting private key to string for saving: %w",
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
err = os.WriteFile(path, machineKeyStr, privateKeyFileMode)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to save private key to disk at path %q: %w",
|
||||
"saving private key to disk at path %q: %w",
|
||||
path,
|
||||
err,
|
||||
)
|
||||
@@ -992,14 +1049,14 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
|
||||
|
||||
return &machineKey, nil
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("failed to read private key file: %w", err)
|
||||
return nil, fmt.Errorf("reading private key file: %w", err)
|
||||
}
|
||||
|
||||
trimmedPrivateKey := strings.TrimSpace(string(privateKey))
|
||||
|
||||
var machineKey key.MachinePrivate
|
||||
if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse private key: %w", err)
|
||||
if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil { //nolint:noinlineerr
|
||||
return nil, fmt.Errorf("parsing private key: %w", err)
|
||||
}
|
||||
|
||||
return &machineKey, nil
|
||||
@@ -1023,7 +1080,7 @@ type acmeLogger struct {
|
||||
func (l *acmeLogger) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
resp, err := l.rt.RoundTrip(req)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("url", req.URL.String()).Msg("ACME request failed")
|
||||
log.Error().Err(err).Str("url", req.URL.String()).Msg("acme request failed")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -1031,8 +1088,57 @@ func (l *acmeLogger) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
log.Error().Int("status_code", resp.StatusCode).Str("url", req.URL.String()).Bytes("body", body).Msg("ACME request returned error")
|
||||
log.Error().Int("status_code", resp.StatusCode).Str("url", req.URL.String()).Bytes("body", body).Msg("acme request returned error")
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// zerologRequestLogger implements chi's middleware.LogFormatter
|
||||
// to route HTTP request logs through zerolog.
|
||||
type zerologRequestLogger struct{}
|
||||
|
||||
func (z *zerologRequestLogger) NewLogEntry(
|
||||
r *http.Request,
|
||||
) middleware.LogEntry {
|
||||
return &zerologLogEntry{
|
||||
method: r.Method,
|
||||
path: r.URL.Path,
|
||||
proto: r.Proto,
|
||||
remote: r.RemoteAddr,
|
||||
}
|
||||
}
|
||||
|
||||
type zerologLogEntry struct {
|
||||
method string
|
||||
path string
|
||||
proto string
|
||||
remote string
|
||||
}
|
||||
|
||||
func (e *zerologLogEntry) Write(
|
||||
status, bytes int,
|
||||
header http.Header,
|
||||
elapsed time.Duration,
|
||||
extra any,
|
||||
) {
|
||||
log.Info().
|
||||
Str("method", e.method).
|
||||
Str("path", e.path).
|
||||
Str("proto", e.proto).
|
||||
Str("remote", e.remote).
|
||||
Int("status", status).
|
||||
Int("bytes", bytes).
|
||||
Dur("elapsed", elapsed).
|
||||
Msg("http request")
|
||||
}
|
||||
|
||||
func (e *zerologLogEntry) Panic(
|
||||
v any,
|
||||
stack []byte,
|
||||
) {
|
||||
log.Error().
|
||||
Interface("panic", v).
|
||||
Bytes("stack", stack).
|
||||
Msg("http handler panic")
|
||||
}
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 15 KiB |
@@ -16,12 +16,13 @@ import (
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
type AuthProvider interface {
|
||||
RegisterHandler(http.ResponseWriter, *http.Request)
|
||||
AuthURL(types.RegistrationID) string
|
||||
RegisterHandler(w http.ResponseWriter, r *http.Request)
|
||||
AuthHandler(w http.ResponseWriter, r *http.Request)
|
||||
RegisterURL(authID types.AuthID) string
|
||||
AuthURL(authID types.AuthID) string
|
||||
}
|
||||
|
||||
func (h *Headscale) handleRegister(
|
||||
@@ -42,8 +43,7 @@ func (h *Headscale) handleRegister(
|
||||
// This is a logout attempt (expiry in the past)
|
||||
if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
|
||||
log.Debug().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
EmbedObject(node).
|
||||
Bool("is_ephemeral", node.IsEphemeral()).
|
||||
Bool("has_authkey", node.AuthKey().Valid()).
|
||||
Msg("Found existing node for logout, calling handleLogout")
|
||||
@@ -52,6 +52,7 @@ func (h *Headscale) handleRegister(
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("handling logout: %w", err)
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
return resp, nil
|
||||
}
|
||||
@@ -113,8 +114,7 @@ func (h *Headscale) handleRegister(
|
||||
resp, err := h.handleRegisterWithAuthKey(req, machineKey)
|
||||
if err != nil {
|
||||
// Preserve HTTPError types so they can be handled properly by the HTTP layer
|
||||
var httpErr HTTPError
|
||||
if errors.As(err, &httpErr) {
|
||||
if httpErr, ok := errors.AsType[HTTPError](err); ok {
|
||||
return nil, httpErr
|
||||
}
|
||||
|
||||
@@ -133,7 +133,7 @@ func (h *Headscale) handleRegister(
|
||||
}
|
||||
|
||||
// handleLogout checks if the [tailcfg.RegisterRequest] is a
|
||||
// logout attempt from a node. If the node is not attempting to
|
||||
// logout attempt from a node. If the node is not attempting to.
|
||||
func (h *Headscale) handleLogout(
|
||||
node types.NodeView,
|
||||
req tailcfg.RegisterRequest,
|
||||
@@ -155,11 +155,12 @@ func (h *Headscale) handleLogout(
|
||||
// force the client to re-authenticate.
|
||||
// TODO(kradalby): I wonder if this is a path we ever hit?
|
||||
if node.IsExpired() {
|
||||
log.Trace().Str("node.name", node.Hostname()).
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
log.Trace().
|
||||
EmbedObject(node).
|
||||
Interface("reg.req", req).
|
||||
Bool("unexpected", true).
|
||||
Msg("Node key expired, forcing re-authentication")
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
NodeKeyExpired: true,
|
||||
MachineAuthorized: false,
|
||||
@@ -182,8 +183,7 @@ func (h *Headscale) handleLogout(
|
||||
// Zero expiry is handled in handleRegister() before calling this function.
|
||||
if req.Expiry.Before(time.Now()) {
|
||||
log.Debug().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
EmbedObject(node).
|
||||
Bool("is_ephemeral", node.IsEphemeral()).
|
||||
Bool("has_authkey", node.AuthKey().Valid()).
|
||||
Time("req.expiry", req.Expiry).
|
||||
@@ -191,8 +191,7 @@ func (h *Headscale) handleLogout(
|
||||
|
||||
if node.IsEphemeral() {
|
||||
log.Info().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
EmbedObject(node).
|
||||
Msg("Deleting ephemeral node during logout")
|
||||
|
||||
c, err := h.state.DeleteNode(node)
|
||||
@@ -209,14 +208,15 @@ func (h *Headscale) handleLogout(
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
EmbedObject(node).
|
||||
Msg("Node is not ephemeral, setting expiry instead of deleting")
|
||||
}
|
||||
|
||||
// Update the internal state with the nodes new expiry, meaning it is
|
||||
// logged out.
|
||||
updatedNode, c, err := h.state.SetNodeExpiry(node.ID(), req.Expiry)
|
||||
expiry := req.Expiry
|
||||
|
||||
updatedNode, c, err := h.state.SetNodeExpiry(node.ID(), &expiry)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting node expiry: %w", err)
|
||||
}
|
||||
@@ -265,21 +265,24 @@ func (h *Headscale) waitForFollowup(
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "invalid followup URL", err)
|
||||
}
|
||||
|
||||
followupReg, err := types.RegistrationIDFromString(strings.ReplaceAll(fu.Path, "/register/", ""))
|
||||
followupReg, err := types.AuthIDFromString(strings.ReplaceAll(fu.Path, "/register/", ""))
|
||||
if err != nil {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "invalid registration ID", err)
|
||||
}
|
||||
|
||||
if reg, ok := h.state.GetRegistrationCacheEntry(followupReg); ok {
|
||||
if reg, ok := h.state.GetAuthCacheEntry(followupReg); ok {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "registration timed out", err)
|
||||
case node := <-reg.Registered:
|
||||
if node == nil {
|
||||
// registration is expired in the cache, instruct the client to try a new registration
|
||||
return h.reqToNewRegisterResponse(req, machineKey)
|
||||
case verdict := <-reg.WaitForAuth():
|
||||
if verdict.Accept() {
|
||||
if !verdict.Node.Valid() {
|
||||
// registration is expired in the cache, instruct the client to try a new registration
|
||||
return h.reqToNewRegisterResponse(req, machineKey)
|
||||
}
|
||||
|
||||
return nodeToRegisterResponse(verdict.Node), nil
|
||||
}
|
||||
return nodeToRegisterResponse(node.View()), nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -294,14 +297,14 @@ func (h *Headscale) reqToNewRegisterResponse(
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
newRegID, err := types.NewRegistrationID()
|
||||
newAuthID, err := types.NewAuthID()
|
||||
if err != nil {
|
||||
return nil, NewHTTPError(http.StatusInternalServerError, "failed to generate registration ID", err)
|
||||
}
|
||||
|
||||
// Ensure we have a valid hostname
|
||||
hostname := util.EnsureHostname(
|
||||
req.Hostinfo,
|
||||
req.Hostinfo.View(),
|
||||
machineKey.String(),
|
||||
req.NodeKey.String(),
|
||||
)
|
||||
@@ -310,25 +313,25 @@ func (h *Headscale) reqToNewRegisterResponse(
|
||||
hostinfo := cmp.Or(req.Hostinfo, &tailcfg.Hostinfo{})
|
||||
hostinfo.Hostname = hostname
|
||||
|
||||
nodeToRegister := types.NewRegisterNode(
|
||||
types.Node{
|
||||
Hostname: hostname,
|
||||
MachineKey: machineKey,
|
||||
NodeKey: req.NodeKey,
|
||||
Hostinfo: hostinfo,
|
||||
LastSeen: ptr.To(time.Now()),
|
||||
},
|
||||
)
|
||||
|
||||
if !req.Expiry.IsZero() {
|
||||
nodeToRegister.Node.Expiry = &req.Expiry
|
||||
nodeToRegister := types.Node{
|
||||
Hostname: hostname,
|
||||
MachineKey: machineKey,
|
||||
NodeKey: req.NodeKey,
|
||||
Hostinfo: hostinfo,
|
||||
LastSeen: new(time.Now()),
|
||||
}
|
||||
|
||||
log.Info().Msgf("New followup node registration using key: %s", newRegID)
|
||||
h.state.SetRegistrationCacheEntry(newRegID, nodeToRegister)
|
||||
if !req.Expiry.IsZero() {
|
||||
nodeToRegister.Expiry = &req.Expiry
|
||||
}
|
||||
|
||||
authRegReq := types.NewRegisterAuthRequest(nodeToRegister)
|
||||
|
||||
log.Info().Msgf("new followup node registration using auth id: %s", newAuthID)
|
||||
h.state.SetAuthCacheEntry(newAuthID, authRegReq)
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
AuthURL: h.authProvider.AuthURL(newRegID),
|
||||
AuthURL: h.authProvider.RegisterURL(newAuthID),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -344,8 +347,8 @@ func (h *Headscale) handleRegisterWithAuthKey(
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "invalid pre auth key", nil)
|
||||
}
|
||||
var perr types.PAKError
|
||||
if errors.As(err, &perr) {
|
||||
|
||||
if perr, ok := errors.AsType[types.PAKError](err); ok {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, perr.Error(), nil)
|
||||
}
|
||||
|
||||
@@ -355,7 +358,7 @@ func (h *Headscale) handleRegisterWithAuthKey(
|
||||
// If node is not valid, it means an ephemeral node was deleted during logout
|
||||
if !node.Valid() {
|
||||
h.Change(changed)
|
||||
return nil, nil
|
||||
return nil, nil //nolint:nilnil // intentional: no node to return when ephemeral deleted
|
||||
}
|
||||
|
||||
// This is a bit of a back and forth, but we have a bit of a chicken and egg
|
||||
@@ -379,13 +382,6 @@ func (h *Headscale) handleRegisterWithAuthKey(
|
||||
// Send both changes. Empty changes are ignored by Change().
|
||||
h.Change(changed, routesChange)
|
||||
|
||||
// TODO(kradalby): I think this is covered above, but we need to validate that.
|
||||
// // If policy changed due to node registration, send a separate policy change
|
||||
// if policyChanged {
|
||||
// policyChange := change.PolicyChange()
|
||||
// h.Change(policyChange)
|
||||
// }
|
||||
|
||||
resp := &tailcfg.RegisterResponse{
|
||||
MachineAuthorized: true,
|
||||
NodeKeyExpired: node.IsExpired(),
|
||||
@@ -397,8 +393,7 @@ func (h *Headscale) handleRegisterWithAuthKey(
|
||||
Caller().
|
||||
Interface("reg.resp", resp).
|
||||
Interface("reg.req", req).
|
||||
Str("node.name", node.Hostname()).
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
EmbedObject(node).
|
||||
Msg("RegisterResponse")
|
||||
|
||||
return resp, nil
|
||||
@@ -408,14 +403,14 @@ func (h *Headscale) handleRegisterInteractive(
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
registrationId, err := types.NewRegistrationID()
|
||||
authID, err := types.NewAuthID()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("generating registration ID: %w", err)
|
||||
}
|
||||
|
||||
// Ensure we have a valid hostname
|
||||
hostname := util.EnsureHostname(
|
||||
req.Hostinfo,
|
||||
req.Hostinfo.View(),
|
||||
machineKey.String(),
|
||||
req.NodeKey.String(),
|
||||
)
|
||||
@@ -435,30 +430,31 @@ func (h *Headscale) handleRegisterInteractive(
|
||||
Str("generated.hostname", hostname).
|
||||
Msg("Received registration request with empty hostname, generated default")
|
||||
}
|
||||
|
||||
hostinfo.Hostname = hostname
|
||||
|
||||
nodeToRegister := types.NewRegisterNode(
|
||||
types.Node{
|
||||
Hostname: hostname,
|
||||
MachineKey: machineKey,
|
||||
NodeKey: req.NodeKey,
|
||||
Hostinfo: hostinfo,
|
||||
LastSeen: ptr.To(time.Now()),
|
||||
},
|
||||
)
|
||||
|
||||
if !req.Expiry.IsZero() {
|
||||
nodeToRegister.Node.Expiry = &req.Expiry
|
||||
nodeToRegister := types.Node{
|
||||
Hostname: hostname,
|
||||
MachineKey: machineKey,
|
||||
NodeKey: req.NodeKey,
|
||||
Hostinfo: hostinfo,
|
||||
LastSeen: new(time.Now()),
|
||||
}
|
||||
|
||||
h.state.SetRegistrationCacheEntry(
|
||||
registrationId,
|
||||
nodeToRegister,
|
||||
if !req.Expiry.IsZero() {
|
||||
nodeToRegister.Expiry = &req.Expiry
|
||||
}
|
||||
|
||||
authRegReq := types.NewRegisterAuthRequest(nodeToRegister)
|
||||
|
||||
h.state.SetAuthCacheEntry(
|
||||
authID,
|
||||
authRegReq,
|
||||
)
|
||||
|
||||
log.Info().Msgf("Starting node registration using key: %s", registrationId)
|
||||
log.Info().Msgf("starting node registration using auth id: %s", authID)
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
AuthURL: h.authProvider.AuthURL(registrationId),
|
||||
AuthURL: h.authProvider.RegisterURL(authID),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
// TestTaggedPreAuthKeyCreatesTaggedNode tests that a PreAuthKey with tags creates
|
||||
// a tagged node with:
|
||||
// - Tags from the PreAuthKey
|
||||
// - UserID tracking who created the key (informational "created by")
|
||||
// - Nil UserID (tagged nodes are owned by tags, not a user)
|
||||
// - IsTagged() returns true.
|
||||
func TestTaggedPreAuthKeyCreatesTaggedNode(t *testing.T) {
|
||||
app := createTestApp(t)
|
||||
@@ -51,11 +51,10 @@ func TestTaggedPreAuthKeyCreatesTaggedNode(t *testing.T) {
|
||||
node, found := app.state.GetNodeByNodeKey(nodeKey.Public())
|
||||
require.True(t, found)
|
||||
|
||||
// Critical assertions for tags-as-identity model
|
||||
// Tagged nodes are owned by their tags, not a user.
|
||||
assert.True(t, node.IsTagged(), "Node should be tagged")
|
||||
assert.ElementsMatch(t, tags, node.Tags().AsSlice(), "Node should have tags from PreAuthKey")
|
||||
assert.True(t, node.UserID().Valid(), "Node should have UserID tracking creator")
|
||||
assert.Equal(t, user.ID, node.UserID().Get(), "UserID should track PreAuthKey creator")
|
||||
assert.False(t, node.UserID().Valid(), "Tagged node should not have UserID")
|
||||
|
||||
// Verify node is identified correctly
|
||||
assert.True(t, node.IsTagged(), "Tagged node is not user-owned")
|
||||
@@ -129,9 +128,10 @@ func TestReAuthDoesNotReapplyTags(t *testing.T) {
|
||||
assert.True(t, nodeAfterReauth.IsTagged(), "Node should still be tagged")
|
||||
assert.ElementsMatch(t, initialTags, nodeAfterReauth.Tags().AsSlice(), "Tags should remain unchanged on re-auth")
|
||||
|
||||
// Verify only one node was created (no duplicates)
|
||||
nodes := app.state.ListNodesByUser(types.UserID(user.ID))
|
||||
assert.Equal(t, 1, nodes.Len(), "Should have exactly one node")
|
||||
// Verify only one node was created (no duplicates).
|
||||
// Tagged nodes are not indexed by user, so check the global list.
|
||||
allNodes := app.state.ListNodes()
|
||||
assert.Equal(t, 1, allNodes.Len(), "Should have exactly one node")
|
||||
}
|
||||
|
||||
// NOTE: TestSetTagsOnUserOwnedNode functionality is covered by gRPC tests in grpcv1_test.go
|
||||
@@ -294,13 +294,13 @@ func TestMultipleNodesWithSameReusableTaggedPreAuthKey(t *testing.T) {
|
||||
assert.ElementsMatch(t, tags, node1.Tags().AsSlice(), "First node should have PreAuthKey tags")
|
||||
assert.ElementsMatch(t, tags, node2.Tags().AsSlice(), "Second node should have PreAuthKey tags")
|
||||
|
||||
// Both nodes should track the same creator
|
||||
assert.Equal(t, user.ID, node1.UserID().Get(), "First node should track creator")
|
||||
assert.Equal(t, user.ID, node2.UserID().Get(), "Second node should track creator")
|
||||
// Tagged nodes should not have UserID set.
|
||||
assert.False(t, node1.UserID().Valid(), "First node should not have UserID")
|
||||
assert.False(t, node2.UserID().Valid(), "Second node should not have UserID")
|
||||
|
||||
// Verify we have exactly 2 nodes
|
||||
nodes := app.state.ListNodesByUser(types.UserID(user.ID))
|
||||
assert.Equal(t, 2, nodes.Len(), "Should have exactly two nodes")
|
||||
// Verify we have exactly 2 nodes.
|
||||
allNodes := app.state.ListNodes()
|
||||
assert.Equal(t, 2, allNodes.Len(), "Should have exactly two nodes")
|
||||
}
|
||||
|
||||
// TestNonReusableTaggedPreAuthKey tests that a non-reusable PreAuthKey with tags
|
||||
@@ -359,9 +359,9 @@ func TestNonReusableTaggedPreAuthKey(t *testing.T) {
|
||||
_, err = app.handleRegisterWithAuthKey(regReq2, machineKey2.Public())
|
||||
require.Error(t, err, "Should not be able to reuse non-reusable PreAuthKey")
|
||||
|
||||
// Verify only one node was created
|
||||
nodes := app.state.ListNodesByUser(types.UserID(user.ID))
|
||||
assert.Equal(t, 1, nodes.Len(), "Should have exactly one node")
|
||||
// Verify only one node was created.
|
||||
allNodes := app.state.ListNodes()
|
||||
assert.Equal(t, 1, allNodes.Len(), "Should have exactly one node")
|
||||
}
|
||||
|
||||
// TestExpiredTaggedPreAuthKey tests that an expired PreAuthKey with tags
|
||||
@@ -625,6 +625,152 @@ func TestTaggedNodeReauthPreservesDisabledExpiry(t *testing.T) {
|
||||
"Tagged node should have expiry PRESERVED as disabled after re-auth")
|
||||
}
|
||||
|
||||
// TestExpiryDuringPersonalToTaggedConversion tests that when a personal node
|
||||
// is converted to tagged via reauth with RequestTags, the expiry is cleared to nil.
|
||||
// BUG #3048: Previously expiry was NOT cleared because expiry handling ran
|
||||
// BEFORE processReauthTags.
|
||||
func TestExpiryDuringPersonalToTaggedConversion(t *testing.T) {
|
||||
app := createTestApp(t)
|
||||
user := app.state.CreateUserForTest("expiry-test-user")
|
||||
|
||||
// Update policy to allow user to own tags
|
||||
err := app.state.UpdatePolicyManagerUsersForTest()
|
||||
require.NoError(t, err)
|
||||
|
||||
policy := `{
|
||||
"tagOwners": {
|
||||
"tag:server": ["expiry-test-user@"]
|
||||
},
|
||||
"acls": [{"action": "accept", "src": ["*"], "dst": ["*:*"]}]
|
||||
}`
|
||||
_, err = app.state.SetPolicy([]byte(policy))
|
||||
require.NoError(t, err)
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey1 := key.NewNode()
|
||||
|
||||
// Step 1: Create user-owned node WITH expiry set
|
||||
clientExpiry := time.Now().Add(24 * time.Hour)
|
||||
registrationID1 := types.MustAuthID()
|
||||
regEntry1 := types.NewRegisterAuthRequest(types.Node{
|
||||
MachineKey: machineKey.Public(),
|
||||
NodeKey: nodeKey1.Public(),
|
||||
Hostname: "personal-to-tagged",
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "personal-to-tagged",
|
||||
RequestTags: []string{}, // No tags - user-owned
|
||||
},
|
||||
Expiry: &clientExpiry,
|
||||
})
|
||||
app.state.SetAuthCacheEntry(registrationID1, regEntry1)
|
||||
|
||||
node, _, err := app.state.HandleNodeFromAuthPath(
|
||||
registrationID1, types.UserID(user.ID), nil, "webauth",
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.False(t, node.IsTagged(), "Node should be user-owned initially")
|
||||
require.True(t, node.Expiry().Valid(), "User-owned node should have expiry set")
|
||||
|
||||
// Step 2: Re-auth with tags (Personal → Tagged conversion)
|
||||
nodeKey2 := key.NewNode()
|
||||
registrationID2 := types.MustAuthID()
|
||||
regEntry2 := types.NewRegisterAuthRequest(types.Node{
|
||||
MachineKey: machineKey.Public(),
|
||||
NodeKey: nodeKey2.Public(),
|
||||
Hostname: "personal-to-tagged",
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "personal-to-tagged",
|
||||
RequestTags: []string{"tag:server"}, // Adding tags
|
||||
},
|
||||
Expiry: &clientExpiry, // Client still sends expiry
|
||||
})
|
||||
app.state.SetAuthCacheEntry(registrationID2, regEntry2)
|
||||
|
||||
nodeAfter, _, err := app.state.HandleNodeFromAuthPath(
|
||||
registrationID2, types.UserID(user.ID), nil, "webauth",
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.True(t, nodeAfter.IsTagged(), "Node should be tagged after conversion")
|
||||
|
||||
// CRITICAL ASSERTION: Tagged nodes should NOT have expiry
|
||||
assert.False(t, nodeAfter.Expiry().Valid(),
|
||||
"Tagged node should have expiry cleared to nil")
|
||||
}
|
||||
|
||||
// TestExpiryDuringTaggedToPersonalConversion tests that when a tagged node
|
||||
// is converted to personal via reauth with empty RequestTags, expiry is set
|
||||
// from the client request.
|
||||
// BUG #3048: Previously expiry was NOT set because expiry handling ran
|
||||
// BEFORE processReauthTags (node was still tagged at check time).
|
||||
func TestExpiryDuringTaggedToPersonalConversion(t *testing.T) {
|
||||
app := createTestApp(t)
|
||||
user := app.state.CreateUserForTest("expiry-test-user2")
|
||||
|
||||
// Update policy to allow user to own tags
|
||||
err := app.state.UpdatePolicyManagerUsersForTest()
|
||||
require.NoError(t, err)
|
||||
|
||||
policy := `{
|
||||
"tagOwners": {
|
||||
"tag:server": ["expiry-test-user2@"]
|
||||
},
|
||||
"acls": [{"action": "accept", "src": ["*"], "dst": ["*:*"]}]
|
||||
}`
|
||||
_, err = app.state.SetPolicy([]byte(policy))
|
||||
require.NoError(t, err)
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey1 := key.NewNode()
|
||||
|
||||
// Step 1: Create tagged node (expiry should be nil)
|
||||
registrationID1 := types.MustAuthID()
|
||||
regEntry1 := types.NewRegisterAuthRequest(types.Node{
|
||||
MachineKey: machineKey.Public(),
|
||||
NodeKey: nodeKey1.Public(),
|
||||
Hostname: "tagged-to-personal",
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "tagged-to-personal",
|
||||
RequestTags: []string{"tag:server"}, // Tagged node
|
||||
},
|
||||
})
|
||||
app.state.SetAuthCacheEntry(registrationID1, regEntry1)
|
||||
|
||||
node, _, err := app.state.HandleNodeFromAuthPath(
|
||||
registrationID1, types.UserID(user.ID), nil, "webauth",
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.True(t, node.IsTagged(), "Node should be tagged initially")
|
||||
require.False(t, node.Expiry().Valid(), "Tagged node should have nil expiry")
|
||||
|
||||
// Step 2: Re-auth with empty tags (Tagged → Personal conversion)
|
||||
nodeKey2 := key.NewNode()
|
||||
clientExpiry := time.Now().Add(48 * time.Hour)
|
||||
registrationID2 := types.MustAuthID()
|
||||
regEntry2 := types.NewRegisterAuthRequest(types.Node{
|
||||
MachineKey: machineKey.Public(),
|
||||
NodeKey: nodeKey2.Public(),
|
||||
Hostname: "tagged-to-personal",
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "tagged-to-personal",
|
||||
RequestTags: []string{}, // Empty tags - convert to user-owned
|
||||
},
|
||||
Expiry: &clientExpiry, // Client requests expiry
|
||||
})
|
||||
app.state.SetAuthCacheEntry(registrationID2, regEntry2)
|
||||
|
||||
nodeAfter, _, err := app.state.HandleNodeFromAuthPath(
|
||||
registrationID2, types.UserID(user.ID), nil, "webauth",
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.False(t, nodeAfter.IsTagged(), "Node should be user-owned after conversion")
|
||||
|
||||
// CRITICAL ASSERTION: User-owned nodes should have expiry from client
|
||||
assert.True(t, nodeAfter.Expiry().Valid(),
|
||||
"User-owned node should have expiry set")
|
||||
assert.WithinDuration(t, clientExpiry, nodeAfter.Expiry().Get(), 5*time.Second,
|
||||
"Expiry should match client request")
|
||||
}
|
||||
|
||||
// TestReAuthWithDifferentMachineKey tests the edge case where a node attempts
|
||||
// to re-authenticate with the same NodeKey but a DIFFERENT MachineKey.
|
||||
// This scenario should be handled gracefully (currently creates a new node).
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -40,6 +40,7 @@ var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{
|
||||
"v1.88": 125,
|
||||
"v1.90": 130,
|
||||
"v1.92": 131,
|
||||
"v1.94": 131,
|
||||
}
|
||||
|
||||
var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{
|
||||
|
||||
@@ -9,10 +9,9 @@ var tailscaleLatestMajorMinorTests = []struct {
|
||||
stripV bool
|
||||
expected []string
|
||||
}{
|
||||
{3, false, []string{"v1.88", "v1.90", "v1.92"}},
|
||||
{2, true, []string{"1.90", "1.92"}},
|
||||
{3, false, []string{"v1.90", "v1.92", "v1.94"}},
|
||||
{2, true, []string{"1.92", "1.94"}},
|
||||
{10, true, []string{
|
||||
"1.74",
|
||||
"1.76",
|
||||
"1.78",
|
||||
"1.80",
|
||||
@@ -22,6 +21,7 @@ var tailscaleLatestMajorMinorTests = []struct {
|
||||
"1.88",
|
||||
"1.90",
|
||||
"1.92",
|
||||
"1.94",
|
||||
}},
|
||||
{0, false, nil},
|
||||
}
|
||||
|
||||
@@ -77,8 +77,8 @@ func (hsdb *HSDatabase) CreateAPIKey(
|
||||
Expiration: expiration,
|
||||
}
|
||||
|
||||
if err := hsdb.DB.Save(&key).Error; err != nil {
|
||||
return "", nil, fmt.Errorf("failed to save API key to database: %w", err)
|
||||
if err := hsdb.DB.Save(&key).Error; err != nil { //nolint:noinlineerr
|
||||
return "", nil, fmt.Errorf("saving API key to database: %w", err)
|
||||
}
|
||||
|
||||
return keyStr, &key, nil
|
||||
@@ -87,7 +87,9 @@ func (hsdb *HSDatabase) CreateAPIKey(
|
||||
// ListAPIKeys returns the list of ApiKeys for a user.
|
||||
func (hsdb *HSDatabase) ListAPIKeys() ([]types.APIKey, error) {
|
||||
keys := []types.APIKey{}
|
||||
if err := hsdb.DB.Find(&keys).Error; err != nil {
|
||||
|
||||
err := hsdb.DB.Find(&keys).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -126,7 +128,8 @@ func (hsdb *HSDatabase) DestroyAPIKey(key types.APIKey) error {
|
||||
|
||||
// ExpireAPIKey marks a ApiKey as expired.
|
||||
func (hsdb *HSDatabase) ExpireAPIKey(key *types.APIKey) error {
|
||||
if err := hsdb.DB.Model(&key).Update("Expiration", time.Now()).Error; err != nil {
|
||||
err := hsdb.DB.Model(&key).Update("Expiration", time.Now()).Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,6 @@ import (
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
"gorm.io/gorm/schema"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"zgo.at/zcache/v2"
|
||||
)
|
||||
|
||||
@@ -48,20 +47,27 @@ const (
|
||||
type HSDatabase struct {
|
||||
DB *gorm.DB
|
||||
cfg *types.Config
|
||||
regCache *zcache.Cache[types.RegistrationID, types.RegisterNode]
|
||||
regCache *zcache.Cache[types.AuthID, types.AuthRequest]
|
||||
}
|
||||
|
||||
// NewHeadscaleDatabase creates a new database connection and runs migrations.
|
||||
// It accepts the full configuration to allow migrations access to policy settings.
|
||||
//
|
||||
//nolint:gocyclo // complex database initialization with many migrations
|
||||
func NewHeadscaleDatabase(
|
||||
cfg *types.Config,
|
||||
regCache *zcache.Cache[types.RegistrationID, types.RegisterNode],
|
||||
regCache *zcache.Cache[types.AuthID, types.AuthRequest],
|
||||
) (*HSDatabase, error) {
|
||||
dbConn, err := openDB(cfg.Database)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = checkVersionUpgradePath(dbConn)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("version check: %w", err)
|
||||
}
|
||||
|
||||
migrations := gormigrate.New(
|
||||
dbConn,
|
||||
gormigrate.DefaultOptions,
|
||||
@@ -76,7 +82,7 @@ func NewHeadscaleDatabase(
|
||||
ID: "202501221827",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
// Remove any invalid routes associated with a node that does not exist.
|
||||
if tx.Migrator().HasTable(&types.Route{}) && tx.Migrator().HasTable(&types.Node{}) {
|
||||
if tx.Migrator().HasTable(&types.Route{}) && tx.Migrator().HasTable(&types.Node{}) { //nolint:staticcheck // SA1019: Route kept for migrations
|
||||
err := tx.Exec("delete from routes where node_id not in (select id from nodes)").Error
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -84,14 +90,14 @@ func NewHeadscaleDatabase(
|
||||
}
|
||||
|
||||
// Remove any invalid routes without a node_id.
|
||||
if tx.Migrator().HasTable(&types.Route{}) {
|
||||
if tx.Migrator().HasTable(&types.Route{}) { //nolint:staticcheck // SA1019: Route kept for migrations
|
||||
err := tx.Exec("delete from routes where node_id is null").Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := tx.AutoMigrate(&types.Route{})
|
||||
err := tx.AutoMigrate(&types.Route{}) //nolint:staticcheck // SA1019: Route kept for migrations
|
||||
if err != nil {
|
||||
return fmt.Errorf("automigrating types.Route: %w", err)
|
||||
}
|
||||
@@ -109,6 +115,7 @@ func NewHeadscaleDatabase(
|
||||
if err != nil {
|
||||
return fmt.Errorf("automigrating types.PreAuthKey: %w", err)
|
||||
}
|
||||
|
||||
err = tx.AutoMigrate(&types.Node{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("automigrating types.Node: %w", err)
|
||||
@@ -155,7 +162,8 @@ AND auth_key_id NOT IN (
|
||||
|
||||
nodeRoutes := map[uint64][]netip.Prefix{}
|
||||
|
||||
var routes []types.Route
|
||||
var routes []types.Route //nolint:staticcheck // SA1019: Route kept for migrations
|
||||
|
||||
err = tx.Find(&routes).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching routes: %w", err)
|
||||
@@ -168,10 +176,10 @@ AND auth_key_id NOT IN (
|
||||
}
|
||||
|
||||
for nodeID, routes := range nodeRoutes {
|
||||
tsaddr.SortPrefixes(routes)
|
||||
slices.SortFunc(routes, netip.Prefix.Compare)
|
||||
routes = slices.Compact(routes)
|
||||
|
||||
data, err := json.Marshal(routes)
|
||||
data, _ := json.Marshal(routes)
|
||||
|
||||
err = tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", data).Error
|
||||
if err != nil {
|
||||
@@ -180,7 +188,7 @@ AND auth_key_id NOT IN (
|
||||
}
|
||||
|
||||
// Drop the old table.
|
||||
_ = tx.Migrator().DropTable(&types.Route{})
|
||||
_ = tx.Migrator().DropTable(&types.Route{}) //nolint:staticcheck // SA1019: Route kept for migrations
|
||||
|
||||
return nil
|
||||
},
|
||||
@@ -245,21 +253,24 @@ AND auth_key_id NOT IN (
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
// Only run on SQLite
|
||||
if cfg.Database.Type != types.DatabaseSqlite {
|
||||
log.Info().Msg("Skipping schema migration on non-SQLite database")
|
||||
log.Info().Msg("skipping schema migration on non-SQLite database")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Info().Msg("Starting schema recreation with table renaming")
|
||||
log.Info().Msg("starting schema recreation with table renaming")
|
||||
|
||||
// Rename existing tables to _old versions
|
||||
tablesToRename := []string{"users", "pre_auth_keys", "api_keys", "nodes", "policies"}
|
||||
|
||||
// Check if routes table exists and drop it (should have been migrated already)
|
||||
var routesExists bool
|
||||
|
||||
err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='routes'").Row().Scan(&routesExists)
|
||||
if err == nil && routesExists {
|
||||
log.Info().Msg("Dropping leftover routes table")
|
||||
if err := tx.Exec("DROP TABLE routes").Error; err != nil {
|
||||
log.Info().Msg("dropping leftover routes table")
|
||||
|
||||
err := tx.Exec("DROP TABLE routes").Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("dropping routes table: %w", err)
|
||||
}
|
||||
}
|
||||
@@ -281,6 +292,7 @@ AND auth_key_id NOT IN (
|
||||
for _, table := range tablesToRename {
|
||||
// Check if table exists before renaming
|
||||
var exists bool
|
||||
|
||||
err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=?", table).Row().Scan(&exists)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking if table %s exists: %w", table, err)
|
||||
@@ -291,7 +303,8 @@ AND auth_key_id NOT IN (
|
||||
_ = tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error
|
||||
|
||||
// Rename current table to _old
|
||||
if err := tx.Exec("ALTER TABLE " + table + " RENAME TO " + table + "_old").Error; err != nil {
|
||||
err := tx.Exec("ALTER TABLE " + table + " RENAME TO " + table + "_old").Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("renaming table %s to %s_old: %w", table, table, err)
|
||||
}
|
||||
}
|
||||
@@ -365,7 +378,8 @@ AND auth_key_id NOT IN (
|
||||
}
|
||||
|
||||
for _, createSQL := range tableCreationSQL {
|
||||
if err := tx.Exec(createSQL).Error; err != nil {
|
||||
err := tx.Exec(createSQL).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating new table: %w", err)
|
||||
}
|
||||
}
|
||||
@@ -394,7 +408,8 @@ AND auth_key_id NOT IN (
|
||||
}
|
||||
|
||||
for _, copySQL := range dataCopySQL {
|
||||
if err := tx.Exec(copySQL).Error; err != nil {
|
||||
err := tx.Exec(copySQL).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("copying data: %w", err)
|
||||
}
|
||||
}
|
||||
@@ -417,19 +432,21 @@ AND auth_key_id NOT IN (
|
||||
}
|
||||
|
||||
for _, indexSQL := range indexes {
|
||||
if err := tx.Exec(indexSQL).Error; err != nil {
|
||||
err := tx.Exec(indexSQL).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating index: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Drop old tables only after everything succeeds
|
||||
for _, table := range tablesToRename {
|
||||
if err := tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error; err != nil {
|
||||
log.Warn().Str("table", table+"_old").Err(err).Msg("Failed to drop old table, but migration succeeded")
|
||||
err := tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error
|
||||
if err != nil {
|
||||
log.Warn().Str("table", table+"_old").Err(err).Msg("failed to drop old table, but migration succeeded")
|
||||
}
|
||||
}
|
||||
|
||||
log.Info().Msg("Schema recreation completed successfully")
|
||||
log.Info().Msg("schema recreation completed successfully")
|
||||
|
||||
return nil
|
||||
},
|
||||
@@ -595,12 +612,12 @@ AND auth_key_id NOT IN (
|
||||
// 1. Load policy from file or database based on configuration
|
||||
policyData, err := PolicyBytes(tx, cfg)
|
||||
if err != nil {
|
||||
log.Warn().Err(err).Msg("Failed to load policy, skipping RequestTags migration (tags will be validated on node reconnect)")
|
||||
log.Warn().Err(err).Msg("failed to load policy, skipping RequestTags migration (tags will be validated on node reconnect)")
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(policyData) == 0 {
|
||||
log.Info().Msg("No policy found, skipping RequestTags migration (tags will be validated on node reconnect)")
|
||||
log.Info().Msg("no policy found, skipping RequestTags migration (tags will be validated on node reconnect)")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -618,7 +635,7 @@ AND auth_key_id NOT IN (
|
||||
// 3. Create PolicyManager (handles HuJSON parsing, groups, nested tags, etc.)
|
||||
polMan, err := policy.NewPolicyManager(policyData, users, nodes.ViewSlice())
|
||||
if err != nil {
|
||||
log.Warn().Err(err).Msg("Failed to parse policy, skipping RequestTags migration (tags will be validated on node reconnect)")
|
||||
log.Warn().Err(err).Msg("failed to parse policy, skipping RequestTags migration (tags will be validated on node reconnect)")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -652,8 +669,7 @@ AND auth_key_id NOT IN (
|
||||
if len(validatedTags) == 0 {
|
||||
if len(rejectedTags) > 0 {
|
||||
log.Debug().
|
||||
Uint64("node.id", uint64(node.ID)).
|
||||
Str("node.name", node.Hostname).
|
||||
EmbedObject(node).
|
||||
Strs("rejected_tags", rejectedTags).
|
||||
Msg("RequestTags rejected during migration (not authorized)")
|
||||
}
|
||||
@@ -676,8 +692,7 @@ AND auth_key_id NOT IN (
|
||||
}
|
||||
|
||||
log.Info().
|
||||
Uint64("node.id", uint64(node.ID)).
|
||||
Str("node.name", node.Hostname).
|
||||
EmbedObject(node).
|
||||
Strs("validated_tags", validatedTags).
|
||||
Strs("rejected_tags", rejectedTags).
|
||||
Strs("existing_tags", existingTags).
|
||||
@@ -689,6 +704,29 @@ AND auth_key_id NOT IN (
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
{
|
||||
// Clear user_id on tagged nodes.
|
||||
// Tagged nodes are owned by their tags, not a user.
|
||||
// Previously user_id was kept as "created by" tracking,
|
||||
// but this prevents deleting users whose nodes have been
|
||||
// tagged, and the ON DELETE CASCADE FK would destroy the
|
||||
// tagged nodes if the user were deleted.
|
||||
// Fixes: https://github.com/juanfont/headscale/issues/3077
|
||||
ID: "202602201200-clear-tagged-node-user-id",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
err := tx.Exec(`
|
||||
UPDATE nodes
|
||||
SET user_id = NULL
|
||||
WHERE tags IS NOT NULL AND tags != '[]' AND tags != '';
|
||||
`).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("clearing user_id on tagged nodes: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -750,6 +788,20 @@ AND auth_key_id NOT IN (
|
||||
return nil, fmt.Errorf("migration failed: %w", err)
|
||||
}
|
||||
|
||||
// Store the current version in the database after migrations succeed.
|
||||
// Dev builds skip this to preserve the stored version for the next
|
||||
// real versioned binary.
|
||||
currentVersion := types.GetVersionInfo().Version
|
||||
if !isDev(currentVersion) {
|
||||
err = setDatabaseVersion(dbConn, currentVersion)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"storing database version: %w",
|
||||
err,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate that the schema ends up in the expected state.
|
||||
// This is currently only done on sqlite as squibble does not
|
||||
// support Postgres and we use our sqlite schema as our source of
|
||||
@@ -762,6 +814,7 @@ AND auth_key_id NOT IN (
|
||||
|
||||
// or else it blocks...
|
||||
sqlConn.SetMaxIdleConns(maxIdleConns)
|
||||
|
||||
sqlConn.SetMaxOpenConns(maxOpenConns)
|
||||
defer sqlConn.SetMaxIdleConns(1)
|
||||
defer sqlConn.SetMaxOpenConns(1)
|
||||
@@ -779,7 +832,7 @@ AND auth_key_id NOT IN (
|
||||
},
|
||||
}
|
||||
|
||||
if err := squibble.Validate(ctx, sqlConn, dbSchema, &opts); err != nil {
|
||||
if err := squibble.Validate(ctx, sqlConn, dbSchema, &opts); err != nil { //nolint:noinlineerr
|
||||
return nil, fmt.Errorf("validating schema: %w", err)
|
||||
}
|
||||
}
|
||||
@@ -805,6 +858,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
|
||||
switch cfg.Type {
|
||||
case types.DatabaseSqlite:
|
||||
dir := filepath.Dir(cfg.Sqlite.Path)
|
||||
|
||||
err := util.EnsureDir(dir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating directory for sqlite: %w", err)
|
||||
@@ -858,7 +912,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
|
||||
Str("path", dbString).
|
||||
Msg("Opening database")
|
||||
|
||||
if sslEnabled, err := strconv.ParseBool(cfg.Postgres.Ssl); err == nil {
|
||||
if sslEnabled, err := strconv.ParseBool(cfg.Postgres.Ssl); err == nil { //nolint:noinlineerr
|
||||
if !sslEnabled {
|
||||
dbString += " sslmode=disable"
|
||||
}
|
||||
@@ -913,7 +967,7 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
|
||||
|
||||
// Get the current foreign key status
|
||||
var fkOriginallyEnabled int
|
||||
if err := dbConn.Raw("PRAGMA foreign_keys").Scan(&fkOriginallyEnabled).Error; err != nil {
|
||||
if err := dbConn.Raw("PRAGMA foreign_keys").Scan(&fkOriginallyEnabled).Error; err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("checking foreign key status: %w", err)
|
||||
}
|
||||
|
||||
@@ -937,33 +991,36 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
|
||||
}
|
||||
|
||||
for _, migrationID := range migrationIDs {
|
||||
log.Trace().Caller().Str("migration_id", migrationID).Msg("Running migration")
|
||||
log.Trace().Caller().Str("migration_id", migrationID).Msg("running migration")
|
||||
needsFKDisabled := migrationsRequiringFKDisabled[migrationID]
|
||||
|
||||
if needsFKDisabled {
|
||||
// Disable foreign keys for this migration
|
||||
if err := dbConn.Exec("PRAGMA foreign_keys = OFF").Error; err != nil {
|
||||
err := dbConn.Exec("PRAGMA foreign_keys = OFF").Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("disabling foreign keys for migration %s: %w", migrationID, err)
|
||||
}
|
||||
} else {
|
||||
// Ensure foreign keys are enabled for this migration
|
||||
if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil {
|
||||
err := dbConn.Exec("PRAGMA foreign_keys = ON").Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("enabling foreign keys for migration %s: %w", migrationID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Run up to this specific migration (will only run the next pending migration)
|
||||
if err := migrations.MigrateTo(migrationID); err != nil {
|
||||
err := migrations.MigrateTo(migrationID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("running migration %s: %w", migrationID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil {
|
||||
if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("restoring foreign keys: %w", err)
|
||||
}
|
||||
|
||||
// Run the rest of the migrations
|
||||
if err := migrations.Migrate(); err != nil {
|
||||
if err := migrations.Migrate(); err != nil { //nolint:noinlineerr
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -981,16 +1038,22 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var violation constraintViolation
|
||||
if err := rows.Scan(&violation.Table, &violation.RowID, &violation.Parent, &violation.ConstraintIndex); err != nil {
|
||||
|
||||
err := rows.Scan(&violation.Table, &violation.RowID, &violation.Parent, &violation.ConstraintIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
violatedConstraints = append(violatedConstraints, violation)
|
||||
}
|
||||
_ = rows.Close()
|
||||
|
||||
if err := rows.Err(); err != nil { //nolint:noinlineerr
|
||||
return err
|
||||
}
|
||||
|
||||
if len(violatedConstraints) > 0 {
|
||||
for _, violation := range violatedConstraints {
|
||||
@@ -1005,7 +1068,8 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
|
||||
}
|
||||
} else {
|
||||
// PostgreSQL can run all migrations in one block - no foreign key issues
|
||||
if err := migrations.Migrate(); err != nil {
|
||||
err := migrations.Migrate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -1016,6 +1080,7 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
|
||||
func (hsdb *HSDatabase) PingDB(ctx context.Context) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
|
||||
sqlDB, err := hsdb.DB.DB()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1031,7 +1096,7 @@ func (hsdb *HSDatabase) Close() error {
|
||||
}
|
||||
|
||||
if hsdb.cfg.Database.Type == types.DatabaseSqlite && hsdb.cfg.Database.Sqlite.WriteAheadLog {
|
||||
db.Exec("VACUUM")
|
||||
db.Exec("VACUUM") //nolint:errcheck,noctx
|
||||
}
|
||||
|
||||
return db.Close()
|
||||
@@ -1040,12 +1105,14 @@ func (hsdb *HSDatabase) Close() error {
|
||||
func (hsdb *HSDatabase) Read(fn func(rx *gorm.DB) error) error {
|
||||
rx := hsdb.DB.Begin()
|
||||
defer rx.Rollback()
|
||||
|
||||
return fn(rx)
|
||||
}
|
||||
|
||||
func Read[T any](db *gorm.DB, fn func(rx *gorm.DB) (T, error)) (T, error) {
|
||||
rx := db.Begin()
|
||||
defer rx.Rollback()
|
||||
|
||||
ret, err := fn(rx)
|
||||
if err != nil {
|
||||
var no T
|
||||
@@ -1058,7 +1125,9 @@ func Read[T any](db *gorm.DB, fn func(rx *gorm.DB) (T, error)) (T, error) {
|
||||
func (hsdb *HSDatabase) Write(fn func(tx *gorm.DB) error) error {
|
||||
tx := hsdb.DB.Begin()
|
||||
defer tx.Rollback()
|
||||
if err := fn(tx); err != nil {
|
||||
|
||||
err := fn(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1068,6 +1137,7 @@ func (hsdb *HSDatabase) Write(fn func(tx *gorm.DB) error) error {
|
||||
func Write[T any](db *gorm.DB, fn func(tx *gorm.DB) (T, error)) (T, error) {
|
||||
tx := db.Begin()
|
||||
defer tx.Rollback()
|
||||
|
||||
ret, err := fn(tx)
|
||||
if err != nil {
|
||||
var no T
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"os"
|
||||
"os/exec"
|
||||
@@ -44,6 +45,7 @@ func TestSQLiteMigrationAndDataValidation(t *testing.T) {
|
||||
|
||||
// Verify api_keys data preservation
|
||||
var apiKeyCount int
|
||||
|
||||
err = hsdb.DB.Raw("SELECT COUNT(*) FROM api_keys").Scan(&apiKeyCount).Error
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, apiKeyCount, "should preserve all 2 api_keys from original schema")
|
||||
@@ -160,8 +162,8 @@ func TestSQLiteMigrationAndDataValidation(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func emptyCache() *zcache.Cache[types.RegistrationID, types.RegisterNode] {
|
||||
return zcache.New[types.RegistrationID, types.RegisterNode](time.Minute, time.Hour)
|
||||
func emptyCache() *zcache.Cache[types.AuthID, types.AuthRequest] {
|
||||
return zcache.New[types.AuthID, types.AuthRequest](time.Minute, time.Hour)
|
||||
}
|
||||
|
||||
func createSQLiteFromSQLFile(sqlFilePath, dbPath string) error {
|
||||
@@ -176,7 +178,7 @@ func createSQLiteFromSQLFile(sqlFilePath, dbPath string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Exec(string(schemaContent))
|
||||
_, err = db.ExecContext(context.Background(), string(schemaContent))
|
||||
|
||||
return err
|
||||
}
|
||||
@@ -186,6 +188,7 @@ func createSQLiteFromSQLFile(sqlFilePath, dbPath string) error {
|
||||
func requireConstraintFailed(t *testing.T, err error) {
|
||||
t.Helper()
|
||||
require.Error(t, err)
|
||||
|
||||
if !strings.Contains(err.Error(), "UNIQUE constraint failed:") && !strings.Contains(err.Error(), "violates unique constraint") {
|
||||
require.Failf(t, "expected error to contain a constraint failure, got: %s", err.Error())
|
||||
}
|
||||
@@ -198,7 +201,7 @@ func TestConstraints(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "no-duplicate-username-if-no-oidc",
|
||||
run: func(t *testing.T, db *gorm.DB) {
|
||||
run: func(t *testing.T, db *gorm.DB) { //nolint:thelper
|
||||
_, err := CreateUser(db, types.User{Name: "user1"})
|
||||
require.NoError(t, err)
|
||||
_, err = CreateUser(db, types.User{Name: "user1"})
|
||||
@@ -207,7 +210,7 @@ func TestConstraints(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "no-oidc-duplicate-username-and-id",
|
||||
run: func(t *testing.T, db *gorm.DB) {
|
||||
run: func(t *testing.T, db *gorm.DB) { //nolint:thelper
|
||||
user := types.User{
|
||||
Model: gorm.Model{ID: 1},
|
||||
Name: "user1",
|
||||
@@ -229,7 +232,7 @@ func TestConstraints(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "no-oidc-duplicate-id",
|
||||
run: func(t *testing.T, db *gorm.DB) {
|
||||
run: func(t *testing.T, db *gorm.DB) { //nolint:thelper
|
||||
user := types.User{
|
||||
Model: gorm.Model{ID: 1},
|
||||
Name: "user1",
|
||||
@@ -251,7 +254,7 @@ func TestConstraints(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "allow-duplicate-username-cli-then-oidc",
|
||||
run: func(t *testing.T, db *gorm.DB) {
|
||||
run: func(t *testing.T, db *gorm.DB) { //nolint:thelper
|
||||
_, err := CreateUser(db, types.User{Name: "user1"}) // Create CLI username
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -266,7 +269,7 @@ func TestConstraints(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "allow-duplicate-username-oidc-then-cli",
|
||||
run: func(t *testing.T, db *gorm.DB) {
|
||||
run: func(t *testing.T, db *gorm.DB) { //nolint:thelper
|
||||
user := types.User{
|
||||
Name: "user1",
|
||||
ProviderIdentifier: sql.NullString{String: "http://test.com/user1", Valid: true},
|
||||
@@ -320,7 +323,7 @@ func TestPostgresMigrationAndDataValidation(t *testing.T) {
|
||||
}
|
||||
|
||||
// Construct the pg_restore command
|
||||
cmd := exec.Command(pgRestorePath, "--verbose", "--if-exists", "--clean", "--no-owner", "--dbname", u.String(), tt.dbPath)
|
||||
cmd := exec.CommandContext(context.Background(), pgRestorePath, "--verbose", "--if-exists", "--clean", "--no-owner", "--dbname", u.String(), tt.dbPath)
|
||||
|
||||
// Set the output streams
|
||||
cmd.Stdout = os.Stdout
|
||||
@@ -401,6 +404,7 @@ func dbForTestWithPath(t *testing.T, sqlFilePath string) *HSDatabase {
|
||||
// skip already-applied migrations and only run new ones.
|
||||
func TestSQLiteAllTestdataMigrations(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
schemas, err := os.ReadDir("testdata/sqlite")
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@@ -27,13 +27,17 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
|
||||
t.Logf("Initial number of goroutines: %d", initialGoroutines)
|
||||
|
||||
// Basic deletion tracking mechanism
|
||||
var deletedIDs []types.NodeID
|
||||
var deleteMutex sync.Mutex
|
||||
var deletionWg sync.WaitGroup
|
||||
var (
|
||||
deletedIDs []types.NodeID
|
||||
deleteMutex sync.Mutex
|
||||
deletionWg sync.WaitGroup
|
||||
)
|
||||
|
||||
deleteFunc := func(nodeID types.NodeID) {
|
||||
deleteMutex.Lock()
|
||||
|
||||
deletedIDs = append(deletedIDs, nodeID)
|
||||
|
||||
deleteMutex.Unlock()
|
||||
deletionWg.Done()
|
||||
}
|
||||
@@ -43,14 +47,17 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
|
||||
go gc.Start()
|
||||
|
||||
// Schedule several nodes for deletion with short expiry
|
||||
const expiry = fifty
|
||||
const numNodes = 100
|
||||
const (
|
||||
expiry = fifty
|
||||
numNodes = 100
|
||||
)
|
||||
|
||||
// Set up wait group for expected deletions
|
||||
|
||||
deletionWg.Add(numNodes)
|
||||
|
||||
for i := 1; i <= numNodes; i++ {
|
||||
gc.Schedule(types.NodeID(i), expiry)
|
||||
gc.Schedule(types.NodeID(i), expiry) //nolint:gosec // safe conversion in test
|
||||
}
|
||||
|
||||
// Wait for all scheduled deletions to complete
|
||||
@@ -63,7 +70,7 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
|
||||
|
||||
// Schedule and immediately cancel to test that part of the code
|
||||
for i := numNodes + 1; i <= numNodes*2; i++ {
|
||||
nodeID := types.NodeID(i)
|
||||
nodeID := types.NodeID(i) //nolint:gosec // safe conversion in test
|
||||
gc.Schedule(nodeID, time.Hour)
|
||||
gc.Cancel(nodeID)
|
||||
}
|
||||
@@ -87,14 +94,18 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
|
||||
// and then reschedules it with a shorter expiry, and verifies that the node is deleted only once.
|
||||
func TestEphemeralGarbageCollectorReschedule(t *testing.T) {
|
||||
// Deletion tracking mechanism
|
||||
var deletedIDs []types.NodeID
|
||||
var deleteMutex sync.Mutex
|
||||
var (
|
||||
deletedIDs []types.NodeID
|
||||
deleteMutex sync.Mutex
|
||||
)
|
||||
|
||||
deletionNotifier := make(chan types.NodeID, 1)
|
||||
|
||||
deleteFunc := func(nodeID types.NodeID) {
|
||||
deleteMutex.Lock()
|
||||
|
||||
deletedIDs = append(deletedIDs, nodeID)
|
||||
|
||||
deleteMutex.Unlock()
|
||||
|
||||
deletionNotifier <- nodeID
|
||||
@@ -102,11 +113,14 @@ func TestEphemeralGarbageCollectorReschedule(t *testing.T) {
|
||||
|
||||
// Start GC
|
||||
gc := NewEphemeralGarbageCollector(deleteFunc)
|
||||
|
||||
go gc.Start()
|
||||
defer gc.Close()
|
||||
|
||||
const shortExpiry = fifty
|
||||
const longExpiry = 1 * time.Hour
|
||||
const (
|
||||
shortExpiry = fifty
|
||||
longExpiry = 1 * time.Hour
|
||||
)
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
|
||||
@@ -136,23 +150,31 @@ func TestEphemeralGarbageCollectorReschedule(t *testing.T) {
|
||||
// and verifies that the node is deleted only once.
|
||||
func TestEphemeralGarbageCollectorCancelAndReschedule(t *testing.T) {
|
||||
// Deletion tracking mechanism
|
||||
var deletedIDs []types.NodeID
|
||||
var deleteMutex sync.Mutex
|
||||
var (
|
||||
deletedIDs []types.NodeID
|
||||
deleteMutex sync.Mutex
|
||||
)
|
||||
|
||||
deletionNotifier := make(chan types.NodeID, 1)
|
||||
|
||||
deleteFunc := func(nodeID types.NodeID) {
|
||||
deleteMutex.Lock()
|
||||
|
||||
deletedIDs = append(deletedIDs, nodeID)
|
||||
|
||||
deleteMutex.Unlock()
|
||||
|
||||
deletionNotifier <- nodeID
|
||||
}
|
||||
|
||||
// Start the GC
|
||||
gc := NewEphemeralGarbageCollector(deleteFunc)
|
||||
|
||||
go gc.Start()
|
||||
defer gc.Close()
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
|
||||
const expiry = fifty
|
||||
|
||||
// Schedule node for deletion
|
||||
@@ -196,14 +218,18 @@ func TestEphemeralGarbageCollectorCancelAndReschedule(t *testing.T) {
|
||||
// It creates a new EphemeralGarbageCollector, schedules a node for deletion, closes the GC, and verifies that the node is not deleted.
|
||||
func TestEphemeralGarbageCollectorCloseBeforeTimerFires(t *testing.T) {
|
||||
// Deletion tracking
|
||||
var deletedIDs []types.NodeID
|
||||
var deleteMutex sync.Mutex
|
||||
var (
|
||||
deletedIDs []types.NodeID
|
||||
deleteMutex sync.Mutex
|
||||
)
|
||||
|
||||
deletionNotifier := make(chan types.NodeID, 1)
|
||||
|
||||
deleteFunc := func(nodeID types.NodeID) {
|
||||
deleteMutex.Lock()
|
||||
|
||||
deletedIDs = append(deletedIDs, nodeID)
|
||||
|
||||
deleteMutex.Unlock()
|
||||
|
||||
deletionNotifier <- nodeID
|
||||
@@ -246,13 +272,18 @@ func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {
|
||||
t.Logf("Initial number of goroutines: %d", initialGoroutines)
|
||||
|
||||
// Deletion tracking
|
||||
var deletedIDs []types.NodeID
|
||||
var deleteMutex sync.Mutex
|
||||
var (
|
||||
deletedIDs []types.NodeID
|
||||
deleteMutex sync.Mutex
|
||||
)
|
||||
|
||||
nodeDeleted := make(chan struct{})
|
||||
|
||||
deleteFunc := func(nodeID types.NodeID) {
|
||||
deleteMutex.Lock()
|
||||
|
||||
deletedIDs = append(deletedIDs, nodeID)
|
||||
|
||||
deleteMutex.Unlock()
|
||||
close(nodeDeleted) // Signal that deletion happened
|
||||
}
|
||||
@@ -263,10 +294,12 @@ func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {
|
||||
// Use a WaitGroup to ensure the GC has started
|
||||
var startWg sync.WaitGroup
|
||||
startWg.Add(1)
|
||||
|
||||
go func() {
|
||||
startWg.Done() // Signal that the goroutine has started
|
||||
gc.Start()
|
||||
}()
|
||||
|
||||
startWg.Wait() // Wait for the GC to start
|
||||
|
||||
// Close GC right away
|
||||
@@ -288,7 +321,9 @@ func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {
|
||||
|
||||
// Check no node was deleted
|
||||
deleteMutex.Lock()
|
||||
|
||||
nodesDeleted := len(deletedIDs)
|
||||
|
||||
deleteMutex.Unlock()
|
||||
assert.Equal(t, 0, nodesDeleted, "No nodes should be deleted when Schedule is called after Close")
|
||||
|
||||
@@ -311,12 +346,16 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
|
||||
t.Logf("Initial number of goroutines: %d", initialGoroutines)
|
||||
|
||||
// Deletion tracking mechanism
|
||||
var deletedIDs []types.NodeID
|
||||
var deleteMutex sync.Mutex
|
||||
var (
|
||||
deletedIDs []types.NodeID
|
||||
deleteMutex sync.Mutex
|
||||
)
|
||||
|
||||
deleteFunc := func(nodeID types.NodeID) {
|
||||
deleteMutex.Lock()
|
||||
|
||||
deletedIDs = append(deletedIDs, nodeID)
|
||||
|
||||
deleteMutex.Unlock()
|
||||
}
|
||||
|
||||
@@ -325,8 +364,10 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
|
||||
go gc.Start()
|
||||
|
||||
// Number of concurrent scheduling goroutines
|
||||
const numSchedulers = 10
|
||||
const nodesPerScheduler = 50
|
||||
const (
|
||||
numSchedulers = 10
|
||||
nodesPerScheduler = 50
|
||||
)
|
||||
|
||||
const closeAfterNodes = 25 // Close GC after this many nodes per scheduler
|
||||
|
||||
@@ -353,8 +394,8 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
|
||||
case <-stopScheduling:
|
||||
return
|
||||
default:
|
||||
nodeID := types.NodeID(baseNodeID + j + 1)
|
||||
gc.Schedule(nodeID, 1*time.Hour) // Long expiry to ensure it doesn't trigger during test
|
||||
nodeID := types.NodeID(baseNodeID + j + 1) //nolint:gosec // safe conversion in test
|
||||
gc.Schedule(nodeID, 1*time.Hour) // Long expiry to ensure it doesn't trigger during test
|
||||
atomic.AddInt64(&scheduledCount, 1)
|
||||
|
||||
// Yield to other goroutines to introduce variability
|
||||
|
||||
@@ -17,7 +17,11 @@ import (
|
||||
"tailscale.com/net/tsaddr"
|
||||
)
|
||||
|
||||
var errGeneratedIPBytesInvalid = errors.New("generated ip bytes are invalid ip")
|
||||
var (
|
||||
errGeneratedIPBytesInvalid = errors.New("generated ip bytes are invalid ip")
|
||||
errGeneratedIPNotInPrefix = errors.New("generated ip not in prefix")
|
||||
errIPAllocatorNil = errors.New("ip allocator was nil")
|
||||
)
|
||||
|
||||
// IPAllocator is a singleton responsible for allocating
|
||||
// IP addresses for nodes and making sure the same
|
||||
@@ -62,8 +66,10 @@ func NewIPAllocator(
|
||||
strategy: strategy,
|
||||
}
|
||||
|
||||
var v4s []sql.NullString
|
||||
var v6s []sql.NullString
|
||||
var (
|
||||
v4s []sql.NullString
|
||||
v6s []sql.NullString
|
||||
)
|
||||
|
||||
if db != nil {
|
||||
err := db.Read(func(rx *gorm.DB) error {
|
||||
@@ -135,15 +141,18 @@ func (i *IPAllocator) Next() (*netip.Addr, *netip.Addr, error) {
|
||||
i.mu.Lock()
|
||||
defer i.mu.Unlock()
|
||||
|
||||
var err error
|
||||
var ret4 *netip.Addr
|
||||
var ret6 *netip.Addr
|
||||
var (
|
||||
err error
|
||||
ret4 *netip.Addr
|
||||
ret6 *netip.Addr
|
||||
)
|
||||
|
||||
if i.prefix4 != nil {
|
||||
ret4, err = i.next(i.prev4, i.prefix4)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("allocating IPv4 address: %w", err)
|
||||
}
|
||||
|
||||
i.prev4 = *ret4
|
||||
}
|
||||
|
||||
@@ -152,6 +161,7 @@ func (i *IPAllocator) Next() (*netip.Addr, *netip.Addr, error) {
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("allocating IPv6 address: %w", err)
|
||||
}
|
||||
|
||||
i.prev6 = *ret6
|
||||
}
|
||||
|
||||
@@ -168,8 +178,10 @@ func (i *IPAllocator) nextLocked(prev netip.Addr, prefix *netip.Prefix) (*netip.
|
||||
}
|
||||
|
||||
func (i *IPAllocator) next(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) {
|
||||
var err error
|
||||
var ip netip.Addr
|
||||
var (
|
||||
err error
|
||||
ip netip.Addr
|
||||
)
|
||||
|
||||
switch i.strategy {
|
||||
case types.IPAllocationStrategySequential:
|
||||
@@ -243,7 +255,8 @@ func randomNext(pfx netip.Prefix) (netip.Addr, error) {
|
||||
|
||||
if !pfx.Contains(ip) {
|
||||
return netip.Addr{}, fmt.Errorf(
|
||||
"generated ip(%s) not in prefix(%s)",
|
||||
"%w: ip(%s) not in prefix(%s)",
|
||||
errGeneratedIPNotInPrefix,
|
||||
ip.String(),
|
||||
pfx.String(),
|
||||
)
|
||||
@@ -268,11 +281,14 @@ func isTailscaleReservedIP(ip netip.Addr) bool {
|
||||
// If a prefix type has been removed (IPv4 or IPv6), it
|
||||
// will remove the IPs in that family from the node.
|
||||
func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
|
||||
var err error
|
||||
var ret []string
|
||||
var (
|
||||
err error
|
||||
ret []string
|
||||
)
|
||||
|
||||
err = db.Write(func(tx *gorm.DB) error {
|
||||
if i == nil {
|
||||
return errors.New("backfilling IPs: ip allocator was nil")
|
||||
return fmt.Errorf("backfilling IPs: %w", errIPAllocatorNil)
|
||||
}
|
||||
|
||||
log.Trace().Caller().Msgf("starting to backfill IPs")
|
||||
@@ -283,18 +299,19 @@ func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
log.Trace().Caller().Uint64("node.id", node.ID.Uint64()).Str("node.name", node.Hostname).Msg("IP backfill check started because node found in database")
|
||||
log.Trace().Caller().EmbedObject(node).Msg("ip backfill check started because node found in database")
|
||||
|
||||
changed := false
|
||||
// IPv4 prefix is set, but node ip is missing, alloc
|
||||
if i.prefix4 != nil && node.IPv4 == nil {
|
||||
ret4, err := i.nextLocked(i.prev4, i.prefix4)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to allocate ipv4 for node(%d): %w", node.ID, err)
|
||||
return fmt.Errorf("allocating IPv4 for node(%d): %w", node.ID, err)
|
||||
}
|
||||
|
||||
node.IPv4 = ret4
|
||||
changed = true
|
||||
|
||||
ret = append(ret, fmt.Sprintf("assigned IPv4 %q to Node(%d) %q", ret4.String(), node.ID, node.Hostname))
|
||||
}
|
||||
|
||||
@@ -302,11 +319,12 @@ func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
|
||||
if i.prefix6 != nil && node.IPv6 == nil {
|
||||
ret6, err := i.nextLocked(i.prev6, i.prefix6)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to allocate ipv6 for node(%d): %w", node.ID, err)
|
||||
return fmt.Errorf("allocating IPv6 for node(%d): %w", node.ID, err)
|
||||
}
|
||||
|
||||
node.IPv6 = ret6
|
||||
changed = true
|
||||
|
||||
ret = append(ret, fmt.Sprintf("assigned IPv6 %q to Node(%d) %q", ret6.String(), node.ID, node.Hostname))
|
||||
}
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
var mpp = func(pref string) *netip.Prefix {
|
||||
@@ -21,9 +20,7 @@ var mpp = func(pref string) *netip.Prefix {
|
||||
return &p
|
||||
}
|
||||
|
||||
var na = func(pref string) netip.Addr {
|
||||
return netip.MustParseAddr(pref)
|
||||
}
|
||||
var na = netip.MustParseAddr
|
||||
|
||||
var nap = func(pref string) *netip.Addr {
|
||||
n := na(pref)
|
||||
@@ -158,8 +155,10 @@ func TestIPAllocatorSequential(t *testing.T) {
|
||||
types.IPAllocationStrategySequential,
|
||||
)
|
||||
|
||||
var got4s []netip.Addr
|
||||
var got6s []netip.Addr
|
||||
var (
|
||||
got4s []netip.Addr
|
||||
got6s []netip.Addr
|
||||
)
|
||||
|
||||
for range tt.getCount {
|
||||
got4, got6, err := alloc.Next()
|
||||
@@ -175,6 +174,7 @@ func TestIPAllocatorSequential(t *testing.T) {
|
||||
got6s = append(got6s, *got6)
|
||||
}
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(tt.want4, got4s, util.Comparers...); diff != "" {
|
||||
t.Errorf("IPAllocator 4s unexpected result (-want +got):\n%s", diff)
|
||||
}
|
||||
@@ -288,6 +288,7 @@ func TestBackfillIPAddresses(t *testing.T) {
|
||||
fullNodeP := func(i int) *types.Node {
|
||||
v4 := fmt.Sprintf("100.64.0.%d", i)
|
||||
v6 := fmt.Sprintf("fd7a:115c:a1e0::%d", i)
|
||||
|
||||
return &types.Node{
|
||||
IPv4: nap(v4),
|
||||
IPv6: nap(v6),
|
||||
@@ -484,12 +485,13 @@ func TestBackfillIPAddresses(t *testing.T) {
|
||||
func TestIPAllocatorNextNoReservedIPs(t *testing.T) {
|
||||
db, err := newSQLiteTestDB()
|
||||
require.NoError(t, err)
|
||||
|
||||
defer db.Close()
|
||||
|
||||
alloc, err := NewIPAllocator(
|
||||
db,
|
||||
ptr.To(tsaddr.CGNATRange()),
|
||||
ptr.To(tsaddr.TailscaleULARange()),
|
||||
new(tsaddr.CGNATRange()),
|
||||
new(tsaddr.TailscaleULARange()),
|
||||
types.IPAllocationStrategySequential,
|
||||
)
|
||||
if err != nil {
|
||||
@@ -497,17 +499,17 @@ func TestIPAllocatorNextNoReservedIPs(t *testing.T) {
|
||||
}
|
||||
|
||||
// Validate that we do not give out 100.100.100.100
|
||||
nextQuad100, err := alloc.next(na("100.100.100.99"), ptr.To(tsaddr.CGNATRange()))
|
||||
nextQuad100, err := alloc.next(na("100.100.100.99"), new(tsaddr.CGNATRange()))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, na("100.100.100.101"), *nextQuad100)
|
||||
|
||||
// Validate that we do not give out fd7a:115c:a1e0::53
|
||||
nextQuad100v6, err := alloc.next(na("fd7a:115c:a1e0::52"), ptr.To(tsaddr.TailscaleULARange()))
|
||||
nextQuad100v6, err := alloc.next(na("fd7a:115c:a1e0::52"), new(tsaddr.TailscaleULARange()))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, na("fd7a:115c:a1e0::54"), *nextQuad100v6)
|
||||
|
||||
// Validate that we do not give out fd7a:115c:a1e0::53
|
||||
nextChrome, err := alloc.next(na("100.115.91.255"), ptr.To(tsaddr.CGNATRange()))
|
||||
nextChrome, err := alloc.next(na("100.115.91.255"), new(tsaddr.CGNATRange()))
|
||||
t.Logf("chrome: %s", nextChrome.String())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, na("100.115.94.0"), *nextChrome)
|
||||
|
||||
@@ -16,18 +16,24 @@ import (
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/juanfont/headscale/hscontrol/util/zlog/zf"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
const (
|
||||
NodeGivenNameHashLength = 8
|
||||
NodeGivenNameTrimSize = 2
|
||||
|
||||
// defaultTestNodePrefix is the default hostname prefix for nodes created in tests.
|
||||
defaultTestNodePrefix = "testnode"
|
||||
)
|
||||
|
||||
// ErrNodeNameNotUnique is returned when a node name is not unique.
|
||||
var ErrNodeNameNotUnique = errors.New("node name is not unique")
|
||||
|
||||
var invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")
|
||||
|
||||
var (
|
||||
@@ -51,12 +57,14 @@ func (hsdb *HSDatabase) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID)
|
||||
// If at least one peer ID is given, only these peer nodes will be returned.
|
||||
func ListPeers(tx *gorm.DB, nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) {
|
||||
nodes := types.Nodes{}
|
||||
if err := tx.
|
||||
|
||||
err := tx.
|
||||
Preload("AuthKey").
|
||||
Preload("AuthKey.User").
|
||||
Preload("User").
|
||||
Where("id <> ?", nodeID).
|
||||
Where(peerIDs).Find(&nodes).Error; err != nil {
|
||||
Where(peerIDs).Find(&nodes).Error
|
||||
if err != nil {
|
||||
return types.Nodes{}, err
|
||||
}
|
||||
|
||||
@@ -75,11 +83,13 @@ func (hsdb *HSDatabase) ListNodes(nodeIDs ...types.NodeID) (types.Nodes, error)
|
||||
// or for the given nodes if at least one node ID is given as parameter.
|
||||
func ListNodes(tx *gorm.DB, nodeIDs ...types.NodeID) (types.Nodes, error) {
|
||||
nodes := types.Nodes{}
|
||||
if err := tx.
|
||||
|
||||
err := tx.
|
||||
Preload("AuthKey").
|
||||
Preload("AuthKey.User").
|
||||
Preload("User").
|
||||
Where(nodeIDs).Find(&nodes).Error; err != nil {
|
||||
Where(nodeIDs).Find(&nodes).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -89,7 +99,9 @@ func ListNodes(tx *gorm.DB, nodeIDs ...types.NodeID) (types.Nodes, error) {
|
||||
func (hsdb *HSDatabase) ListEphemeralNodes() (types.Nodes, error) {
|
||||
return Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) {
|
||||
nodes := types.Nodes{}
|
||||
if err := rx.Joins("AuthKey").Where(`"AuthKey"."ephemeral" = true`).Find(&nodes).Error; err != nil {
|
||||
|
||||
err := rx.Joins("AuthKey").Where(`"AuthKey"."ephemeral" = true`).Find(&nodes).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -207,6 +219,7 @@ func SetTags(
|
||||
|
||||
slices.Sort(tags)
|
||||
tags = slices.Compact(tags)
|
||||
|
||||
b, err := json.Marshal(tags)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -220,7 +233,7 @@ func SetTags(
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetTags takes a Node struct pointer and update the forced tags.
|
||||
// SetApprovedRoutes takes a Node struct pointer and updates the approved routes.
|
||||
func SetApprovedRoutes(
|
||||
tx *gorm.DB,
|
||||
nodeID types.NodeID,
|
||||
@@ -228,7 +241,8 @@ func SetApprovedRoutes(
|
||||
) error {
|
||||
if len(routes) == 0 {
|
||||
// if no routes are provided, we remove all
|
||||
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", "[]").Error; err != nil {
|
||||
err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", "[]").Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("removing approved routes: %w", err)
|
||||
}
|
||||
|
||||
@@ -251,7 +265,7 @@ func SetApprovedRoutes(
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", string(b)).Error; err != nil {
|
||||
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", string(b)).Error; err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("updating approved routes: %w", err)
|
||||
}
|
||||
|
||||
@@ -277,37 +291,39 @@ func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error {
|
||||
func RenameNode(tx *gorm.DB,
|
||||
nodeID types.NodeID, newName string,
|
||||
) error {
|
||||
if err := util.ValidateHostname(newName); err != nil {
|
||||
err := util.ValidateHostname(newName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("renaming node: %w", err)
|
||||
}
|
||||
|
||||
// Check if the new name is unique
|
||||
var count int64
|
||||
if err := tx.Model(&types.Node{}).Where("given_name = ? AND id != ?", newName, nodeID).Count(&count).Error; err != nil {
|
||||
return fmt.Errorf("failed to check name uniqueness: %w", err)
|
||||
|
||||
err = tx.Model(&types.Node{}).Where("given_name = ? AND id != ?", newName, nodeID).Count(&count).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking name uniqueness: %w", err)
|
||||
}
|
||||
|
||||
if count > 0 {
|
||||
return errors.New("name is not unique")
|
||||
return ErrNodeNameNotUnique
|
||||
}
|
||||
|
||||
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("given_name", newName).Error; err != nil {
|
||||
return fmt.Errorf("failed to rename node in the database: %w", err)
|
||||
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("given_name", newName).Error; err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("renaming node in database: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hsdb *HSDatabase) NodeSetExpiry(nodeID types.NodeID, expiry time.Time) error {
|
||||
func (hsdb *HSDatabase) NodeSetExpiry(nodeID types.NodeID, expiry *time.Time) error {
|
||||
return hsdb.Write(func(tx *gorm.DB) error {
|
||||
return NodeSetExpiry(tx, nodeID, expiry)
|
||||
})
|
||||
}
|
||||
|
||||
// NodeSetExpiry takes a Node struct and a new expiry time.
|
||||
func NodeSetExpiry(tx *gorm.DB,
|
||||
nodeID types.NodeID, expiry time.Time,
|
||||
) error {
|
||||
// NodeSetExpiry sets a new expiry time for a node.
|
||||
// If expiry is nil, the node's expiry is disabled (node will never expire).
|
||||
func NodeSetExpiry(tx *gorm.DB, nodeID types.NodeID, expiry *time.Time) error {
|
||||
return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("expiry", expiry).Error
|
||||
}
|
||||
|
||||
@@ -323,7 +339,8 @@ func DeleteNode(tx *gorm.DB,
|
||||
node *types.Node,
|
||||
) error {
|
||||
// Unscoped causes the node to be fully removed from the database.
|
||||
if err := tx.Unscoped().Delete(&types.Node{}, node.ID).Error; err != nil {
|
||||
err := tx.Unscoped().Delete(&types.Node{}, node.ID).Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -337,9 +354,11 @@ func (hsdb *HSDatabase) DeleteEphemeralNode(
|
||||
nodeID types.NodeID,
|
||||
) error {
|
||||
return hsdb.Write(func(tx *gorm.DB) error {
|
||||
if err := tx.Unscoped().Delete(&types.Node{}, nodeID).Error; err != nil {
|
||||
err := tx.Unscoped().Delete(&types.Node{}, nodeID).Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
@@ -352,19 +371,19 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
|
||||
}
|
||||
|
||||
logEvent := log.Debug().
|
||||
Str("node", node.Hostname).
|
||||
Str("machine_key", node.MachineKey.ShortString()).
|
||||
Str("node_key", node.NodeKey.ShortString())
|
||||
Str(zf.NodeHostname, node.Hostname).
|
||||
Str(zf.MachineKey, node.MachineKey.ShortString()).
|
||||
Str(zf.NodeKey, node.NodeKey.ShortString())
|
||||
|
||||
if node.User != nil {
|
||||
logEvent = logEvent.Str("user", node.User.Username())
|
||||
logEvent = logEvent.Str(zf.UserName, node.User.Username())
|
||||
} else if node.UserID != nil {
|
||||
logEvent = logEvent.Uint("user_id", *node.UserID)
|
||||
logEvent = logEvent.Uint(zf.UserID, *node.UserID)
|
||||
} else {
|
||||
logEvent = logEvent.Str("user", "none")
|
||||
logEvent = logEvent.Str(zf.UserName, "none")
|
||||
}
|
||||
|
||||
logEvent.Msg("Registering test node")
|
||||
logEvent.Msg("registering test node")
|
||||
|
||||
// If the a new node is registered with the same machine key, to the same user,
|
||||
// update the existing node.
|
||||
@@ -379,6 +398,7 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
|
||||
if ipv4 == nil {
|
||||
ipv4 = oldNode.IPv4
|
||||
}
|
||||
|
||||
if ipv6 == nil {
|
||||
ipv6 = oldNode.IPv6
|
||||
}
|
||||
@@ -388,16 +408,17 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
|
||||
// so we store the node.Expire and node.Nodekey that has been set when
|
||||
// adding it to the registrationCache
|
||||
if node.IPv4 != nil || node.IPv6 != nil {
|
||||
if err := tx.Save(&node).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed register existing node in the database: %w", err)
|
||||
err := tx.Save(&node).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("registering existing node in database: %w", err)
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Str("machine_key", node.MachineKey.ShortString()).
|
||||
Str("node_key", node.NodeKey.ShortString()).
|
||||
Str("user", node.User.Username()).
|
||||
Str(zf.NodeHostname, node.Hostname).
|
||||
Str(zf.MachineKey, node.MachineKey.ShortString()).
|
||||
Str(zf.NodeKey, node.NodeKey.ShortString()).
|
||||
Str(zf.UserName, node.User.Username()).
|
||||
Msg("Test node authorized again")
|
||||
|
||||
return &node, nil
|
||||
@@ -407,29 +428,30 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
|
||||
node.IPv6 = ipv6
|
||||
|
||||
var err error
|
||||
|
||||
node.Hostname, err = util.NormaliseHostname(node.Hostname)
|
||||
if err != nil {
|
||||
newHostname := util.InvalidString()
|
||||
log.Info().Err(err).Str("invalid-hostname", node.Hostname).Str("new-hostname", newHostname).Msgf("Invalid hostname, replacing")
|
||||
log.Info().Err(err).Str(zf.InvalidHostname, node.Hostname).Str(zf.NewHostname, newHostname).Msgf("invalid hostname, replacing")
|
||||
node.Hostname = newHostname
|
||||
}
|
||||
|
||||
if node.GivenName == "" {
|
||||
givenName, err := EnsureUniqueGivenName(tx, node.Hostname)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to ensure unique given name: %w", err)
|
||||
return nil, fmt.Errorf("ensuring unique given name: %w", err)
|
||||
}
|
||||
|
||||
node.GivenName = givenName
|
||||
}
|
||||
|
||||
if err := tx.Save(&node).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed register(save) node in the database: %w", err)
|
||||
if err := tx.Save(&node).Error; err != nil { //nolint:noinlineerr
|
||||
return nil, fmt.Errorf("saving node to database: %w", err)
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Str(zf.NodeHostname, node.Hostname).
|
||||
Msg("Test node registered with the database")
|
||||
|
||||
return &node, nil
|
||||
@@ -491,8 +513,10 @@ func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
|
||||
|
||||
func isUniqueName(tx *gorm.DB, name string) (bool, error) {
|
||||
nodes := types.Nodes{}
|
||||
if err := tx.
|
||||
Where("given_name = ?", name).Find(&nodes).Error; err != nil {
|
||||
|
||||
err := tx.
|
||||
Where("given_name = ?", name).Find(&nodes).Error
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -646,7 +670,7 @@ func (hsdb *HSDatabase) CreateNodeForTest(user *types.User, hostname ...string)
|
||||
panic("CreateNodeForTest requires a valid user")
|
||||
}
|
||||
|
||||
nodeName := "testnode"
|
||||
nodeName := defaultTestNodePrefix
|
||||
if len(hostname) > 0 && hostname[0] != "" {
|
||||
nodeName = hostname[0]
|
||||
}
|
||||
@@ -657,6 +681,7 @@ func (hsdb *HSDatabase) CreateNodeForTest(user *types.User, hostname ...string)
|
||||
panic(fmt.Sprintf("failed to create preauth key for test node: %v", err))
|
||||
}
|
||||
|
||||
pakID := pak.ID
|
||||
nodeKey := key.NewNode()
|
||||
machineKey := key.NewMachine()
|
||||
discoKey := key.NewDisco()
|
||||
@@ -668,7 +693,7 @@ func (hsdb *HSDatabase) CreateNodeForTest(user *types.User, hostname ...string)
|
||||
Hostname: nodeName,
|
||||
UserID: &user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: ptr.To(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
|
||||
err = hsdb.DB.Save(node).Error
|
||||
@@ -694,9 +719,12 @@ func (hsdb *HSDatabase) CreateRegisteredNodeForTest(user *types.User, hostname .
|
||||
}
|
||||
|
||||
var registeredNode *types.Node
|
||||
|
||||
err = hsdb.DB.Transaction(func(tx *gorm.DB) error {
|
||||
var err error
|
||||
|
||||
registeredNode, err = RegisterNodeForTest(tx, *node, ipv4, ipv6)
|
||||
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
@@ -715,7 +743,7 @@ func (hsdb *HSDatabase) CreateNodesForTest(user *types.User, count int, hostname
|
||||
panic("CreateNodesForTest requires a valid user")
|
||||
}
|
||||
|
||||
prefix := "testnode"
|
||||
prefix := defaultTestNodePrefix
|
||||
if len(hostnamePrefix) > 0 && hostnamePrefix[0] != "" {
|
||||
prefix = hostnamePrefix[0]
|
||||
}
|
||||
@@ -738,7 +766,7 @@ func (hsdb *HSDatabase) CreateRegisteredNodesForTest(user *types.User, count int
|
||||
panic("CreateRegisteredNodesForTest requires a valid user")
|
||||
}
|
||||
|
||||
prefix := "testnode"
|
||||
prefix := defaultTestNodePrefix
|
||||
if len(hostnamePrefix) > 0 && hostnamePrefix[0] != "" {
|
||||
prefix = hostnamePrefix[0]
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ import (
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
func TestGetNode(t *testing.T) {
|
||||
@@ -102,6 +101,8 @@ func TestExpireNode(t *testing.T) {
|
||||
pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
pakID := pak.ID
|
||||
|
||||
_, err = db.getNode(types.UserID(user.ID), "testnode")
|
||||
require.Error(t, err)
|
||||
|
||||
@@ -115,7 +116,7 @@ func TestExpireNode(t *testing.T) {
|
||||
Hostname: "testnode",
|
||||
UserID: &user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: ptr.To(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
Expiry: &time.Time{},
|
||||
}
|
||||
db.DB.Save(node)
|
||||
@@ -127,7 +128,7 @@ func TestExpireNode(t *testing.T) {
|
||||
assert.False(t, nodeFromDB.IsExpired())
|
||||
|
||||
now := time.Now()
|
||||
err = db.NodeSetExpiry(nodeFromDB.ID, now)
|
||||
err = db.NodeSetExpiry(nodeFromDB.ID, &now)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeFromDB, err = db.getNode(types.UserID(user.ID), "testnode")
|
||||
@@ -136,6 +137,48 @@ func TestExpireNode(t *testing.T) {
|
||||
assert.True(t, nodeFromDB.IsExpired())
|
||||
}
|
||||
|
||||
func TestDisableNodeExpiry(t *testing.T) {
|
||||
db, err := newSQLiteTestDB()
|
||||
require.NoError(t, err)
|
||||
|
||||
user, err := db.CreateUser(types.User{Name: "test"})
|
||||
require.NoError(t, err)
|
||||
|
||||
pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
pakID := pak.ID
|
||||
node := &types.Node{
|
||||
ID: 0,
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
NodeKey: key.NewNode().Public(),
|
||||
Hostname: "testnode",
|
||||
UserID: &user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: &pakID,
|
||||
Expiry: &time.Time{},
|
||||
}
|
||||
db.DB.Save(node)
|
||||
|
||||
// Set an expiry first.
|
||||
past := time.Now().Add(-time.Hour)
|
||||
err = db.NodeSetExpiry(node.ID, &past)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeFromDB, err := db.getNode(types.UserID(user.ID), "testnode")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, nodeFromDB.IsExpired(), "node should be expired")
|
||||
|
||||
// Disable expiry by setting nil.
|
||||
err = db.NodeSetExpiry(node.ID, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeFromDB, err = db.getNode(types.UserID(user.ID), "testnode")
|
||||
require.NoError(t, err)
|
||||
assert.False(t, nodeFromDB.IsExpired(), "node should not be expired after disabling expiry")
|
||||
assert.Nil(t, nodeFromDB.Expiry, "expiry should be nil after disabling")
|
||||
}
|
||||
|
||||
func TestSetTags(t *testing.T) {
|
||||
db, err := newSQLiteTestDB()
|
||||
require.NoError(t, err)
|
||||
@@ -146,6 +189,8 @@ func TestSetTags(t *testing.T) {
|
||||
pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
pakID := pak.ID
|
||||
|
||||
_, err = db.getNode(types.UserID(user.ID), "testnode")
|
||||
require.Error(t, err)
|
||||
|
||||
@@ -159,7 +204,7 @@ func TestSetTags(t *testing.T) {
|
||||
Hostname: "testnode",
|
||||
UserID: &user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: ptr.To(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
|
||||
trx := db.DB.Save(node)
|
||||
@@ -187,6 +232,7 @@ func TestHeadscale_generateGivenName(t *testing.T) {
|
||||
suppliedName string
|
||||
randomSuffix bool
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
@@ -443,7 +489,7 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: tt.routes,
|
||||
},
|
||||
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
|
||||
IPv4: new(netip.MustParseAddr("100.64.0.1")),
|
||||
}
|
||||
|
||||
err = adb.DB.Save(&node).Error
|
||||
@@ -460,17 +506,17 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
RoutableIPs: tt.routes,
|
||||
},
|
||||
Tags: []string{"tag:exit"},
|
||||
IPv4: ptr.To(netip.MustParseAddr("100.64.0.2")),
|
||||
IPv4: new(netip.MustParseAddr("100.64.0.2")),
|
||||
}
|
||||
|
||||
err = adb.DB.Save(&nodeTagged).Error
|
||||
require.NoError(t, err)
|
||||
|
||||
users, err := adb.ListUsers()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodes, err := adb.ListNodes()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
pm, err := pmf(users, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
@@ -498,6 +544,7 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
if len(expectedRoutes1) == 0 {
|
||||
expectedRoutes1 = nil
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(expectedRoutes1, node1ByID.AllApprovedRoutes(), util.Comparers...); diff != "" {
|
||||
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
|
||||
}
|
||||
@@ -509,6 +556,7 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
if len(expectedRoutes2) == 0 {
|
||||
expectedRoutes2 = nil
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(expectedRoutes2, node2ByID.AllApprovedRoutes(), util.Comparers...); diff != "" {
|
||||
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
|
||||
}
|
||||
@@ -520,6 +568,7 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
func TestEphemeralGarbageCollectorOrder(t *testing.T) {
|
||||
want := []types.NodeID{1, 3}
|
||||
got := []types.NodeID{}
|
||||
|
||||
var mu sync.Mutex
|
||||
|
||||
deletionCount := make(chan struct{}, 10)
|
||||
@@ -527,6 +576,7 @@ func TestEphemeralGarbageCollectorOrder(t *testing.T) {
|
||||
e := NewEphemeralGarbageCollector(func(ni types.NodeID) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
got = append(got, ni)
|
||||
|
||||
deletionCount <- struct{}{}
|
||||
@@ -576,8 +626,10 @@ func TestEphemeralGarbageCollectorOrder(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEphemeralGarbageCollectorLoads(t *testing.T) {
|
||||
var got []types.NodeID
|
||||
var mu sync.Mutex
|
||||
var (
|
||||
got []types.NodeID
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
want := 1000
|
||||
|
||||
@@ -589,6 +641,7 @@ func TestEphemeralGarbageCollectorLoads(t *testing.T) {
|
||||
|
||||
// Yield to other goroutines to introduce variability
|
||||
runtime.Gosched()
|
||||
|
||||
got = append(got, ni)
|
||||
|
||||
atomic.AddInt64(&deletedCount, 1)
|
||||
@@ -616,9 +669,12 @@ func TestEphemeralGarbageCollectorLoads(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func generateRandomNumber(t *testing.T, max int64) int64 {
|
||||
//nolint:unused
|
||||
func generateRandomNumber(t *testing.T, maxVal int64) int64 {
|
||||
t.Helper()
|
||||
maxB := big.NewInt(max)
|
||||
|
||||
maxB := big.NewInt(maxVal)
|
||||
|
||||
n, err := rand.Int(rand.Reader, maxB)
|
||||
if err != nil {
|
||||
t.Fatalf("getting random number: %s", err)
|
||||
@@ -642,6 +698,9 @@ func TestListEphemeralNodes(t *testing.T) {
|
||||
pakEph, err := db.CreatePreAuthKey(user.TypedID(), false, true, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
pakID := pak.ID
|
||||
pakEphID := pakEph.ID
|
||||
|
||||
node := types.Node{
|
||||
ID: 0,
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
@@ -649,7 +708,7 @@ func TestListEphemeralNodes(t *testing.T) {
|
||||
Hostname: "test",
|
||||
UserID: &user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: ptr.To(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
|
||||
nodeEph := types.Node{
|
||||
@@ -659,7 +718,7 @@ func TestListEphemeralNodes(t *testing.T) {
|
||||
Hostname: "ephemeral",
|
||||
UserID: &user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: ptr.To(pakEph.ID),
|
||||
AuthKeyID: &pakEphID,
|
||||
}
|
||||
|
||||
err = db.DB.Save(&node).Error
|
||||
@@ -722,7 +781,7 @@ func TestNodeNaming(t *testing.T) {
|
||||
nodeInvalidHostname := types.Node{
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
NodeKey: key.NewNode().Public(),
|
||||
Hostname: "我的电脑",
|
||||
Hostname: "我的电脑", //nolint:gosmopolitan // intentional i18n test data
|
||||
UserID: &user2.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
}
|
||||
@@ -746,12 +805,15 @@ func TestNodeNaming(t *testing.T) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = RegisterNodeForTest(tx, node2, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = RegisterNodeForTest(tx, nodeInvalidHostname, ptr.To(mpp("100.64.0.66/32").Addr()), nil)
|
||||
_, err = RegisterNodeForTest(tx, nodeShortHostname, ptr.To(mpp("100.64.0.67/32").Addr()), nil)
|
||||
|
||||
_, _ = RegisterNodeForTest(tx, nodeInvalidHostname, new(mpp("100.64.0.66/32").Addr()), nil)
|
||||
_, err = RegisterNodeForTest(tx, nodeShortHostname, new(mpp("100.64.0.67/32").Addr()), nil)
|
||||
|
||||
return err
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -810,25 +872,25 @@ func TestNodeNaming(t *testing.T) {
|
||||
err = db.Write(func(tx *gorm.DB) error {
|
||||
return RenameNode(tx, nodes[0].ID, "test")
|
||||
})
|
||||
assert.ErrorContains(t, err, "name is not unique")
|
||||
require.ErrorContains(t, err, "name is not unique")
|
||||
|
||||
// Rename invalid chars
|
||||
err = db.Write(func(tx *gorm.DB) error {
|
||||
return RenameNode(tx, nodes[2].ID, "我的电脑")
|
||||
return RenameNode(tx, nodes[2].ID, "我的电脑") //nolint:gosmopolitan // intentional i18n test data
|
||||
})
|
||||
assert.ErrorContains(t, err, "invalid characters")
|
||||
require.ErrorContains(t, err, "invalid characters")
|
||||
|
||||
// Rename too short
|
||||
err = db.Write(func(tx *gorm.DB) error {
|
||||
return RenameNode(tx, nodes[3].ID, "a")
|
||||
})
|
||||
assert.ErrorContains(t, err, "at least 2 characters")
|
||||
require.ErrorContains(t, err, "at least 2 characters")
|
||||
|
||||
// Rename with emoji
|
||||
err = db.Write(func(tx *gorm.DB) error {
|
||||
return RenameNode(tx, nodes[0].ID, "hostname-with-💩")
|
||||
})
|
||||
assert.ErrorContains(t, err, "invalid characters")
|
||||
require.ErrorContains(t, err, "invalid characters")
|
||||
|
||||
// Rename with only emoji
|
||||
err = db.Write(func(tx *gorm.DB) error {
|
||||
@@ -896,12 +958,12 @@ func TestRenameNodeComprehensive(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "chinese_chars_with_dash_rejected",
|
||||
newName: "server-北京-01",
|
||||
newName: "server-北京-01", //nolint:gosmopolitan // intentional i18n test data
|
||||
wantErr: "invalid characters",
|
||||
},
|
||||
{
|
||||
name: "chinese_only_rejected",
|
||||
newName: "我的电脑",
|
||||
newName: "我的电脑", //nolint:gosmopolitan // intentional i18n test data
|
||||
wantErr: "invalid characters",
|
||||
},
|
||||
{
|
||||
@@ -911,7 +973,7 @@ func TestRenameNodeComprehensive(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "mixed_chinese_emoji_rejected",
|
||||
newName: "测试💻机器",
|
||||
newName: "测试💻机器", //nolint:gosmopolitan // intentional i18n test data
|
||||
wantErr: "invalid characters",
|
||||
},
|
||||
{
|
||||
@@ -1000,6 +1062,7 @@ func TestListPeers(t *testing.T) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = RegisterNodeForTest(tx, node2, nil, nil)
|
||||
|
||||
return err
|
||||
@@ -1085,6 +1148,7 @@ func TestListNodes(t *testing.T) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = RegisterNodeForTest(tx, node2, nil, nil)
|
||||
|
||||
return err
|
||||
|
||||
@@ -17,7 +17,8 @@ func (hsdb *HSDatabase) SetPolicy(policy string) (*types.Policy, error) {
|
||||
Data: policy,
|
||||
}
|
||||
|
||||
if err := hsdb.DB.Clauses(clause.Returning{}).Create(&p).Error; err != nil {
|
||||
err := hsdb.DB.Clauses(clause.Returning{}).Create(&p).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -138,8 +138,8 @@ func CreatePreAuthKey(
|
||||
Hash: hash, // Store hash
|
||||
}
|
||||
|
||||
if err := tx.Save(&key).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed to create key in the database: %w", err)
|
||||
if err := tx.Save(&key).Error; err != nil { //nolint:noinlineerr
|
||||
return nil, fmt.Errorf("creating key in database: %w", err)
|
||||
}
|
||||
|
||||
return &types.PreAuthKeyNew{
|
||||
@@ -155,9 +155,7 @@ func CreatePreAuthKey(
|
||||
}
|
||||
|
||||
func (hsdb *HSDatabase) ListPreAuthKeys() ([]types.PreAuthKey, error) {
|
||||
return Read(hsdb.DB, func(rx *gorm.DB) ([]types.PreAuthKey, error) {
|
||||
return ListPreAuthKeys(rx)
|
||||
})
|
||||
return Read(hsdb.DB, ListPreAuthKeys)
|
||||
}
|
||||
|
||||
// ListPreAuthKeys returns all PreAuthKeys in the database.
|
||||
@@ -296,7 +294,7 @@ func DestroyPreAuthKey(tx *gorm.DB, id uint64) error {
|
||||
Where("auth_key_id = ?", id).
|
||||
Update("auth_key_id", nil).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to clear auth_key_id on nodes: %w", err)
|
||||
return fmt.Errorf("clearing auth_key_id on nodes: %w", err)
|
||||
}
|
||||
|
||||
// Then delete the pre-auth key
|
||||
@@ -325,14 +323,15 @@ func (hsdb *HSDatabase) DeletePreAuthKey(id uint64) error {
|
||||
func UsePreAuthKey(tx *gorm.DB, k *types.PreAuthKey) error {
|
||||
err := tx.Model(k).Update("used", true).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update key used status in the database: %w", err)
|
||||
return fmt.Errorf("updating key used status in database: %w", err)
|
||||
}
|
||||
|
||||
k.Used = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkExpirePreAuthKey marks a PreAuthKey as expired.
|
||||
// ExpirePreAuthKey marks a PreAuthKey as expired.
|
||||
func ExpirePreAuthKey(tx *gorm.DB, id uint64) error {
|
||||
now := time.Now()
|
||||
return tx.Model(&types.PreAuthKey{}).Where("id = ?", id).Update("expiration", now).Error
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
func TestCreatePreAuthKey(t *testing.T) {
|
||||
@@ -24,7 +23,7 @@ func TestCreatePreAuthKey(t *testing.T) {
|
||||
test: func(t *testing.T, db *HSDatabase) {
|
||||
t.Helper()
|
||||
|
||||
_, err := db.CreatePreAuthKey(ptr.To(types.UserID(12345)), true, false, nil, nil)
|
||||
_, err := db.CreatePreAuthKey(new(types.UserID(12345)), true, false, nil, nil)
|
||||
assert.Error(t, err)
|
||||
},
|
||||
},
|
||||
@@ -127,7 +126,7 @@ func TestCannotDeleteAssignedPreAuthKey(t *testing.T) {
|
||||
Hostname: "testest",
|
||||
UserID: &user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: ptr.To(key.ID),
|
||||
AuthKeyID: new(key.ID),
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
|
||||
|
||||
@@ -79,6 +79,8 @@ CREATE TABLE nodes(
|
||||
ipv6 text,
|
||||
hostname text,
|
||||
given_name varchar(63),
|
||||
-- user_id is NULL for tagged nodes (owned by tags, not a user).
|
||||
-- Only set for user-owned nodes (no tags).
|
||||
user_id integer,
|
||||
register_method text,
|
||||
tags text,
|
||||
@@ -104,3 +106,9 @@ CREATE TABLE policies(
|
||||
deleted_at datetime
|
||||
);
|
||||
CREATE INDEX idx_policies_deleted_at ON policies(deleted_at);
|
||||
|
||||
CREATE TABLE database_versions(
|
||||
id integer PRIMARY KEY,
|
||||
version text NOT NULL,
|
||||
updated_at datetime
|
||||
);
|
||||
|
||||
@@ -362,7 +362,8 @@ func (c *Config) Validate() error {
|
||||
// ToURL builds a properly encoded SQLite connection string using _pragma parameters
|
||||
// compatible with modernc.org/sqlite driver.
|
||||
func (c *Config) ToURL() (string, error) {
|
||||
if err := c.Validate(); err != nil {
|
||||
err := c.Validate()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid config: %w", err)
|
||||
}
|
||||
|
||||
@@ -372,18 +373,23 @@ func (c *Config) ToURL() (string, error) {
|
||||
if c.BusyTimeout > 0 {
|
||||
pragmas = append(pragmas, fmt.Sprintf("busy_timeout=%d", c.BusyTimeout))
|
||||
}
|
||||
|
||||
if c.JournalMode != "" {
|
||||
pragmas = append(pragmas, fmt.Sprintf("journal_mode=%s", c.JournalMode))
|
||||
}
|
||||
|
||||
if c.AutoVacuum != "" {
|
||||
pragmas = append(pragmas, fmt.Sprintf("auto_vacuum=%s", c.AutoVacuum))
|
||||
}
|
||||
|
||||
if c.WALAutocheckpoint >= 0 {
|
||||
pragmas = append(pragmas, fmt.Sprintf("wal_autocheckpoint=%d", c.WALAutocheckpoint))
|
||||
}
|
||||
|
||||
if c.Synchronous != "" {
|
||||
pragmas = append(pragmas, fmt.Sprintf("synchronous=%s", c.Synchronous))
|
||||
}
|
||||
|
||||
if c.ForeignKeys {
|
||||
pragmas = append(pragmas, "foreign_keys=ON")
|
||||
}
|
||||
|
||||
@@ -294,6 +294,7 @@ func TestConfigToURL(t *testing.T) {
|
||||
t.Errorf("Config.ToURL() error = %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if got != tt.want {
|
||||
t.Errorf("Config.ToURL() = %q, want %q", got, tt.want)
|
||||
}
|
||||
@@ -306,6 +307,7 @@ func TestConfigToURLInvalid(t *testing.T) {
|
||||
Path: "",
|
||||
BusyTimeout: -1,
|
||||
}
|
||||
|
||||
_, err := config.ToURL()
|
||||
if err == nil {
|
||||
t.Error("Config.ToURL() with invalid config should return error")
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user