Mirror of https://github.com/juanfont/headscale.git (synced 2026-02-12 10:47:42 +01:00)

Compare commits: 195 commits, v0.24.0-be ... dependabot
Commit SHAs in this range:

ab70b4e37e a058bf3cd3 b2a18830ed 9779adc0b7 e7fe645be5 bcd80ee773 c04e17d82e 98fc0563ac 3123d5286b 7fce5065c4
a98d9bd05f 46c59a3fff 044193bf34 a8f2eebf66 6220e64978 c6d7b512bd b904276f2b 4a8d2d9ed3 22e6094a90 73023c2ec3
5ba7120418 d311d2e206 05996a5048 4668e5dd96 c6736dd6d6 855c48aec2 ded049b905 3bad5d5590 d461db3abd efc6974017
3f72ee9de8 e73b2a9fb9 081af2674b 1553f0ab53 a975b6a8b1 afc11e1f0c ea7376f522 d325211617 bad783321e b8044c29dd
df69840f92 76ca7a2b50 cd704570be 43c9c50af4 4a941a2cb4 d2879b2b36 a52f1df180 1605e2a7a9 6750414db1 b50e10a1be
c15aa541bb 49b3468845 bd6ed80936 30525cee0e 2dc2f3b3f0 d7a503a34e 62b489dc68 8c7e650616 43943aeee9 d81b0053e5
dd0cbdf40c 37dc0dad35 377b854dd8 56db4ed0f1 833e0f66f1 1dddd3e93b 9a86ffc102 45e38cb080 b9868f6516 f317a85ab4
53d9c95160 03a91693ac cb7c0173ec 18d21d3585 e7d2d79134 d810597414 93afb03f67 e4d10ad964 7dc86366b4 c923f461ab
a4a203b9a3 4651d06fa8 eb1ecefd9e 6b6509eeeb cfe9bbf829 8f9fbf16f1 f1206328dc 57861507ab 2b38f7bef7 9a4d0e1a99
30539b2e26 098ab0357c 56d085bd08 92e587a82c f3a1e693f2 f783555469 710d75367e c30e3a4762 3287aa8bba 8e7e52cf3a
1e0516b99d 0fbe392499 109989005d 0d3134720b d2a6356d89 5a18e91317 e3521be705 f52f15ff08 cbc99010f0 b5953d689c
badbb68217 603f3ad490 707438f25e 24ad235917 00d5d647ed cbce8f6011 05202099f7 800456018a 586a20fbff 818046f240
fe06a00d45 0b5c29e875 0a243b4162 29ba29478b e52f1e87ce 87326f5c4f b6fbd37539 7891378f57 16868190c8 da2ca054b1
bcff0eaae7 b220fb7d51 2cce3a99eb bbe57f6cd4 604f7f6282 c61fbe9c5f b943cce868 6403c8d5d2 b3fa16fbda 1f0110fe06
b92bd3d27e 3bf7d5a9c9 1d65865425 c53ff2ce00 b4ac8cd9a3 22277d1fc7 9ae3570154 f12cb2e048 8c09afe20c 8b92c017ec
9a7890d56b 45752db0f6 1c7f3bc440 9bd143852f d57a55c024 e172c29360 cd3b8e68ff f44b1d37c4 7ba6ad3489 2c279e0a7b
4c8e847f47 97e5d95399 d1dbe4ece9 9e3f945eda 615ee5df75 c1f42cdf4b aa76980b43 5b986ed0a7 8076c94444 e88406e837
e4a3dcc3b8 caad5c613d 38aef77e54 1ab7b315a2 610597bfb7 ede4f97a16 fa641e38b8 41bad2b9fd f9bbfa5eab b81420bef1
9313e5b058 770f3dcb93 af4508b9dc bbc93a90a2 0acb2b5647 3269cfdca0 319ce67c87 47b405d6c6 65304a0ce7 e270169c13
7d937c6bd0 ccc895b4c6 5345f19693 ec8729b772 e00b9d9a91
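Only abbreviated commit hashes survive in the listing above. With a local clone, the same range can be listed with authors and messages; the refs below are placeholders for the base and head of this comparison, not literal values:

```bash
# List the commits between two refs, one line per commit.
git log --oneline <base-ref>..<head-ref>
```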
@@ -1,15 +0,0 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-GB"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: true
drafts: true
chat:
auto_reply: true

@@ -17,3 +17,7 @@ LICENSE
.vscode

*.sock

node_modules/
package-lock.json
package.json
.github/ISSUE_TEMPLATE/bug_report.yaml (42 changes, vendored)
@@ -6,14 +6,18 @@ body:
- type: checkboxes
attributes:
label: Is this a support request?
description: This issue tracker is for bugs and feature requests only. If you need help, please use ask in our Discord community
description:
This issue tracker is for bugs and feature requests only. If you need
help, please use ask in our Discord community
options:
- label: This is not a support request
required: true
- type: checkboxes
attributes:
label: Is there an existing issue for this?
description: Please search to see if an issue already exists for the bug you encountered.
description:
Please search to see if an issue already exists for the bug you
encountered.
options:
- label: I have searched the existing issues
required: true
@@ -44,10 +48,16 @@ body:
attributes:
label: Environment
description: |
Please provide information about your environment.
If you are using a container, always provide the headscale version and not only the Docker image version.
Please do not put "latest".

If you are experiencing a problem during an upgrade, please provide the versions of the old and new versions of Headscale and Tailscale.

examples:
- **OS**: Ubuntu 20.04
- **Headscale version**: 0.22.3
- **Tailscale version**: 1.64.0
- **OS**: Ubuntu 24.04
- **Headscale version**: 0.24.3
- **Tailscale version**: 1.80.0
value: |
- OS:
- Headscale version:
@@ -65,19 +75,31 @@ body:
required: false
- type: textarea
attributes:
label: Anything else?
label: Debug information
description: |
Links? References? Anything that will give us more context about the issue you are encountering!
Please have a look at our [Debugging and troubleshooting
guide](https://headscale.net/development/ref/debug/) to learn about
common debugging techniques.

Links? References? Anything that will give us more context about the issue you are encountering.
If **any** of these are omitted we will likely close your issue, do **not** ignore them.

- Client netmap dump (see below)
- ACL configuration
- Policy configuration
- Headscale configuration
- Headscale log (with `trace` enabled)

Dump the netmap of tailscale clients:
`tailscale debug netmap > DESCRIPTIVE_NAME.json`

Please provide information describing the netmap, which client, which headscale version etc.
Dump the status of tailscale clients:
`tailscale status --json > DESCRIPTIVE_NAME.json`

Get the logs of a Tailscale client that is not working as expected.
`tailscale debug daemon-logs`

Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
**Ensure** you use formatting for files you attach.
Do **not** paste in long files.
validations:
required: false
required: true
.github/ISSUE_TEMPLATE/feature_request.yaml (9 changes, vendored)
@@ -16,13 +16,15 @@ body:
- type: textarea
attributes:
label: Description
description: A clear and precise description of what new or changed feature you want.
description:
A clear and precise description of what new or changed feature you want.
validations:
required: true
- type: checkboxes
attributes:
label: Contribution
description: Are you willing to contribute to the implementation of this feature?
description:
Are you willing to contribute to the implementation of this feature?
options:
- label: I can write the design doc for this feature
required: false
@@ -31,6 +33,7 @@ body:
- type: textarea
attributes:
label: How can it be implemented?
description: Free text for your ideas on how this feature could be implemented.
description:
Free text for your ideas on how this feature could be implemented.
validations:
required: false
.github/workflows/build.yml (38 changes, vendored)
@@ -17,12 +17,12 @@ jobs:
runs-on: ubuntu-latest
permissions: write-all
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@v3
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
with:
filters: |
files:
@@ -31,10 +31,15 @@ jobs:
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'
- uses: DeterminateSystems/nix-installer-action@main
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
if: steps.changed-files.outputs.files == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
if: steps.changed-files.outputs.files == 'true'
with:
primary-key:
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}

- name: Run nix build
id: build
@@ -52,7 +57,7 @@ jobs:
exit $BUILD_STATUS

- name: Nix gosum diverging
uses: actions/github-script@v6
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
if: failure() && steps.build.outcome == 'failure'
with:
github-token: ${{secrets.GITHUB_TOKEN}}
@@ -64,7 +69,7 @@ jobs:
body: 'Nix build failed with wrong gosum, please update "vendorSha256" (${{ steps.build.outputs.OLD_HASH }}) for the "headscale" package in flake.nix with the new SHA: ${{ steps.build.outputs.NEW_HASH }}'
})

- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: steps.changed-files.outputs.files == 'true'
with:
name: headscale-linux
@@ -74,22 +79,25 @@ jobs:
strategy:
matrix:
env:
- "GOARCH=arm GOOS=linux GOARM=5"
- "GOARCH=arm GOOS=linux GOARM=6"
- "GOARCH=arm GOOS=linux GOARM=7"
- "GOARCH=arm64 GOOS=linux"
- "GOARCH=386 GOOS=linux"
- "GOARCH=amd64 GOOS=linux"
- "GOARCH=arm64 GOOS=darwin"
- "GOARCH=amd64 GOOS=darwin"
steps:
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
with:
primary-key:
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}

- name: Run go cross compile
run: env ${{ matrix.env }} nix develop --command -- go build -o "headscale" ./cmd/headscale
- uses: actions/upload-artifact@v4
run:
env ${{ matrix.env }} nix develop --command -- go build -o "headscale"
./cmd/headscale
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: "headscale-${{ matrix.env }}"
path: "headscale"
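The cross-compile job above only sets GOARCH/GOOS/GOARM and builds inside the Nix dev shell, so roughly the same build can be reproduced locally from a checkout (a sketch, assuming Nix with flakes is installed; linux/arm64 is just one of the matrix entries shown above):

```bash
# Cross-compile headscale for linux/arm64 the same way the workflow does.
env GOARCH=arm64 GOOS=linux nix develop --command -- go build -o "headscale" ./cmd/headscale
```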
.github/workflows/check-generated.yml (55 changes, vendored, new file)
@@ -0,0 +1,55 @@
name: Check Generated Files

on:
push:
branches:
- main
pull_request:
branches:
- main

concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true

jobs:
check-generated:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
with:
filters: |
files:
- '*.nix'
- 'go.*'
- '**/*.go'
- '**/*.proto'
- 'buf.gen.yaml'
- 'tools/**'
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
if: steps.changed-files.outputs.files == 'true'
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
if: steps.changed-files.outputs.files == 'true'
with:
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}

- name: Run make generate
if: steps.changed-files.outputs.files == 'true'
run: nix develop --command -- make generate

- name: Check for uncommitted changes
if: steps.changed-files.outputs.files == 'true'
run: |
if ! git diff --exit-code; then
echo "❌ Generated files are not up to date!"
echo "Please run 'make generate' and commit the changes."
exit 1
else
echo "✅ All generated files are up to date."
fi
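The same check can be run locally before pushing, using the commands the workflow itself runs:

```bash
# Regenerate generated files, then verify the working tree is still clean.
nix develop --command -- make generate
git diff --exit-code
```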
.github/workflows/check-tests.yaml (15 changes, vendored)
@@ -10,12 +10,12 @@ jobs:
check-tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@v3
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
with:
filters: |
files:
@@ -24,15 +24,20 @@ jobs:
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'
- uses: DeterminateSystems/nix-installer-action@main
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
if: steps.changed-files.outputs.files == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
if: steps.changed-files.outputs.files == 'true'
with:
primary-key:
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}

- name: Generate and check integration tests
if: steps.changed-files.outputs.files == 'true'
run: |
nix develop --command bash -c "cd cmd/gh-action-integration-generator/ && go generate"
nix develop --command bash -c "cd .github/workflows && go generate"
git diff --exit-code .github/workflows/test-integration.yaml

- name: Show missing tests
.github/workflows/docs-deploy.yml (6 changes, vendored)
@@ -21,15 +21,15 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Install python
uses: actions/setup-python@v5
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: 3.x
- name: Setup cache
uses: actions/cache@v4
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
key: ${{ github.ref }}
path: .cache
.github/workflows/docs-test.yml (6 changes, vendored)
@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install python
uses: actions/setup-python@v5
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: 3.x
- name: Setup cache
uses: actions/cache@v4
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
key: ${{ github.ref }}
path: .cache
@@ -1,6 +1,6 @@
package main

//go:generate go run ./main.go
//go:generate go run ./gh-action-integration-generator.go

import (
"bytes"
@@ -38,23 +38,29 @@ func findTests() []string {
return tests
}

func updateYAML(tests []string) {
func updateYAML(tests []string, jobName string, testPath string) {
testsForYq := fmt.Sprintf("[%s]", strings.Join(tests, ", "))

yqCommand := fmt.Sprintf(
"yq eval '.jobs.integration-test.strategy.matrix.test = %s' ../../.github/workflows/test-integration.yaml -i",
"yq eval '.jobs.%s.strategy.matrix.test = %s' %s -i",
jobName,
testsForYq,
testPath,
)
cmd := exec.Command("bash", "-c", yqCommand)

var out bytes.Buffer
cmd.Stdout = &out
var stdout bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
log.Printf("stdout: %s", stdout.String())
log.Printf("stderr: %s", stderr.String())
log.Fatalf("failed to run yq command: %s", err)
}

fmt.Println("YAML file updated successfully")
fmt.Printf("YAML file (%s) job %s updated successfully\n", testPath, jobName)
}

func main() {
@@ -65,5 +71,21 @@ func main() {
quotedTests[i] = fmt.Sprintf("\"%s\"", test)
}

updateYAML(quotedTests)
// Define selected tests for PostgreSQL
postgresTestNames := []string{
"TestACLAllowUserDst",
"TestPingAllByIP",
"TestEphemeral2006DeletedTooQuickly",
"TestPingAllByIPManyUpDown",
"TestSubnetRouterMultiNetwork",
}

quotedPostgresTests := make([]string, len(postgresTestNames))
for i, test := range postgresTestNames {
quotedPostgresTests[i] = fmt.Sprintf("\"%s\"", test)
}

// Update both SQLite and PostgreSQL job matrices
updateYAML(quotedTests, "sqlite", "./test-integration.yaml")
updateYAML(quotedPostgresTests, "postgres", "./test-integration.yaml")
}
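With the new signature, updateYAML shells out to yq with a command shaped roughly like the one below (the job name and file path come from the calls above; the test list here is abridged to two names for illustration):

```bash
# Rewrite the matrix of the "postgres" job in place, as the generator does.
yq eval '.jobs.postgres.strategy.matrix.test = ["TestACLAllowUserDst", "TestPingAllByIP"]' ./test-integration.yaml -i
```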
.github/workflows/gh-actions-updater.yaml (4 changes, vendored)
@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}

- name: Run GitHub Actions Version Updater
uses: saadmk11/github-actions-version-updater@v0.8.1
uses: saadmk11/github-actions-version-updater@64be81ba69383f81f2be476703ea6570c4c8686e # v0.8.1
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}
.github/workflows/integration-test-template.yml (95 changes, vendored, new file)
@@ -0,0 +1,95 @@
name: Integration Test Template

on:
workflow_call:
inputs:
test:
required: true
type: string
postgres_flag:
required: false
type: string
default: ""
database_name:
required: true
type: string

jobs:
test:
runs-on: ubuntu-latest
env:
# Github does not allow us to access secrets in pull requests,
# so this env var is used to check if we have the secret or not.
# If we have the secrets, meaning we are running on push in a fork,
# there might be secrets available for more debugging.
# If TS_OAUTH_CLIENT_ID and TS_OAUTH_SECRET is set, then the job
# will join a debug tailscale network, set up SSH and a tmux session.
# The SSH will be configured to use the SSH key of the Github user
# that triggered the build.
HAS_TAILSCALE_SECRET: ${{ secrets.TS_OAUTH_CLIENT_ID }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
with:
filters: |
files:
- '*.nix'
- 'go.*'
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'
- name: Tailscale
if: ${{ env.HAS_TAILSCALE_SECRET }}
uses: tailscale/github-action@6986d2c82a91fbac2949fe01f5bab95cf21b5102 # v3.2.2
with:
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}
tags: tag:gh
- name: Setup SSH server for Actor
if: ${{ env.HAS_TAILSCALE_SECRET }}
uses: alexellis/setup-sshd-actor@master
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
if: steps.changed-files.outputs.files == 'true'
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
if: steps.changed-files.outputs.files == 'true'
with:
primary-key:
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- name: Run Integration Test
uses: Wandalen/wretry.action@e68c23e6309f2871ca8ae4763e7629b9c258e1ea # v3.8.0
if: steps.changed-files.outputs.files == 'true'
with:
# Our integration tests are started like a thundering herd, often
# hitting limits of the various external repositories we depend on
# like docker hub. This will retry jobs every 5 min, 10 times,
# hopefully letting us avoid manual intervention and restarting jobs.
# One could of course argue that we should invest in trying to avoid
# this, but currently it seems like a larger investment to be cleverer
# about this.
# Some of the jobs might still require manual restart as they are really
# slow and this will cause them to eventually be killed by Github actions.
attempt_delay: 300000 # 5 min
attempt_limit: 2
command: |
nix develop --command -- hi run --stats --ts-memory-limit=300 --hs-memory-limit=500 "^${{ inputs.test }}$" \
--timeout=120m \
${{ inputs.postgres_flag }}
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: always() && steps.changed-files.outputs.files == 'true'
with:
name: ${{ inputs.database_name }}-${{ inputs.test }}-logs
path: "control_logs/*/*.log"
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: always() && steps.changed-files.outputs.files == 'true'
with:
name: ${{ inputs.database_name }}-${{ inputs.test }}-archives
path: "control_logs/*/*.tar"
- name: Setup a blocking tmux session
if: ${{ env.HAS_TAILSCALE_SECRET }}
uses: alexellis/block-with-tmux-action@master
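The reusable workflow above runs one test per call through the `hi` helper inside the Nix dev shell. Roughly the same invocation can be made locally from a checkout (a sketch; `TestPingAllByIP` is just one of the matrix entries listed elsewhere in this diff, and `--postgres=0` selects the SQLite backend as in the sqlite job):

```bash
# Run a single integration test the way the workflow does.
nix develop --command -- hi run --stats \
  --ts-memory-limit=300 --hs-memory-limit=500 \
  "^TestPingAllByIP$" --timeout=120m --postgres=0
```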
.github/workflows/lint.yml (47 changes, vendored)
@@ -10,12 +10,12 @@ jobs:
golangci-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@v3
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
with:
filters: |
files:
@@ -24,24 +24,34 @@ jobs:
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'
- uses: DeterminateSystems/nix-installer-action@main
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
if: steps.changed-files.outputs.files == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
if: steps.changed-files.outputs.files == 'true'
with:
primary-key:
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}

- name: golangci-lint
if: steps.changed-files.outputs.files == 'true'
run: nix develop --command -- golangci-lint run --new-from-rev=${{github.event.pull_request.base.sha}} --out-format=colored-line-number
run: nix develop --command -- golangci-lint run
--new-from-rev=${{github.event.pull_request.base.sha}}
--output.text.path=stdout
--output.text.print-linter-name
--output.text.print-issued-lines
--output.text.colors

prettier-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@v3
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
with:
filters: |
files:
@@ -55,21 +65,32 @@ jobs:
- '**/*.css'
- '**/*.scss'
- '**/*.html'
- uses: DeterminateSystems/nix-installer-action@main
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
if: steps.changed-files.outputs.files == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
if: steps.changed-files.outputs.files == 'true'
with:
primary-key:
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}

- name: Prettify code
if: steps.changed-files.outputs.files == 'true'
run: nix develop --command -- prettier --no-error-on-unmatched-pattern --ignore-unknown --check **/*.{ts,js,md,yaml,yml,sass,css,scss,html}
run: nix develop --command -- prettier --no-error-on-unmatched-pattern
--ignore-unknown --check **/*.{ts,js,md,yaml,yml,sass,css,scss,html}

proto-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
with:
primary-key:
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}

- name: Buf lint
run: nix develop --command -- buf lint proto
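The three lint jobs can be approximated locally with the same commands, minus `--new-from-rev`, which the workflow uses to restrict golangci-lint to code changed since the PR base (a sketch, run from the repository root):

```bash
# Go, formatting and protobuf lint, as run by the workflow's three jobs.
nix develop --command -- golangci-lint run
nix develop --command -- prettier --no-error-on-unmatched-pattern --ignore-unknown --check **/*.{ts,js,md,yaml,yml,sass,css,scss,html}
nix develop --command -- buf lint proto
```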
.github/workflows/release.yml (15 changes, vendored)
@@ -13,25 +13,30 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0

- name: Login to DockerHub
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Login to GHCR
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}

- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
with:
primary-key:
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}

- name: Run goreleaser
run: nix develop --command -- goreleaser release --clean
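The release itself is `goreleaser release --clean`, which depends on a tag and registry credentials. To exercise the GoReleaser configuration locally without tags or publishing, GoReleaser's standard snapshot mode can be used (an assumption about local usage, not something this workflow passes):

```bash
# Build a local, unpublished snapshot release to validate .goreleaser.yml.
nix develop --command -- goreleaser release --snapshot --clean
```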
.github/workflows/stale.yml (10 changes, vendored)
@@ -12,13 +12,17 @@ jobs:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
with:
days-before-issue-stale: 90
days-before-issue-close: 7
stale-issue-label: "stale"
stale-issue-message: "This issue is stale because it has been open for 90 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
stale-issue-message:
"This issue is stale because it has been open for 90 days with no
activity."
close-issue-message:
"This issue was closed because it has been inactive for 14 days
since being marked as stale."
days-before-pr-stale: -1
days-before-pr-close: -1
exempt-issue-labels: "no-stale-bot"
.github/workflows/test-integration.yaml (120 changes, vendored)
@@ -1,4 +1,4 @@
name: Integration Tests
name: integration
# To debug locally on a branch, and when needing secrets
# change this to include `push` so the build is ran on
# the main repository.
@@ -7,8 +7,7 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
integration-test:
runs-on: ubuntu-latest
sqlite:
strategy:
fail-fast: false
matrix:
@@ -22,9 +21,16 @@ jobs:
- TestACLNamedHostsCanReach
- TestACLDevice1CanAccessDevice2
- TestPolicyUpdateWhileRunningWithCLIInDatabase
- TestACLAutogroupMember
- TestACLAutogroupTagged
- TestAuthKeyLogoutAndReloginSameUser
- TestAuthKeyLogoutAndReloginNewUser
- TestAuthKeyLogoutAndReloginSameUserExpiredKey
- TestOIDCAuthenticationPingAll
- TestOIDCExpireNodesBasedOnTokenExpiry
- TestOIDC024UserCreation
- TestOIDCAuthenticationWithPKCE
- TestOIDCReloginSameNodeNewUser
- TestAuthWebFlowAuthenticationPingAll
- TestAuthWebFlowLogoutAndRelogin
- TestUserCommand
@@ -44,12 +50,10 @@ jobs:
- TestDERPVerifyEndpoint
- TestResolveMagicDNS
- TestResolveMagicDNSExtraRecordsPath
- TestValidateResolvConf
- TestDERPServerScenario
- TestDERPServerWebsocketScenario
- TestPingAllByIP
- TestPingAllByIPPublicDERP
- TestAuthKeyLogoutAndRelogin
- TestEphemeral
- TestEphemeralInAlternateTimezone
- TestEphemeral2006DeletedTooQuickly
@@ -62,92 +66,36 @@ jobs:
- Test2118DeletingOnlineNodePanics
- TestEnablingRoutes
- TestHASubnetRouterFailover
- TestEnableDisableAutoApprovedRoute
- TestAutoApprovedSubRoute2068
- TestSubnetRouteACL
- TestEnablingExitRoutes
- TestSubnetRouterMultiNetwork
- TestSubnetRouterMultiNetworkExitNode
- TestAutoApproveMultiNetwork
- TestSubnetRouteACLFiltering
- TestHeadscale
- TestCreateTailscale
- TestTailscaleNodesJoiningHeadcale
- TestSSHOneUserToAll
- TestSSHMultipleUsersAllToAll
- TestSSHNoSSHConfigured
- TestSSHIsBlockedInACL
- TestSSHUserOnlyIsolation
database: [postgres, sqlite]
env:
# Github does not allow us to access secrets in pull requests,
# so this env var is used to check if we have the secret or not.
# If we have the secrets, meaning we are running on push in a fork,
# there might be secrets available for more debugging.
# If TS_OAUTH_CLIENT_ID and TS_OAUTH_SECRET is set, then the job
# will join a debug tailscale network, set up SSH and a tmux session.
# The SSH will be configured to use the SSH key of the Github user
# that triggered the build.
HAS_TAILSCALE_SECRET: ${{ secrets.TS_OAUTH_CLIENT_ID }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@v3
with:
filters: |
files:
- '*.nix'
- 'go.*'
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'
- name: Tailscale
if: ${{ env.HAS_TAILSCALE_SECRET }}
uses: tailscale/github-action@v2
with:
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}
tags: tag:gh
- name: Setup SSH server for Actor
if: ${{ env.HAS_TAILSCALE_SECRET }}
uses: alexellis/setup-sshd-actor@master
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: satackey/action-docker-layer-caching@main
if: steps.changed-files.outputs.files == 'true'
continue-on-error: true
- name: Run Integration Test
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.files == 'true'
env:
USE_POSTGRES: ${{ matrix.database == 'postgres' && '1' || '0' }}
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
--env HEADSCALE_INTEGRATION_POSTGRES=${{env.USE_POSTGRES}} \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^${{ matrix.test }}$"
- uses: actions/upload-artifact@v4
if: always() && steps.changed-files.outputs.files == 'true'
with:
name: ${{ matrix.test }}-${{matrix.database}}-logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v4
if: always() && steps.changed-files.outputs.files == 'true'
with:
name: ${{ matrix.test }}-${{matrix.database}}-pprof
path: "control_logs/*.pprof.tar"
- name: Setup a blocking tmux session
if: ${{ env.HAS_TAILSCALE_SECRET }}
uses: alexellis/block-with-tmux-action@master
uses: ./.github/workflows/integration-test-template.yml
with:
test: ${{ matrix.test }}
postgres_flag: "--postgres=0"
database_name: "sqlite"
postgres:
strategy:
fail-fast: false
matrix:
test:
- TestACLAllowUserDst
- TestPingAllByIP
- TestEphemeral2006DeletedTooQuickly
- TestPingAllByIPManyUpDown
- TestSubnetRouterMultiNetwork
uses: ./.github/workflows/integration-test-template.yml
with:
test: ${{ matrix.test }}
postgres_flag: "--postgres=1"
database_name: "postgres"
.github/workflows/test.yml (19 changes, vendored)
@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: dorny/paths-filter@v3
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
with:
filters: |
files:
@@ -27,11 +27,22 @@ jobs:
- 'integration_test/'
- 'config-example.yaml'

- uses: DeterminateSystems/nix-installer-action@main
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
if: steps.changed-files.outputs.files == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
if: steps.changed-files.outputs.files == 'true'
with:
primary-key:
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}

- name: Run tests
if: steps.changed-files.outputs.files == 'true'
env:
# As of 2025-01-06, these env vars was not automatically
# set anymore which breaks the initdb for postgres on
# some of the database migration tests.
LC_ALL: "en_US.UTF-8"
LC_CTYPE: "en_US.UTF-8"
run: nix develop --command -- gotestsum
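The unit-test step above is just gotestsum inside the Nix shell with the two locale variables set; a rough local equivalent from a checkout:

```bash
# Run the unit tests with the same locale settings the workflow needs for the
# postgres-backed migration tests.
LC_ALL="en_US.UTF-8" LC_CTYPE="en_US.UTF-8" nix develop --command -- gotestsum
```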
.github/workflows/update-flake.yml (6 changes, vendored)
@@ -10,10 +10,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Nix
uses: DeterminateSystems/nix-installer-action@main
uses: DeterminateSystems/nix-installer-action@21a544727d0c62386e78b4befe52d19ad12692e3 # v17
- name: Update flake.lock
uses: DeterminateSystems/update-flake-lock@main
uses: DeterminateSystems/update-flake-lock@428c2b58a4b7414dabd372acb6a03dba1084d3ab # v25
with:
pr-title: "Update flake.lock"
.gitignore (9 changes, vendored)
@@ -1,6 +1,9 @@
ignored/
tailscale/
.vscode/
.claude/

*.prof

# Binaries for programs and plugins
*.exe
@@ -20,9 +23,9 @@ vendor/

dist/
/headscale
config.json
config.yaml
config*.yaml
!config-example.yaml
derp.yaml
*.hujson
*.key
@@ -46,3 +49,7 @@ integration_test/etc/config.dump.yaml
/site

__debug_bin

node_modules/
package-lock.json
package.json
.golangci.yaml (126 changes)
@@ -1,70 +1,80 @@
---
run:
timeout: 10m
build-tags:
- ts2019

issues:
skip-dirs:
- gen
version: "2"
linters:
enable-all: true
default: all
disable:
- cyclop
- depguard

- revive
- lll
- gofmt
- dupl
- exhaustruct
- funlen
- gochecknoglobals
- gochecknoinits
- gocognit
- funlen
- tagliatelle
- godox
- ireturn
- execinquery
- exhaustruct
- nolintlint
- musttag # causes issues with imported libs
- depguard
- exportloopref

# We should strive to enable these:
- wrapcheck
- dupl
- makezero
- maintidx

# Limits the methods of an interface to 10. We have more in integration tests
- interfacebloat

# We might want to enable this, but it might be a lot of work
- cyclop
- ireturn
- lll
- maintidx
- makezero
- musttag
- nestif
- wsl # might be incompatible with gofumpt
- testpackage
- nolintlint
- paralleltest
- revive
- tagliatelle
- testpackage
- varnamelen
- wrapcheck
- wsl
settings:
gocritic:
disabled-checks:
- appendAssign
- ifElseChain
nlreturn:
block-size: 4
varnamelen:
ignore-names:
- err
- db
- id
- ip
- ok
- c
- tt
- tx
- rx
- sb
- wg
- pr
- p
- p2
ignore-type-assert-ok: true
ignore-map-index-ok: true
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- third_party$
- builtin$
- examples$
- gen

linters-settings:
varnamelen:
ignore-type-assert-ok: true
ignore-map-index-ok: true
ignore-names:
- err
- db
- id
- ip
- ok
- c
- tt
- tx
- rx

gocritic:
disabled-checks:
- appendAssign
# TODO(kradalby): Remove this
- ifElseChain

nlreturn:
block-size: 4
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
- gen
.goreleaser.yml (100 changes)
@@ -2,11 +2,12 @@
version: 2
before:
hooks:
- go mod tidy -compat=1.22
- go mod tidy -compat=1.24
- go mod vendor

release:
prerelease: auto
draft: true

builds:
- id: headscale
@@ -18,23 +19,22 @@ builds:
- darwin_amd64
- darwin_arm64
- freebsd_amd64
- linux_386
- linux_amd64
- linux_arm64
- linux_arm_5
- linux_arm_6
- linux_arm_7
flags:
- -mod=readonly
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
- -s -w
- -X github.com/juanfont/headscale/hscontrol/types.Version={{ .Version }}
- -X github.com/juanfont/headscale/hscontrol/types.GitCommitHash={{ .Commit }}
tags:
- ts2019

archives:
- id: golang-cross
name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}'
format: binary
formats:
- binary

source:
enabled: true
@@ -53,15 +53,22 @@ nfpms:
# List file contents: dpkg -c dist/headscale...deb
# Package metadata: dpkg --info dist/headscale....deb
#
- builds:
- ids:
- headscale
package_name: headscale
priority: optional
vendor: headscale
maintainer: Kristoffer Dalby <kristoffer@dalby.cc>
homepage: https://github.com/juanfont/headscale
license: BSD
description: |-
Open source implementation of the Tailscale control server.
Headscale aims to implement a self-hosted, open source alternative to the
Tailscale control server. Headscale's goal is to provide self-hosters and
hobbyists with an open-source server they can use for their projects and
labs. It implements a narrow scope, a single Tailscale network (tailnet),
suitable for a personal use, or a small open-source organisation.
bindir: /usr/bin
section: net
formats:
- deb
contents:
@@ -70,19 +77,27 @@ nfpms:
type: config|noreplace
file_info:
mode: 0644
- src: ./docs/packaging/headscale.systemd.service
- src: ./packaging/systemd/headscale.service
dst: /usr/lib/systemd/system/headscale.service
- dst: /var/lib/headscale
type: dir
- dst: /var/run/headscale
type: dir
- src: LICENSE
dst: /usr/share/doc/headscale/copyright
scripts:
postinstall: ./docs/packaging/postinstall.sh
postremove: ./docs/packaging/postremove.sh
postinstall: ./packaging/deb/postinst
postremove: ./packaging/deb/postrm
preremove: ./packaging/deb/prerm
deb:
lintian_overrides:
- no-changelog # Our CHANGELOG.md uses a different formatting
- no-manual-page
- statically-linked-binary

kos:
- id: ghcr
repository: ghcr.io/juanfont/headscale
repositories:
- ghcr.io/juanfont/headscale
- headscale/headscale

# bare tells KO to only use the repository
# for tagging and naming the container.
@@ -94,32 +109,7 @@ kos:
- CGO_ENABLED=0
platforms:
- linux/amd64
- linux/386
- linux/arm64
- linux/arm/v7
tags:
- "{{ if not .Prerelease }}latest{{ end }}"
- "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}.{{ .Patch }}{{ end }}"
- "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}{{ end }}"
- "{{ if not .Prerelease }}{{ .Major }}{{ end }}"
- "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}.{{ .Patch }}{{ end }}"
- "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}{{ end }}"
- "{{ if not .Prerelease }}v{{ .Major }}{{ end }}"
- "{{ if not .Prerelease }}stable{{ else }}unstable{{ end }}"
- "{{ .Tag }}"
- '{{ trimprefix .Tag "v" }}'
- "sha-{{ .ShortCommit }}"

- id: dockerhub
build: headscale
base_image: gcr.io/distroless/base-debian12
repository: headscale/headscale
bare: true
platforms:
- linux/amd64
- linux/386
- linux/arm64
- linux/arm/v7
tags:
- "{{ if not .Prerelease }}latest{{ end }}"
- "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}.{{ .Patch }}{{ end }}"
@@ -134,7 +124,10 @@ kos:
- "sha-{{ .ShortCommit }}"

- id: ghcr-debug
repository: ghcr.io/juanfont/headscale
repositories:
- ghcr.io/juanfont/headscale
- headscale/headscale

bare: true
base_image: gcr.io/distroless/base-debian12:debug
build: headscale
@@ -143,32 +136,7 @@ kos:
- CGO_ENABLED=0
platforms:
- linux/amd64
- linux/386
- linux/arm64
- linux/arm/v7
tags:
- "{{ if not .Prerelease }}latest-debug{{ end }}"
- "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}.{{ .Patch }}-debug{{ end }}"
- "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}-debug{{ end }}"
- "{{ if not .Prerelease }}{{ .Major }}-debug{{ end }}"
- "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}.{{ .Patch }}-debug{{ end }}"
- "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}-debug{{ end }}"
- "{{ if not .Prerelease }}v{{ .Major }}-debug{{ end }}"
- "{{ if not .Prerelease }}stable-debug{{ else }}unstable-debug{{ end }}"
- "{{ .Tag }}-debug"
- '{{ trimprefix .Tag "v" }}-debug'
- "sha-{{ .ShortCommit }}-debug"

- id: dockerhub-debug
build: headscale
base_image: gcr.io/distroless/base-debian12:debug
repository: headscale/headscale
bare: true
platforms:
- linux/amd64
- linux/386
- linux/arm64
- linux/arm/v7
tags:
- "{{ if not .Prerelease }}latest-debug{{ end }}"
- "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}.{{ .Patch }}-debug{{ end }}"
@@ -1,4 +1,5 @@
.github/workflows/test-integration-v2*
docs/about/features.md
docs/ref/configuration.md
docs/ref/oidc.md
docs/ref/remote-cli.md
CHANGELOG.md (346 changes)
@@ -2,7 +2,312 @@

## Next

## 0.24.0 (2024-xx-xx)
**Minimum supported Tailscale client version: v1.64.0**

### Database integrity improvements

This release includes a significant database migration that addresses longstanding
issues with the database schema and data integrity that has accumulated over the
years. The migration introduces a `schema.sql` file as the source of truth for
the expected database schema to ensure new migrations that will cause divergence
does not occur again.

These issues arose from a combination of factors discovered over time: SQLite
foreign keys not being enforced for many early versions, all migrations being
run in one large function until version 0.23.0, and inconsistent use of GORM's
AutoMigrate feature. Moving forward, all new migrations will be explicit SQL
operations rather than relying on GORM AutoMigrate, and foreign keys will be
enforced throughout the migration process.

We are only improving SQLite databases with this change - PostgreSQL databases
are not affected.

Please read the [PR description](https://github.com/juanfont/headscale/pull/2617)
for more technical details about the issues and solutions.

**SQLite Database Backup Example:**
```bash
# Stop headscale
systemctl stop headscale

# Backup sqlite database
cp /var/lib/headscale/db.sqlite /var/lib/headscale/db.sqlite.backup

# Backup sqlite WAL/SHM files (if they exist)
cp /var/lib/headscale/db.sqlite-wal /var/lib/headscale/db.sqlite-wal.backup
cp /var/lib/headscale/db.sqlite-shm /var/lib/headscale/db.sqlite-shm.backup

# Start headscale (migration will run automatically)
systemctl start headscale
```
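As a quick sanity check around the backup and migration above, SQLite's built-in integrity check can be run against the database file; this is a general SQLite command (assuming the default path from the example), not a step the release notes prescribe:

```bash
# Optional: confirm the database file is internally consistent before upgrading.
sqlite3 /var/lib/headscale/db.sqlite "PRAGMA integrity_check;"
```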
### BREAKING
|
||||
|
||||
- Remove support for 32-bit binaries
|
||||
[#2692](https://github.com/juanfont/headscale/pull/2692)
|
||||
- Policy: Zero or empty destination port is no longer allowed
|
||||
[#2606](https://github.com/juanfont/headscale/pull/2606)
|
||||
|
||||
### Changes
|
||||
|
||||
- **Database schema migration improvements for SQLite**
|
||||
[#2617](https://github.com/juanfont/headscale/pull/2617)
|
||||
- **IMPORTANT: Backup your SQLite database before upgrading**
|
||||
- Introduces safer table renaming migration strategy
|
||||
- Addresses longstanding database integrity issues
|
||||
|
||||
- Remove policy v1 code [#2600](https://github.com/juanfont/headscale/pull/2600)
|
||||
- Refactor Debian/Ubuntu packaging and drop support for Ubuntu 20.04.
|
||||
[#2614](https://github.com/juanfont/headscale/pull/2614)
|
||||
- Support client verify for DERP
|
||||
[#2046](https://github.com/juanfont/headscale/pull/2046)
|
||||
- Remove redundant check regarding `noise` config
|
||||
[#2658](https://github.com/juanfont/headscale/pull/2658)
|
||||
- Refactor OpenID Connect documentation
|
||||
[#2625](https://github.com/juanfont/headscale/pull/2625)
|
||||
- Don't crash if config file is missing
|
||||
[#2656](https://github.com/juanfont/headscale/pull/2656)
|
||||
|
||||
## 0.26.1 (2025-06-06)
|
||||
|
||||
### Changes
|
||||
|
||||
- Ensure nodes are matching both node key and machine key when connecting.
|
||||
[#2642](https://github.com/juanfont/headscale/pull/2642)
|
||||
|
||||
## 0.26.0 (2025-05-14)
|
||||
|
||||
### BREAKING
|
||||
|
||||
#### Routes
|
||||
|
||||
Route internals have been rewritten, removing the dedicated route table in the
|
||||
database. This was done to simplify the codebase, which had grown unnecessarily
|
||||
complex after the routes were split into separate tables. The overhead of having
|
||||
to go via the database and keeping the state in sync made the code very hard to
|
||||
reason about and prone to errors. The majority of the route state is only
|
||||
relevant when headscale is running, and is now only kept in memory. As part of
|
||||
this, the CLI and API has been simplified to reflect the changes;
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | ts-head-ruqsg8 | | 0.0.0.0/0, ::/0 |
|
||||
2 | ts-unstable-fq7ob4 | | 0.0.0.0/0, ::/0 |
|
||||
|
||||
$ headscale nodes approve-routes --identifier 1 --routes 0.0.0.0/0,::/0
|
||||
Node updated
|
||||
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | ts-head-ruqsg8 | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0
|
||||
2 | ts-unstable-fq7ob4 | | 0.0.0.0/0, ::/0 |
|
||||
```
|
||||
|
||||
Note that if an exit route is approved (0.0.0.0/0 or ::/0), both IPv4 and IPv6
|
||||
will be approved.
|
||||
|
||||
- Route API and CLI has been removed
|
||||
[#2422](https://github.com/juanfont/headscale/pull/2422)
|
||||
- Routes are now managed via the Node API
|
||||
[#2422](https://github.com/juanfont/headscale/pull/2422)
|
||||
- Only routes accessible to the node will be sent to the node
|
||||
[#2561](https://github.com/juanfont/headscale/pull/2561)
|
||||
|
||||
#### Policy v2
|
||||
|
||||
This release introduces a new policy implementation. The new policy is a
|
||||
complete rewrite, and it introduces some significant quality and consistency
|
||||
improvements. In principle, there are not really any new features, but some long
|
||||
standing bugs should have been resolved, or be easier to fix in the future. The
|
||||
new policy code passes all of our tests.
|
||||
|
||||
**Changes**
|
||||
|
||||
- The policy is validated and "resolved" when loading, providing errors for
|
||||
invalid rules and conditions.
|
||||
- Previously this was done as a mix between load and runtime (when it was
|
||||
applied to a node).
|
||||
- This means that when you convert the first time, what was previously a
|
||||
policy that loaded, but failed at runtime, will now fail at load time.
|
||||
- Error messages should be more descriptive and informative.
|
||||
- There is still work to be here, but it is already improved with "typing"
|
||||
(e.g. only Users can be put in Groups)
|
||||
- All users must contain an `@` character.
|
||||
- If your user naturally contains and `@`, like an email, this will just work.
|
||||
- If its based on usernames, or other identifiers not containing an `@`, an
|
||||
`@` should be appended at the end. For example, if your user is `john`, it
|
||||
must be written as `john@` in the policy.
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Migration notes when the policy is stored in the database.</summary>
|
||||
|
||||
This section **only** applies if the policy is stored in the database and
|
||||
Headscale 0.26 doesn't start due to a policy error
|
||||
(`failed to load ACL policy`).
|
||||
|
||||
- Start Headscale 0.26 with the environment variable `HEADSCALE_POLICY_V1=1`
|
||||
set. You can check that Headscale picked up the environment variable by
|
||||
observing this message during startup: `Using policy manager version: 1`
|
||||
- Dump the policy to a file: `headscale policy get > policy.json`
|
||||
- Edit `policy.json` and migrate to policy V2. Use the command
|
||||
`headscale policy check --file policy.json` to check for policy errors.
|
||||
- Load the modified policy: `headscale policy set --file policy.json`
|
||||
- Restart Headscale **without** the environment variable `HEADSCALE_POLICY_V1`.
|
||||
Headscale should now print the message `Using policy manager version: 2` and
|
||||
start up successfully.
|
||||
|
||||
</details>
|
||||
|
||||
**SSH**
|
||||
|
||||
The SSH policy has been reworked to be more consistent with the rest of the
policy. In addition, several inconsistencies between our implementation and
Tailscale's upstream have been closed, and this might be a breaking change for
some users. Please refer to the
[upstream documentation](https://tailscale.com/kb/1337/acl-syntax#tailscale-ssh)
for more information on which types are allowed in `src`, `dst` and `users`.
|
||||
|
||||
There is one large inconsistency left: we allow `*` as a destination, as we
|
||||
currently do not support `autogroup:self`, `autogroup:member` and
|
||||
`autogroup:tagged`. The support for `*` will be removed when we have support for
|
||||
the autogroups.
|
||||
|
||||
**Current state**
|
||||
|
||||
The new policy is passing all tests, both integration and unit tests. This does
not mean it is perfect, but it is a good start. Corner cases that currently
work in v1 and are not tested might be broken in v2 (and vice versa).
|
||||
|
||||
**We do need help testing this code**
|
||||
|
||||
#### Other breaking changes
|
||||
|
||||
- Disallow `server_url` and `base_domain` to be equal
|
||||
[#2544](https://github.com/juanfont/headscale/pull/2544)
|
||||
- Return full user in API for pre auth keys instead of string
|
||||
[#2542](https://github.com/juanfont/headscale/pull/2542)
|
||||
- Pre auth key API/CLI now uses ID over username
|
||||
[#2542](https://github.com/juanfont/headscale/pull/2542)
|
||||
- A non-empty list of global nameservers needs to be specified via
|
||||
`dns.nameservers.global` if the configuration option `dns.override_local_dns`
|
||||
is enabled or is not specified in the configuration file. This aligns with
|
||||
behaviour of tailscale.com.
|
||||
[#2438](https://github.com/juanfont/headscale/pull/2438)
|
||||
|
||||
### Changes
|
||||
|
||||
- Use Go 1.24 [#2427](https://github.com/juanfont/headscale/pull/2427)
|
||||
- Add `headscale policy check` command to check policy
|
||||
[#2553](https://github.com/juanfont/headscale/pull/2553)
|
||||
- `oidc.map_legacy_users` and `oidc.strip_email_domain` have been removed
|
||||
[#2411](https://github.com/juanfont/headscale/pull/2411)
|
||||
- Add more information to `/debug` endpoint
|
||||
[#2420](https://github.com/juanfont/headscale/pull/2420)
|
||||
- It is now possible to inspect running goroutines and take profiles
|
||||
- View of config, policy, filter, ssh policy per node, connected nodes and
|
||||
DERPmap
|
||||
- OIDC: Fetch UserInfo to get EmailVerified if necessary
|
||||
[#2493](https://github.com/juanfont/headscale/pull/2493)
|
||||
- If an OIDC provider doesn't include the `email_verified` claim in its ID
|
||||
tokens, Headscale will attempt to get it from the UserInfo endpoint.
|
||||
- OIDC: Try to populate name, email and username from UserInfo
|
||||
[#2545](https://github.com/juanfont/headscale/pull/2545)
|
||||
- Improve performance by only querying relevant nodes from the database for node
|
||||
updates [#2509](https://github.com/juanfont/headscale/pull/2509)
|
||||
- Node FQDNs in the netmap will now contain a dot (".") at the end. This aligns
|
||||
with behaviour of tailscale.com
|
||||
[#2503](https://github.com/juanfont/headscale/pull/2503)
|
||||
- Restore support for "Override local DNS"
|
||||
[#2438](https://github.com/juanfont/headscale/pull/2438)
|
||||
- Add documentation for routes
|
||||
[#2496](https://github.com/juanfont/headscale/pull/2496)
|
||||
- Add support for `autogroup:member`, `autogroup:tagged`
|
||||
[#2572](https://github.com/juanfont/headscale/pull/2572)
|
||||
|
||||
## 0.25.1 (2025-02-25)
|
||||
|
||||
### Changes
|
||||
|
||||
- Fix issue where registration errors were not sent correctly
|
||||
[#2435](https://github.com/juanfont/headscale/pull/2435)
|
||||
- Fix issue where routes passed on registration were not saved
|
||||
[#2444](https://github.com/juanfont/headscale/pull/2444)
|
||||
- Fix issue where registration page was displayed twice
|
||||
[#2445](https://github.com/juanfont/headscale/pull/2445)
|
||||
|
||||
## 0.25.0 (2025-02-11)
|
||||
|
||||
### BREAKING
|
||||
|
||||
- Authentication flow has been rewritten
|
||||
[#2374](https://github.com/juanfont/headscale/pull/2374) This change should be
|
||||
transparent to users, with the exception of some bugfixes that have been
discovered and fixed as part of the rewrite.
|
||||
- When a node is registered with _a new user_, it will be registered as a new
|
||||
node ([#2327](https://github.com/juanfont/headscale/issues/2327) and
|
||||
[#1310](https://github.com/juanfont/headscale/issues/1310)).
|
||||
- A logged out node logging in with the same user will replace the existing
|
||||
node.
|
||||
- Remove support for Tailscale clients older than 1.62 (Capability version 87)
|
||||
[#2405](https://github.com/juanfont/headscale/pull/2405)
|
||||
|
||||
### Changes
|
||||
|
||||
- `oidc.map_legacy_users` is now `false` by default
|
||||
[#2350](https://github.com/juanfont/headscale/pull/2350)
|
||||
- Print Tailscale version instead of capability versions for outdated nodes
|
||||
[#2391](https://github.com/juanfont/headscale/pull/2391)
|
||||
- Do not allow renaming of users from OIDC
|
||||
[#2393](https://github.com/juanfont/headscale/pull/2393)
|
||||
- Change minimum hostname length to 2
|
||||
[#2393](https://github.com/juanfont/headscale/pull/2393)
|
||||
- Fix migration error caused by nodes having invalid auth keys
|
||||
[#2412](https://github.com/juanfont/headscale/pull/2412)
|
||||
- Pre auth keys belonging to a user are no longer deleted with the user
|
||||
[#2396](https://github.com/juanfont/headscale/pull/2396)
|
||||
- Pre auth keys that are used by a node can no longer be deleted
|
||||
[#2396](https://github.com/juanfont/headscale/pull/2396)
|
||||
- Overhaul HTTP errors, returning better status codes and errors to users
|
||||
[#2398](https://github.com/juanfont/headscale/pull/2398)
|
||||
- Print headscale version and commit on server startup
|
||||
[#2415](https://github.com/juanfont/headscale/pull/2415)
|
||||
|
||||
## 0.24.3 (2025-02-07)
|
||||
|
||||
### Changes
|
||||
|
||||
- Fix migration error caused by nodes having invalid auth keys
|
||||
[#2412](https://github.com/juanfont/headscale/pull/2412)
|
||||
- Pre auth keys belonging to a user are no longer deleted with the user
|
||||
[#2396](https://github.com/juanfont/headscale/pull/2396)
|
||||
- Pre auth keys that are used by a node can no longer be deleted
|
||||
[#2396](https://github.com/juanfont/headscale/pull/2396)
|
||||
|
||||
## 0.24.2 (2025-01-30)
|
||||
|
||||
### Changes
|
||||
|
||||
- Fix issue where email and username being equal fails to match in Policy
|
||||
[#2388](https://github.com/juanfont/headscale/pull/2388)
|
||||
- Delete invalid routes before adding a NOT NULL constraint on node_id
|
||||
[#2386](https://github.com/juanfont/headscale/pull/2386)
|
||||
|
||||
## 0.24.1 (2025-01-23)
|
||||
|
||||
### Changes
|
||||
|
||||
- Fix migration issue with user table for PostgreSQL
|
||||
[#2367](https://github.com/juanfont/headscale/pull/2367)
|
||||
- Relax username validation to allow emails
|
||||
[#2364](https://github.com/juanfont/headscale/pull/2364)
|
||||
- Remove invalid routes and add stronger constraints for routes to avoid API
|
||||
panic [#2371](https://github.com/juanfont/headscale/pull/2371)
|
||||
- Fix panic when `derp.update_frequency` is 0
|
||||
[#2368](https://github.com/juanfont/headscale/pull/2368)
|
||||
|
||||
## 0.24.0 (2025-01-17)
|
||||
|
||||
### Security fix: OIDC changes in Headscale 0.24.0
|
||||
|
||||
@@ -33,8 +338,7 @@ and have it populate to Headscale automatically the next time they log in.
|
||||
However, this may affect the way you reference users in policies.
|
||||
|
||||
Headscale v0.23.0 and earlier never recorded the `iss` and `sub` fields, so all
|
||||
legacy (existing) OIDC accounts _need to be migrated_ to be properly
|
||||
secured.
|
||||
legacy (existing) OIDC accounts _need to be migrated_ to be properly secured.
|
||||
|
||||
#### What do I need to do to migrate?
|
||||
|
||||
@@ -46,8 +350,8 @@ The migration will mostly be done automatically, with one exception. If your
|
||||
OIDC does not provide an `email_verified` claim, Headscale will ignore the
|
||||
`email`. This means that either the administrator will have to mark the user
|
||||
emails as verified, or ensure the users verify their emails. Any unverified
|
||||
emails will be ignored, meaning that the users will get new accounts instead
|
||||
of being migrated.
|
||||
emails will be ignored, meaning that the users will get new accounts instead of
|
||||
being migrated.
|
||||
|
||||
After this exception is ensured, make all users log into Headscale with their
|
||||
account, and Headscale will automatically update the account record. This will
|
||||
@@ -148,12 +452,13 @@ This will also affect the way you
|
||||
- User gRPC/API [#2261](https://github.com/juanfont/headscale/pull/2261):
|
||||
- If you depend on a Headscale Web UI, you should wait with this update until
|
||||
the UI have been updated to match the new API.
|
||||
- `GET /api/v1/user/{name}` and `GetUser` have been removed in favour of `ListUsers` with an ID parameter
|
||||
- `GET /api/v1/user/{name}` and `GetUser` have been removed in favour of
|
||||
`ListUsers` with an ID parameter
|
||||
- `RenameUser` and `DeleteUser` now require an ID instead of a name.
|
||||
|
||||
### Changes
|
||||
|
||||
- Improved compatibilty of built-in DERP server with clients connecting over
|
||||
- Improved compatibility of built-in DERP server with clients connecting over
|
||||
WebSocket [#2132](https://github.com/juanfont/headscale/pull/2132)
|
||||
- Allow nodes to use SSH agent forwarding
|
||||
[#2145](https://github.com/juanfont/headscale/pull/2145)
|
||||
@@ -164,13 +469,18 @@ This will also affect the way you
|
||||
- Fixed updating of hostname and givenName when it is updated in HostInfo
|
||||
[#2199](https://github.com/juanfont/headscale/pull/2199)
|
||||
- Fixed missing `stable-debug` container tag
|
||||
[#2232](https://github.com/juanfont/headscale/pr/2232)
|
||||
[#2232](https://github.com/juanfont/headscale/pull/2232)
|
||||
- Loosened up `server_url` and `base_domain` check. It was overly strict in some
|
||||
cases. [#2248](https://github.com/juanfont/headscale/pull/2248)
|
||||
- CLI for managing users now accepts `--identifier` in addition to `--name`,
|
||||
usage of `--identifier` is recommended
|
||||
[#2261](https://github.com/juanfont/headscale/pull/2261)
|
||||
- Add `dns.extra_records_path` configuration option [#2262](https://github.com/juanfont/headscale/issues/2262)
|
||||
- Add `dns.extra_records_path` configuration option
|
||||
[#2262](https://github.com/juanfont/headscale/issues/2262)
|
||||
- Support client verify for DERP
|
||||
[#2046](https://github.com/juanfont/headscale/pull/2046)
|
||||
- Add PKCE Verifier for OIDC
|
||||
[#2314](https://github.com/juanfont/headscale/pull/2314)
|
||||
|
||||
## 0.23.0 (2024-09-18)
|
||||
|
||||
@@ -253,14 +563,14 @@ part of adopting [#1460](https://github.com/juanfont/headscale/pull/1460).
|
||||
- Entrypoint of container image has changed from shell to headscale, require
|
||||
change from `headscale serve` to `serve`
|
||||
- `/var/lib/headscale` and `/var/run/headscale` is no longer created
|
||||
automatically, see [container docs](./docs/running-headscale-container.md)
|
||||
automatically, see [container docs](./docs/setup/install/container.md)
|
||||
- Prefixes are now defined per v4 and v6 range.
|
||||
[#1756](https://github.com/juanfont/headscale/pull/1756)
|
||||
- `ip_prefixes` option is now `prefixes.v4` and `prefixes.v6`
|
||||
- `prefixes.allocation` can be set to assign IPs at `sequential` or `random`.
|
||||
[#1869](https://github.com/juanfont/headscale/pull/1869)
|
||||
- MagicDNS domains no longer contain usernames []()
|
||||
- This is in preperation to fix Headscales implementation of tags which
|
||||
- This is in preparation to fix Headscales implementation of tags which
|
||||
currently does not correctly remove the link between a tagged device and a
|
||||
user. As tagged devices will not have a user, this will require a change to
|
||||
the DNS generation, removing the username, see
|
||||
@@ -346,7 +656,7 @@ part of adopting [#1460](https://github.com/juanfont/headscale/pull/1460).
|
||||
[#1391](https://github.com/juanfont/headscale/pull/1391)
|
||||
- Improvements on Noise implementation
|
||||
[#1379](https://github.com/juanfont/headscale/pull/1379)
|
||||
- Replace node filter logic, ensuring nodes with access can see eachother
|
||||
- Replace node filter logic, ensuring nodes with access can see each other
|
||||
[#1381](https://github.com/juanfont/headscale/pull/1381)
|
||||
- Disable (or delete) both exit routes at the same time
|
||||
[#1428](https://github.com/juanfont/headscale/pull/1428)
|
||||
@@ -643,7 +953,7 @@ part of adopting [#1460](https://github.com/juanfont/headscale/pull/1460).
|
||||
|
||||
- Boundaries between Namespaces has been removed and all nodes can communicate
|
||||
by default [#357](https://github.com/juanfont/headscale/pull/357)
|
||||
- To limit access between nodes, use [ACLs](./docs/acls.md).
|
||||
- To limit access between nodes, use [ACLs](./docs/ref/acls.md).
|
||||
- `/metrics` is now a configurable host:port endpoint:
|
||||
[#344](https://github.com/juanfont/headscale/pull/344). You must update your
|
||||
`config.yaml` file to include:
|
||||
@@ -701,13 +1011,12 @@ behaviour.
|
||||
- All machines can communicate with all machines by default
|
||||
- Tags should now work correctly and adding a host to Headscale should now
|
||||
reload the rules.
|
||||
- The documentation have a [fictional example](docs/acls.md) that should cover
|
||||
some use cases of the ACLs features
|
||||
- The documentation have a [fictional example](./docs/ref/acls.md) that should
|
||||
cover some use cases of the ACLs features
|
||||
|
||||
### Features
|
||||
|
||||
- Add support for configurable mTLS
|
||||
[docs](docs/tls.md#configuring-mutual-tls-authentication-mtls)
|
||||
- Add support for configurable mTLS [docs](./docs/ref/tls.md)
|
||||
[#297](https://github.com/juanfont/headscale/pull/297)
|
||||
|
||||
### Changes
|
||||
@@ -721,7 +1030,8 @@ behaviour.
|
||||
|
||||
- Add IPv6 support to the prefix assigned to namespaces
|
||||
- Add API Key support
|
||||
- Enable remote control of `headscale` via CLI [docs](docs/remote-cli.md)
|
||||
- Enable remote control of `headscale` via CLI
|
||||
[docs](./docs/ref/remote-cli.md)
|
||||
- Enable HTTP API (beta, subject to change)
|
||||
- OpenID Connect users will be mapped per namespaces
|
||||
- Each user will get its own namespace, created if it does not exist
|
||||
|
||||
CLAUDE.md (new file)
@@ -0,0 +1,395 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Overview
|
||||
|
||||
Headscale is an open-source implementation of the Tailscale control server written in Go. It provides self-hosted coordination for Tailscale networks (tailnets), managing node registration, IP allocation, policy enforcement, and DERP routing.
|
||||
|
||||
## Development Commands
|
||||
|
||||
### Quick Setup
|
||||
```bash
|
||||
# Recommended: Use Nix for dependency management
|
||||
nix develop
|
||||
|
||||
# Full development workflow
|
||||
make dev # runs fmt + lint + test + build
|
||||
```
|
||||
|
||||
### Essential Commands
|
||||
```bash
|
||||
# Build headscale binary
|
||||
make build
|
||||
|
||||
# Run tests
|
||||
make test
|
||||
go test ./... # All unit tests
|
||||
go test -race ./... # With race detection
|
||||
|
||||
# Run specific integration test
|
||||
go run ./cmd/hi run "TestName" --postgres
|
||||
|
||||
# Code formatting and linting
|
||||
make fmt # Format all code (Go, docs, proto)
|
||||
make lint # Lint all code (Go, proto)
|
||||
make fmt-go # Format Go code only
|
||||
make lint-go # Lint Go code only
|
||||
|
||||
# Protocol buffer generation (after modifying proto/)
|
||||
make generate
|
||||
|
||||
# Clean build artifacts
|
||||
make clean
|
||||
```
|
||||
|
||||
### Integration Testing
|
||||
```bash
|
||||
# Use the hi (Headscale Integration) test runner
|
||||
go run ./cmd/hi doctor # Check system requirements
|
||||
go run ./cmd/hi run "TestPattern" # Run specific test
|
||||
go run ./cmd/hi run "TestPattern" --postgres # With PostgreSQL backend
|
||||
|
||||
# Test artifacts are saved to control_logs/ with logs and debug data
|
||||
```
|
||||
|
||||
## Project Structure & Architecture
|
||||
|
||||
### Top-Level Organization
|
||||
|
||||
```
|
||||
headscale/
|
||||
├── cmd/ # Command-line applications
|
||||
│ ├── headscale/ # Main headscale server binary
|
||||
│ └── hi/ # Headscale Integration test runner
|
||||
├── hscontrol/ # Core control plane logic
|
||||
├── integration/ # End-to-end Docker-based tests
|
||||
├── proto/ # Protocol buffer definitions
|
||||
├── gen/ # Generated code (protobuf)
|
||||
├── docs/ # Documentation
|
||||
└── packaging/ # Distribution packaging
|
||||
```
|
||||
|
||||
### Core Packages (`hscontrol/`)
|
||||
|
||||
**Main Server (`hscontrol/`)**
|
||||
- `app.go`: Application setup, dependency injection, server lifecycle
|
||||
- `handlers.go`: HTTP/gRPC API endpoints for management operations
|
||||
- `grpcv1.go`: gRPC service implementation for headscale API
|
||||
- `poll.go`: **Critical** - Handles Tailscale MapRequest/MapResponse protocol
|
||||
- `noise.go`: Noise protocol implementation for secure client communication
|
||||
- `auth.go`: Authentication flows (web, OIDC, command-line)
|
||||
- `oidc.go`: OpenID Connect integration for user authentication
|
||||
|
||||
**State Management (`hscontrol/state/`)**
|
||||
- `state.go`: Central coordinator for all subsystems (database, policy, IP allocation, DERP)
|
||||
- `node_store.go`: **Performance-critical** - In-memory cache with copy-on-write semantics (a generic sketch of the pattern follows after this list)
|
||||
- Thread-safe operations with deadlock detection
|
||||
- Coordinates between database persistence and real-time operations
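
A generic, self-contained sketch of the copy-on-write pattern referenced above. It illustrates the technique only and is not headscale's actual `NodeStore` implementation or API:

```go
package cowsketch

import (
	"sync"
	"sync/atomic"
)

// Store keeps an immutable map snapshot that readers load without locking;
// writers copy the snapshot, modify the copy, and atomically swap it in.
type Store[K comparable, V any] struct {
	mu       sync.Mutex   // serialises writers only
	snapshot atomic.Value // always holds a map[K]V
}

func New[K comparable, V any]() *Store[K, V] {
	s := &Store[K, V]{}
	s.snapshot.Store(map[K]V{})
	return s
}

// Get is lock-free for readers: it sees either the old or the new snapshot,
// never a partially updated map.
func (s *Store[K, V]) Get(k K) (V, bool) {
	m := s.snapshot.Load().(map[K]V)
	v, ok := m[k]
	return v, ok
}

// Put copies the current snapshot, applies the change, and swaps it in.
func (s *Store[K, V]) Put(k K, v V) {
	s.mu.Lock()
	defer s.mu.Unlock()
	old := s.snapshot.Load().(map[K]V)
	next := make(map[K]V, len(old)+1)
	for key, val := range old {
		next[key] = val
	}
	next[k] = v
	s.snapshot.Store(next)
}
```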
|
||||
|
||||
**Database Layer (`hscontrol/db/`)**
|
||||
- `db.go`: Database abstraction, GORM setup, migration management
|
||||
- `node.go`: Node lifecycle, registration, expiration, IP assignment
|
||||
- `users.go`: User management, namespace isolation
|
||||
- `api_key.go`: API authentication tokens
|
||||
- `preauth_keys.go`: Pre-authentication keys for automated node registration
|
||||
- `ip.go`: IP address allocation and management
|
||||
- `policy.go`: Policy storage and retrieval
|
||||
- Schema migrations in `schema.sql` with extensive test data coverage
|
||||
|
||||
**Policy Engine (`hscontrol/policy/`)**
|
||||
- `policy.go`: Core ACL evaluation logic, HuJSON parsing
|
||||
- `v2/`: Next-generation policy system with improved filtering
|
||||
- `matcher/`: ACL rule matching and evaluation engine
|
||||
- Determines peer visibility, route approval, and network access rules
|
||||
- Supports both file-based and database-stored policies
|
||||
|
||||
**Network Management (`hscontrol/`)**
|
||||
- `derp/`: DERP (Designated Encrypted Relay for Packets) server implementation
|
||||
- NAT traversal when direct connections fail
|
||||
- Fallback relay for firewall-restricted environments
|
||||
- `mapper/`: Converts internal Headscale state to Tailscale's wire protocol format
|
||||
- `tail.go`: Tailscale-specific data structure generation
|
||||
- `routes/`: Subnet route management and primary route selection
|
||||
- `dns/`: DNS record management and MagicDNS implementation
|
||||
|
||||
**Utilities & Support (`hscontrol/`)**
|
||||
- `types/`: Core data structures, configuration, validation
|
||||
- `util/`: Helper functions for networking, DNS, key management
|
||||
- `templates/`: Client configuration templates (Apple, Windows, etc.)
|
||||
- `notifier/`: Event notification system for real-time updates
|
||||
- `metrics.go`: Prometheus metrics collection
|
||||
- `capver/`: Tailscale capability version management
|
||||
|
||||
### Key Subsystem Interactions
|
||||
|
||||
**Node Registration Flow**
|
||||
1. **Client Connection**: `noise.go` handles secure protocol handshake
|
||||
2. **Authentication**: `auth.go` validates credentials (web/OIDC/preauth)
|
||||
3. **State Creation**: `state.go` coordinates IP allocation via `db/ip.go`
|
||||
4. **Storage**: `db/node.go` persists node, `NodeStore` caches in memory
|
||||
5. **Network Setup**: `mapper/` generates initial Tailscale network map
|
||||
|
||||
**Ongoing Operations**
|
||||
1. **Poll Requests**: `poll.go` receives periodic client updates
|
||||
2. **State Updates**: `NodeStore` maintains real-time node information
|
||||
3. **Policy Application**: `policy/` evaluates ACL rules for peer relationships
|
||||
4. **Map Distribution**: `mapper/` sends network topology to all affected clients
|
||||
|
||||
**Route Management**
|
||||
1. **Advertisement**: Clients announce routes via `poll.go` Hostinfo updates
|
||||
2. **Storage**: `db/` persists routes, `NodeStore` caches for performance
|
||||
3. **Approval**: `policy/` auto-approves routes based on ACL rules
|
||||
4. **Distribution**: `routes/` selects primary routes, `mapper/` distributes to peers
|
||||
|
||||
### Command-Line Tools (`cmd/`)
|
||||
|
||||
**Main Server (`cmd/headscale/`)**
|
||||
- `headscale.go`: CLI parsing, configuration loading, server startup
|
||||
- Supports daemon mode, CLI operations (user/node management), database operations
|
||||
|
||||
**Integration Test Runner (`cmd/hi/`)**
|
||||
- `main.go`: Test execution framework with Docker orchestration
|
||||
- `run.go`: Individual test execution with artifact collection
|
||||
- `doctor.go`: System requirements validation
|
||||
- `docker.go`: Container lifecycle management
|
||||
- Essential for validating changes against real Tailscale clients
|
||||
|
||||
### Generated & External Code
|
||||
|
||||
**Protocol Buffers (`proto/` → `gen/`)**
|
||||
- Defines gRPC API for headscale management operations
|
||||
- Client libraries can generate from these definitions (a minimal Go sketch follows below)
|
||||
- Run `make generate` after modifying `.proto` files
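
As an illustration, a small Go program can use the generated client to list nodes over the management gRPC API. This is a minimal sketch: the address assumes headscale's default gRPC listen port, and the TLS and API-key credentials a real deployment requires are deliberately omitted:

```go
package main

import (
	"context"
	"fmt"
	"log"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Sketch only: a real client must use TLS and attach an API key to the
	// outgoing context metadata; both are omitted here for brevity.
	conn, err := grpc.NewClient("127.0.0.1:50443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)
	resp, err := client.ListNodes(context.Background(), &v1.ListNodesRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, node := range resp.GetNodes() {
		fmt.Println(node.GetId(), node.GetGivenName())
	}
}
```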
|
||||
|
||||
**Integration Testing (`integration/`)**
|
||||
- `scenario.go`: Docker test environment setup
|
||||
- `tailscale.go`: Tailscale client container management
|
||||
- Individual test files for specific functionality areas
|
||||
- Real end-to-end validation with network isolation
|
||||
|
||||
### Critical Performance Paths
|
||||
|
||||
**High-Frequency Operations**
|
||||
1. **MapRequest Processing** (`poll.go`): Every 15-60 seconds per client
|
||||
2. **NodeStore Reads** (`node_store.go`): Every operation requiring node data
|
||||
3. **Policy Evaluation** (`policy/`): On every peer relationship calculation
|
||||
4. **Route Lookups** (`routes/`): During network map generation
|
||||
|
||||
**Database Write Patterns**
|
||||
- **Frequent**: Node heartbeats, endpoint updates, route changes
|
||||
- **Moderate**: User operations, policy updates, API key management
|
||||
- **Rare**: Schema migrations, bulk operations
|
||||
|
||||
### Configuration & Deployment
|
||||
|
||||
**Configuration (`hscontrol/types/config.go`)**
|
||||
- Database connection settings (SQLite/PostgreSQL)
|
||||
- Network configuration (IP ranges, DNS settings)
|
||||
- Policy mode (file vs database)
|
||||
- DERP relay configuration
|
||||
- OIDC provider settings
|
||||
|
||||
**Key Dependencies**
|
||||
- **GORM**: Database ORM with migration support
|
||||
- **Tailscale Libraries**: Core networking and protocol code
|
||||
- **Zerolog**: Structured logging throughout the application
|
||||
- **Buf**: Protocol buffer toolchain for code generation
|
||||
|
||||
### Development Workflow Integration
|
||||
|
||||
The architecture supports incremental development:
|
||||
- **Unit Tests**: Focus on individual packages (`*_test.go` files)
|
||||
- **Integration Tests**: Validate cross-component interactions
|
||||
- **Database Tests**: Extensive migration and data integrity validation
|
||||
- **Policy Tests**: ACL rule evaluation and edge cases
|
||||
- **Performance Tests**: NodeStore and high-frequency operation validation
|
||||
|
||||
## Integration Test System
|
||||
|
||||
### Overview
|
||||
Integration tests use Docker containers running real Tailscale clients against a Headscale server. Tests validate end-to-end functionality including routing, ACLs, node lifecycle, and network coordination.
|
||||
|
||||
### Running Integration Tests
|
||||
|
||||
**System Requirements**
|
||||
```bash
|
||||
# Check if your system is ready
|
||||
go run ./cmd/hi doctor
|
||||
```
|
||||
This verifies Docker, Go, required images, and disk space.
|
||||
|
||||
**Test Execution Patterns**
|
||||
```bash
|
||||
# Run a single test (recommended for development)
|
||||
go run ./cmd/hi run "TestSubnetRouterMultiNetwork"
|
||||
|
||||
# Run with PostgreSQL backend (for database-heavy tests)
|
||||
go run ./cmd/hi run "TestExpireNode" --postgres
|
||||
|
||||
# Run multiple tests with pattern matching
|
||||
go run ./cmd/hi run "TestSubnet*"
|
||||
|
||||
# Run all integration tests (CI/full validation)
|
||||
go test ./integration -timeout 30m
|
||||
```
|
||||
|
||||
**Test Categories & Timing**
|
||||
- **Fast tests** (< 2 min): Basic functionality, CLI operations
|
||||
- **Medium tests** (2-5 min): Route management, ACL validation
|
||||
- **Slow tests** (5+ min): Node expiration, HA failover
|
||||
- **Long-running tests** (10+ min): `TestNodeOnlineStatus` (12 min duration)
|
||||
|
||||
### Test Infrastructure
|
||||
|
||||
**Docker Setup**
|
||||
- Headscale server container with configurable database backend
|
||||
- Multiple Tailscale client containers with different versions
|
||||
- Isolated networks per test scenario
|
||||
- Automatic cleanup after test completion
|
||||
|
||||
**Test Artifacts**
|
||||
All test runs save artifacts to `control_logs/TIMESTAMP-ID/`:
|
||||
```
|
||||
control_logs/20250713-213106-iajsux/
|
||||
├── hs-testname-abc123.stderr.log # Headscale server logs
|
||||
├── hs-testname-abc123.stdout.log
|
||||
├── hs-testname-abc123.db # Database snapshot
|
||||
├── hs-testname-abc123_metrics.txt # Prometheus metrics
|
||||
├── hs-testname-abc123-mapresponses/ # Protocol debug data
|
||||
├── ts-client-xyz789.stderr.log # Tailscale client logs
|
||||
├── ts-client-xyz789.stdout.log
|
||||
└── ts-client-xyz789_status.json # Client status dump
|
||||
```
|
||||
|
||||
### Test Development Guidelines
|
||||
|
||||
**Timing Considerations**
|
||||
Integration tests involve real network operations and Docker container lifecycle:
|
||||
|
||||
```go
|
||||
// ❌ Wrong: Immediate assertions after async operations
|
||||
client.Execute([]string{"tailscale", "set", "--advertise-routes=10.0.0.0/24"})
|
||||
nodes, _ := headscale.ListNodes()
|
||||
require.Len(t, nodes[0].GetAvailableRoutes(), 1) // May fail due to timing
|
||||
|
||||
// ✅ Correct: Wait for async operations to complete
|
||||
client.Execute([]string{"tailscale", "set", "--advertise-routes=10.0.0.0/24"})
|
||||
require.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, nodes[0].GetAvailableRoutes(), 1)
|
||||
}, 10*time.Second, 100*time.Millisecond, "route should be advertised")
|
||||
```
|
||||
|
||||
**Common Test Patterns**
|
||||
- **Route Advertisement**: Use `EventuallyWithT` for route propagation
|
||||
- **Node State Changes**: Wait for NodeStore synchronization
|
||||
- **ACL Policy Changes**: Allow time for policy recalculation
|
||||
- **Network Connectivity**: Use ping tests with retries (see the sketch below)
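
A sketch of that connectivity pattern, in the same fragment style as the example above. `client` and `peer` stand for the test's Tailscale client wrappers, and `Ping`/`IPv4` are stand-ins for whatever helpers the harness actually provides — treat the exact names as assumptions:

```go
// Retry until the peer becomes reachable over the tailnet instead of
// asserting connectivity immediately after setup.
require.EventuallyWithT(t, func(c *assert.CollectT) {
	// Ping is a placeholder for the harness' connectivity check.
	assert.NoError(c, client.Ping(peer.IPv4().String()))
}, 30*time.Second, 500*time.Millisecond, "peer should become reachable")
```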
|
||||
|
||||
**Test Data Management**
|
||||
```go
|
||||
// Node identification: Don't assume array ordering
|
||||
expectedRoutes := map[string]string{"1": "10.33.0.0/16"}
|
||||
for _, node := range nodes {
|
||||
nodeIDStr := fmt.Sprintf("%d", node.GetId())
|
||||
if route, shouldHaveRoute := expectedRoutes[nodeIDStr]; shouldHaveRoute {
|
||||
// Test the node that should have the route
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Troubleshooting Integration Tests
|
||||
|
||||
**Common Failure Patterns**
|
||||
1. **Timing Issues**: Test assertions run before async operations complete
|
||||
- **Solution**: Use `EventuallyWithT` with appropriate timeouts
|
||||
- **Timeout Guidelines**: 3-5s for route operations, 10s for complex scenarios
|
||||
|
||||
2. **Infrastructure Problems**: Disk space, Docker issues, network conflicts
|
||||
- **Check**: `go run ./cmd/hi doctor` for system health
|
||||
- **Clean**: Remove old test containers and networks
|
||||
|
||||
3. **NodeStore Synchronization**: Tests expecting immediate data availability
|
||||
- **Key Points**: Route advertisements must propagate through poll requests
|
||||
- **Fix**: Wait for NodeStore updates after Hostinfo changes
|
||||
|
||||
4. **Database Backend Differences**: SQLite vs PostgreSQL behavior differences
|
||||
- **Use**: `--postgres` flag for database-intensive tests
|
||||
- **Note**: Some timing characteristics differ between backends
|
||||
|
||||
**Debugging Failed Tests**
|
||||
1. **Check test artifacts** in `control_logs/` for detailed logs
|
||||
2. **Examine MapResponse JSON** files for protocol-level debugging
|
||||
3. **Review Headscale stderr logs** for server-side error messages
|
||||
4. **Check Tailscale client status** for network-level issues
|
||||
|
||||
**Resource Management**
|
||||
- Tests require significant disk space (each run ~100MB of logs)
|
||||
- Docker containers are cleaned up automatically on success
|
||||
- Failed tests may leave containers running - clean manually if needed
|
||||
- Use `docker system prune` periodically to reclaim space
|
||||
|
||||
### Best Practices for Test Modifications
|
||||
|
||||
1. **Always test locally** before committing integration test changes
|
||||
2. **Use appropriate timeouts** - too short causes flaky tests, too long slows CI
|
||||
3. **Clean up properly** - ensure tests don't leave persistent state
|
||||
4. **Handle both success and failure paths** in test scenarios
|
||||
5. **Document timing requirements** for complex test scenarios
|
||||
|
||||
## NodeStore Implementation Details
|
||||
|
||||
**Key Insight from Recent Work**: The NodeStore is a critical performance optimization that caches node data in memory while ensuring consistency with the database. When working with route advertisements or node state changes:
|
||||
|
||||
1. **Timing Considerations**: Route advertisements need time to propagate from clients to server. Use `require.EventuallyWithT()` patterns in tests instead of immediate assertions.
|
||||
|
||||
2. **Synchronization Points**: NodeStore updates happen at specific points like `poll.go:420` after Hostinfo changes. Ensure these are maintained when modifying the polling logic.
|
||||
|
||||
3. **Peer Visibility**: The NodeStore's `peersFunc` determines which nodes are visible to each other. Policy-based filtering is separate from monitoring visibility - expired nodes should remain visible for debugging but marked as expired.
|
||||
|
||||
## Testing Guidelines
|
||||
|
||||
### Integration Test Patterns
|
||||
```go
|
||||
// Use EventuallyWithT for async operations
|
||||
require.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
// Check expected state
|
||||
}, 10*time.Second, 100*time.Millisecond, "description")
|
||||
|
||||
// Node route checking by actual node properties, not array position
|
||||
var routeNode *v1.Node
|
||||
for _, node := range nodes {
|
||||
if nodeIDStr := fmt.Sprintf("%d", node.GetId()); expectedRoutes[nodeIDStr] != "" {
|
||||
routeNode = node
|
||||
break
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Running Problematic Tests
|
||||
- Some tests require significant time (e.g., `TestNodeOnlineStatus` runs for 12 minutes)
|
||||
- Infrastructure issues like disk space can cause test failures unrelated to code changes
|
||||
- Use `--postgres` flag when testing database-heavy scenarios
|
||||
|
||||
## Important Notes
|
||||
|
||||
- **Dependencies**: Use `nix develop` for consistent toolchain (Go, buf, protobuf tools, linting)
|
||||
- **Protocol Buffers**: Changes to `proto/` require `make generate` and should be committed separately
|
||||
- **Code Style**: Enforced via golangci-lint with golines (width 88) and gofumpt formatting
|
||||
- **Database**: Supports both SQLite (development) and PostgreSQL (production/testing)
|
||||
- **Integration Tests**: Require Docker and can consume significant disk space
|
||||
- **Performance**: NodeStore optimizations are critical for scale - be careful with changes to state management
|
||||
|
||||
## Debugging Integration Tests
|
||||
|
||||
Test artifacts are preserved in `control_logs/TIMESTAMP-ID/` including:
|
||||
- Headscale server logs (stderr/stdout)
|
||||
- Tailscale client logs and status
|
||||
- Database dumps and network captures
|
||||
- MapResponse JSON files for protocol debugging
|
||||
|
||||
When tests fail, check these artifacts first before assuming code issues.
|
||||
@@ -2,7 +2,7 @@
|
||||
# and are in no way endorsed by Headscale's maintainers as an
|
||||
# official nor supported release or distribution.
|
||||
|
||||
FROM docker.io/golang:1.23-bookworm
|
||||
FROM docker.io/golang:1.24-bookworm
|
||||
ARG VERSION=dev
|
||||
ENV GOPATH /go
|
||||
WORKDIR /go/src/headscale
|
||||
@@ -13,14 +13,18 @@ RUN apt-get update \
|
||||
&& apt-get clean
|
||||
RUN mkdir -p /var/run/headscale
|
||||
|
||||
# Install delve debugger
|
||||
RUN go install github.com/go-delve/delve/cmd/dlv@latest
|
||||
|
||||
COPY go.mod go.sum /go/src/headscale/
|
||||
RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale && test -e /go/bin/headscale
|
||||
# Build debug binary with debug symbols for delve
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -gcflags="all=-N -l" -o /go/bin/headscale ./cmd/headscale
|
||||
|
||||
# Need to reset the entrypoint or everything will run as a busybox script
|
||||
ENTRYPOINT []
|
||||
EXPOSE 8080/tcp
|
||||
CMD ["headscale"]
|
||||
EXPOSE 8080/tcp 40000/tcp
|
||||
CMD ["/go/bin/dlv", "--listen=0.0.0.0:40000", "--headless=true", "--api-version=2", "--accept-multiclient", "exec", "/go/bin/headscale", "--"]
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
# This Dockerfile is more or less lifted from tailscale/tailscale
|
||||
# to ensure a similar build process when testing the HEAD of tailscale.
|
||||
|
||||
FROM golang:1.23-alpine AS build-env
|
||||
FROM golang:1.24-alpine AS build-env
|
||||
|
||||
WORKDIR /go/src
|
||||
|
||||
|
||||
Makefile
@@ -1,64 +1,129 @@
|
||||
# Calculate version
|
||||
version ?= $(shell git describe --always --tags --dirty)
|
||||
# Headscale Makefile
|
||||
# Modern Makefile following best practices
|
||||
|
||||
rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
|
||||
# Version calculation
|
||||
VERSION ?= $(shell git describe --always --tags --dirty)
|
||||
|
||||
# Determine if OS supports pie
|
||||
# Build configuration
|
||||
GOOS ?= $(shell uname | tr '[:upper:]' '[:lower:]')
|
||||
ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
|
||||
pieflags = -buildmode=pie
|
||||
else
|
||||
ifeq ($(filter $(GOOS), openbsd netbsd solaris plan9), )
|
||||
PIE_FLAGS = -buildmode=pie
|
||||
endif
|
||||
|
||||
# GO_SOURCES = $(wildcard *.go)
|
||||
# PROTO_SOURCES = $(wildcard **/*.proto)
|
||||
GO_SOURCES = $(call rwildcard,,*.go)
|
||||
PROTO_SOURCES = $(call rwildcard,,*.proto)
|
||||
# Tool availability check with nix warning
|
||||
define check_tool
|
||||
@command -v $(1) >/dev/null 2>&1 || { \
|
||||
echo "Warning: $(1) not found. Run 'nix develop' to ensure all dependencies are available."; \
|
||||
exit 1; \
|
||||
}
|
||||
endef
|
||||
|
||||
# Source file collections using shell find for better performance
|
||||
GO_SOURCES := $(shell find . -name '*.go' -not -path './gen/*' -not -path './vendor/*')
|
||||
PROTO_SOURCES := $(shell find . -name '*.proto' -not -path './gen/*' -not -path './vendor/*')
|
||||
DOC_SOURCES := $(shell find . \( -name '*.md' -o -name '*.yaml' -o -name '*.yml' -o -name '*.ts' -o -name '*.js' -o -name '*.html' -o -name '*.css' -o -name '*.scss' -o -name '*.sass' \) -not -path './gen/*' -not -path './vendor/*' -not -path './node_modules/*')
|
||||
|
||||
# Default target
|
||||
.PHONY: all
|
||||
all: lint test build
|
||||
|
||||
# Dependency checking
|
||||
.PHONY: check-deps
|
||||
check-deps:
|
||||
$(call check_tool,go)
|
||||
$(call check_tool,golangci-lint)
|
||||
$(call check_tool,gofumpt)
|
||||
$(call check_tool,prettier)
|
||||
$(call check_tool,clang-format)
|
||||
$(call check_tool,buf)
|
||||
|
||||
# Build targets
|
||||
.PHONY: build
|
||||
build: check-deps $(GO_SOURCES) go.mod go.sum
|
||||
@echo "Building headscale..."
|
||||
go build $(PIE_FLAGS) -ldflags "-X main.version=$(VERSION)" -o headscale ./cmd/headscale
|
||||
|
||||
# Test targets
|
||||
.PHONY: test
|
||||
test: check-deps $(GO_SOURCES) go.mod go.sum
|
||||
@echo "Running Go tests..."
|
||||
go test -race ./...
|
||||
|
||||
|
||||
build:
|
||||
nix build
|
||||
|
||||
dev: lint test build
|
||||
|
||||
test:
|
||||
gotestsum -- -short -coverprofile=coverage.out ./...
|
||||
|
||||
test_integration:
|
||||
docker run \
|
||||
-t --rm \
|
||||
-v ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
-v $$PWD:$$PWD -w $$PWD/integration \
|
||||
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||
-v $$PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go run gotest.tools/gotestsum@latest -- -failfast ./... -timeout 120m -parallel 8
|
||||
|
||||
lint:
|
||||
golangci-lint run --fix --timeout 10m
|
||||
|
||||
# Formatting targets
|
||||
.PHONY: fmt
|
||||
fmt: fmt-go fmt-prettier fmt-proto
|
||||
|
||||
fmt-prettier:
|
||||
prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
prettier --write --print-width 80 --prose-wrap always CHANGELOG.md
|
||||
|
||||
fmt-go:
|
||||
# TODO(kradalby): Reeval if we want to use 88 in the future.
|
||||
# golines --max-len=88 --base-formatter=gofumpt -w $(GO_SOURCES)
|
||||
.PHONY: fmt-go
|
||||
fmt-go: check-deps $(GO_SOURCES)
|
||||
@echo "Formatting Go code..."
|
||||
gofumpt -l -w .
|
||||
golangci-lint run --fix
|
||||
|
||||
fmt-proto:
|
||||
.PHONY: fmt-prettier
|
||||
fmt-prettier: check-deps $(DOC_SOURCES)
|
||||
@echo "Formatting documentation and config files..."
|
||||
prettier --write '**/*.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
prettier --write --print-width 80 --prose-wrap always CHANGELOG.md
|
||||
|
||||
.PHONY: fmt-proto
|
||||
fmt-proto: check-deps $(PROTO_SOURCES)
|
||||
@echo "Formatting Protocol Buffer files..."
|
||||
clang-format -i $(PROTO_SOURCES)
|
||||
|
||||
proto-lint:
|
||||
cd proto/ && go run github.com/bufbuild/buf/cmd/buf lint
|
||||
# Linting targets
|
||||
.PHONY: lint
|
||||
lint: lint-go lint-proto
|
||||
|
||||
compress: build
|
||||
upx --brute headscale
|
||||
.PHONY: lint-go
|
||||
lint-go: check-deps $(GO_SOURCES) go.mod go.sum
|
||||
@echo "Linting Go code..."
|
||||
golangci-lint run --timeout 10m
|
||||
|
||||
generate:
|
||||
rm -rf gen
|
||||
buf generate proto
|
||||
.PHONY: lint-proto
|
||||
lint-proto: check-deps $(PROTO_SOURCES)
|
||||
@echo "Linting Protocol Buffer files..."
|
||||
cd proto/ && buf lint
|
||||
|
||||
# Code generation
|
||||
.PHONY: generate
|
||||
generate: check-deps
|
||||
@echo "Generating code..."
|
||||
go generate ./...
|
||||
|
||||
# Clean targets
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm -rf headscale gen
|
||||
|
||||
# Development workflow
|
||||
.PHONY: dev
|
||||
dev: fmt lint test build
|
||||
|
||||
# Help target
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo "Headscale Development Makefile"
|
||||
@echo ""
|
||||
@echo "Main targets:"
|
||||
@echo " all - Run lint, test, and build (default)"
|
||||
@echo " build - Build headscale binary"
|
||||
@echo " test - Run Go tests"
|
||||
@echo " fmt - Format all code (Go, docs, proto)"
|
||||
@echo " lint - Lint all code (Go, proto)"
|
||||
@echo " generate - Generate code from Protocol Buffers"
|
||||
@echo " dev - Full development workflow (fmt + lint + test + build)"
|
||||
@echo " clean - Clean build artifacts"
|
||||
@echo ""
|
||||
@echo "Specific targets:"
|
||||
@echo " fmt-go - Format Go code only"
|
||||
@echo " fmt-prettier - Format documentation only"
|
||||
@echo " fmt-proto - Format Protocol Buffer files only"
|
||||
@echo " lint-go - Lint Go code only"
|
||||
@echo " lint-proto - Lint Protocol Buffer files only"
|
||||
@echo ""
|
||||
@echo "Dependencies:"
|
||||
@echo " check-deps - Verify required tools are available"
|
||||
@echo ""
|
||||
@echo "Note: If not running in a nix shell, ensure dependencies are available:"
|
||||
@echo " nix develop"
|
||||
README.md
@@ -7,8 +7,12 @@ An open source, self-hosted implementation of the Tailscale control server.
|
||||
Join our [Discord server](https://discord.gg/c84AZQhmpx) for a chat.
|
||||
|
||||
**Note:** Always select the same GitHub tag as the released version you use
|
||||
to ensure you have the correct example configuration and documentation.
|
||||
The `main` branch might contain unreleased changes.
|
||||
to ensure you have the correct example configuration. The `main` branch might
|
||||
contain unreleased changes. The documentation is available for stable and
|
||||
development versions:
|
||||
|
||||
- [Documentation for the stable version](https://headscale.net/stable/)
|
||||
- [Documentation for the development version](https://headscale.net/development/)
|
||||
|
||||
## What is Tailscale
|
||||
|
||||
@@ -32,12 +36,12 @@ organisation.
|
||||
|
||||
## Design goal
|
||||
|
||||
Headscale aims to implement a self-hosted, open source alternative to the Tailscale
|
||||
control server.
|
||||
Headscale's goal is to provide self-hosters and hobbyists with an open-source
|
||||
server they can use for their projects and labs.
|
||||
It implements a narrow scope, a single Tailnet, suitable for a personal use, or a small
|
||||
open-source organisation.
|
||||
Headscale aims to implement a self-hosted, open source alternative to the
|
||||
[Tailscale](https://tailscale.com/) control server. Headscale's goal is to
|
||||
provide self-hosters and hobbyists with an open-source server they can use for
|
||||
their projects and labs. It implements a narrow scope, a _single_ Tailscale
|
||||
network (tailnet), suitable for a personal use, or a small open-source
|
||||
organisation.
|
||||
|
||||
## Supporting Headscale
|
||||
|
||||
@@ -134,16 +138,29 @@ make test
|
||||
|
||||
To build the program:
|
||||
|
||||
```shell
|
||||
nix build
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```shell
|
||||
make build
|
||||
```
|
||||
|
||||
### Development workflow
|
||||
|
||||
We recommend using Nix for dependency management to ensure you have all required tools. If you prefer to manage dependencies yourself, you can use Make directly:
|
||||
|
||||
**With Nix (recommended):**
|
||||
```shell
|
||||
nix develop
|
||||
make test
|
||||
make build
|
||||
```
|
||||
|
||||
**With your own dependencies:**
|
||||
```shell
|
||||
make test
|
||||
make build
|
||||
```
|
||||
|
||||
The Makefile will warn you if any required tools are missing and suggest running `nix develop`. Run `make help` to see all available targets.
|
||||
|
||||
## Contributors
|
||||
|
||||
<a href="https://github.com/juanfont/headscale/graphs/contributors">
|
||||
|
||||
@@ -4,10 +4,10 @@ import (
|
||||
"fmt"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -79,7 +79,7 @@ var createNodeCmd = &cobra.Command{
|
||||
)
|
||||
}
|
||||
|
||||
machineKey, err := cmd.Flags().GetString("key")
|
||||
registrationID, err := cmd.Flags().GetString("key")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -88,8 +88,7 @@ var createNodeCmd = &cobra.Command{
|
||||
)
|
||||
}
|
||||
|
||||
var mkey key.MachinePublic
|
||||
err = mkey.UnmarshalText([]byte(machineKey))
|
||||
_, err = types.RegistrationIDFromString(registrationID)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -108,7 +107,7 @@ var createNodeCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
request := &v1.DebugCreateNodeRequest{
|
||||
Key: machineKey,
|
||||
Key: registrationID,
|
||||
Name: name,
|
||||
User: user,
|
||||
Routes: routes,
|
||||
@@ -118,7 +117,7 @@ var createNodeCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create node: %s", status.Convert(err).Message()),
|
||||
"Cannot create node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -68,7 +69,7 @@ func mockOIDC() error {
|
||||
|
||||
userStr := os.Getenv("MOCKOIDC_USERS")
|
||||
if userStr == "" {
|
||||
return fmt.Errorf("MOCKOIDC_USERS not defined")
|
||||
return errors.New("MOCKOIDC_USERS not defined")
|
||||
}
|
||||
|
||||
var users []mockoidc.MockUser
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/samber/lo"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
"tailscale.com/types/key"
|
||||
@@ -27,9 +28,11 @@ func init() {
|
||||
listNodesNamespaceFlag := listNodesCmd.Flags().Lookup("namespace")
|
||||
listNodesNamespaceFlag.Deprecated = deprecateNamespaceMessage
|
||||
listNodesNamespaceFlag.Hidden = true
|
||||
|
||||
nodeCmd.AddCommand(listNodesCmd)
|
||||
|
||||
listNodeRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
nodeCmd.AddCommand(listNodeRoutesCmd)
|
||||
|
||||
registerNodeCmd.Flags().StringP("user", "u", "", "User")
|
||||
|
||||
registerNodeCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
@@ -39,33 +42,33 @@ func init() {
|
||||
|
||||
err := registerNodeCmd.MarkFlagRequired("user")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
registerNodeCmd.Flags().StringP("key", "k", "", "Key")
|
||||
err = registerNodeCmd.MarkFlagRequired("key")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(registerNodeCmd)
|
||||
|
||||
expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = expireNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(expireNodeCmd)
|
||||
|
||||
renameNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = renameNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(renameNodeCmd)
|
||||
|
||||
deleteNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = deleteNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(deleteNodeCmd)
|
||||
|
||||
@@ -73,10 +76,10 @@ func init() {
|
||||
|
||||
err = moveNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
|
||||
moveNodeCmd.Flags().StringP("user", "u", "", "New user")
|
||||
moveNodeCmd.Flags().Uint64P("user", "u", 0, "New user")
|
||||
|
||||
moveNodeCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
moveNodeNamespaceFlag := moveNodeCmd.Flags().Lookup("namespace")
|
||||
@@ -85,20 +88,20 @@ func init() {
|
||||
|
||||
err = moveNodeCmd.MarkFlagRequired("user")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(moveNodeCmd)
|
||||
|
||||
tagCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
|
||||
err = tagCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
tagCmd.Flags().
|
||||
StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
|
||||
tagCmd.MarkFlagRequired("identifier")
|
||||
tagCmd.Flags().StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
|
||||
nodeCmd.AddCommand(tagCmd)
|
||||
|
||||
approveRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
approveRoutesCmd.MarkFlagRequired("identifier")
|
||||
approveRoutesCmd.Flags().StringSliceP("routes", "r", []string{}, `List of routes that will be approved (comma-separated, e.g. "10.0.0.0/8,192.168.0.0/24" or empty string to remove all approved routes)`)
|
||||
nodeCmd.AddCommand(approveRoutesCmd)
|
||||
|
||||
nodeCmd.AddCommand(backfillNodeIPsCmd)
|
||||
}
|
||||
|
||||
@@ -122,7 +125,7 @@ var registerNodeCmd = &cobra.Command{
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
machineKey, err := cmd.Flags().GetString("key")
|
||||
registrationID, err := cmd.Flags().GetString("key")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -132,7 +135,7 @@ var registerNodeCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
request := &v1.RegisterNodeRequest{
|
||||
Key: machineKey,
|
||||
Key: registrationID,
|
||||
User: user,
|
||||
}
|
||||
|
||||
@@ -181,7 +184,7 @@ var listNodesCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
|
||||
"Cannot get nodes: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
@@ -206,6 +209,72 @@ var listNodesCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var listNodeRoutesCmd = &cobra.Command{
|
||||
Use: "list-routes",
|
||||
Short: "List routes available on nodes",
|
||||
Aliases: []string{"lsr", "routes"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListNodesRequest{}
|
||||
|
||||
response, err := client.ListNodes(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot get nodes: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetNodes(), "", output)
|
||||
}
|
||||
|
||||
nodes := response.GetNodes()
|
||||
if identifier != 0 {
|
||||
for _, node := range response.GetNodes() {
|
||||
if node.GetId() == identifier {
|
||||
nodes = []*v1.Node{node}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nodes = lo.Filter(nodes, func(n *v1.Node, _ int) bool {
|
||||
return (n.GetSubnetRoutes() != nil && len(n.GetSubnetRoutes()) > 0) || (n.GetApprovedRoutes() != nil && len(n.GetApprovedRoutes()) > 0) || (n.GetAvailableRoutes() != nil && len(n.GetAvailableRoutes()) > 0)
|
||||
})
|
||||
|
||||
tableData, err := nodeRoutesToPtables(nodes)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var expireNodeCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Short: "Expire (log out) a node in your network",
|
||||
@@ -329,10 +398,7 @@ var deleteNodeCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error getting node node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
"Error getting node node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -368,10 +434,7 @@ var deleteNodeCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error deleting node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
"Error deleting node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -406,7 +469,7 @@ var moveNodeCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
user, err := cmd.Flags().GetUint64("user")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -429,10 +492,7 @@ var moveNodeCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error getting node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
"Error getting node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -448,10 +508,7 @@ var moveNodeCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error moving node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
"Error moving node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -498,10 +555,7 @@ be assigned to nodes.`,
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error backfilling IPs: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
"Error backfilling IPs: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -657,6 +711,35 @@ func nodesToPtables(
|
||||
return tableData, nil
|
||||
}
|
||||
|
||||
func nodeRoutesToPtables(
|
||||
nodes []*v1.Node,
|
||||
) (pterm.TableData, error) {
|
||||
tableHeader := []string{
|
||||
"ID",
|
||||
"Hostname",
|
||||
"Approved",
|
||||
"Available",
|
||||
"Serving (Primary)",
|
||||
}
|
||||
tableData := pterm.TableData{tableHeader}
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeData := []string{
|
||||
strconv.FormatUint(node.GetId(), util.Base10),
|
||||
node.GetGivenName(),
|
||||
strings.Join(node.GetApprovedRoutes(), ", "),
|
||||
strings.Join(node.GetAvailableRoutes(), ", "),
|
||||
strings.Join(node.GetSubnetRoutes(), ", "),
|
||||
}
|
||||
tableData = append(
|
||||
tableData,
|
||||
nodeData,
|
||||
)
|
||||
}
|
||||
|
||||
return tableData, nil
|
||||
}
|
||||
|
||||
var tagCmd = &cobra.Command{
|
||||
Use: "tag",
|
||||
Short: "Manage the tags of a node",
|
||||
@@ -714,3 +797,60 @@ var tagCmd = &cobra.Command{
        }
    },
}

var approveRoutesCmd = &cobra.Command{
    Use:   "approve-routes",
    Short: "Manage the approved routes of a node",
    Run: func(cmd *cobra.Command, args []string) {
        output, _ := cmd.Flags().GetString("output")
        ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
        defer cancel()
        defer conn.Close()

        // retrieve flags from CLI
        identifier, err := cmd.Flags().GetUint64("identifier")
        if err != nil {
            ErrorOutput(
                err,
                fmt.Sprintf("Error converting ID to integer: %s", err),
                output,
            )

            return
        }
        routes, err := cmd.Flags().GetStringSlice("routes")
        if err != nil {
            ErrorOutput(
                err,
                fmt.Sprintf("Error retrieving list of routes to add to node, %v", err),
                output,
            )

            return
        }

        // Sending routes to node
        request := &v1.SetApprovedRoutesRequest{
            NodeId: identifier,
            Routes: routes,
        }
        resp, err := client.SetApprovedRoutes(ctx, request)
        if err != nil {
            ErrorOutput(
                err,
                fmt.Sprintf("Error while sending routes to headscale: %s", err),
                output,
            )

            return
        }

        if resp != nil {
            SuccessOutput(
                resp.GetNode(),
                "Node updated",
                output,
            )
        }
    },
}

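The flag registration for approve-routes is not shown in this hunk. A minimal sketch of what the wiring could look like, assuming the flag names read above ("identifier", "routes") and a parent nodeCmd; this is illustrative only, not the repository's actual init code:

    func init() {
        nodeCmd.AddCommand(approveRoutesCmd)
        approveRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
        approveRoutesCmd.Flags().StringSliceP("routes", "r", []string{}, "Routes to approve, e.g. 10.0.0.0/8,0.0.0.0/0")
    }

With wiring along those lines, the command would be invoked roughly as: headscale nodes approve-routes --identifier 1 --routes 10.0.0.0/24
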
@@ -6,8 +6,11 @@ import (
|
||||
"os"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"tailscale.com/types/views"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -19,6 +22,12 @@ func init() {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
policyCmd.AddCommand(setPolicy)
|
||||
|
||||
checkPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
|
||||
if err := checkPolicy.MarkFlagRequired("file"); err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
policyCmd.AddCommand(checkPolicy)
|
||||
}
|
||||
|
||||
var policyCmd = &cobra.Command{
|
||||
@@ -85,3 +94,30 @@ var setPolicy = &cobra.Command{
        SuccessOutput(nil, "Policy updated.", "")
    },
}

var checkPolicy = &cobra.Command{
    Use:   "check",
    Short: "Check the Policy file for errors",
    Run: func(cmd *cobra.Command, args []string) {
        output, _ := cmd.Flags().GetString("output")
        policyPath, _ := cmd.Flags().GetString("file")

        f, err := os.Open(policyPath)
        if err != nil {
            ErrorOutput(err, fmt.Sprintf("Error opening the policy file: %s", err), output)
        }
        defer f.Close()

        policyBytes, err := io.ReadAll(f)
        if err != nil {
            ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
        }

        _, err = policy.NewPolicyManager(policyBytes, nil, views.Slice[types.NodeView]{})
        if err != nil {
            ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
        }

        SuccessOutput(nil, "Policy is valid", "")
    },
}

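Given the --file/-f flag registered for checkPolicy in the init hunk above, the new subcommand would be run roughly as:

    headscale policy check --file ./policy.hujson

It prints "Policy is valid" when the HuJSON policy parses and reports an open/read/parse error otherwise (the binary name and path here are illustrative).
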
@@ -20,7 +20,7 @@ const (
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(preauthkeysCmd)
|
||||
preauthkeysCmd.PersistentFlags().StringP("user", "u", "", "User")
|
||||
preauthkeysCmd.PersistentFlags().Uint64P("user", "u", 0, "User identifier (ID)")
|
||||
|
||||
preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "User")
|
||||
pakNamespaceFlag := preauthkeysCmd.PersistentFlags().Lookup("namespace")
|
||||
@@ -57,7 +57,7 @@ var listPreAuthKeys = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
user, err := cmd.Flags().GetUint64("user")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
}
|
||||
@@ -112,7 +112,7 @@ var listPreAuthKeys = &cobra.Command{
|
||||
aclTags = strings.TrimLeft(aclTags, ",")
|
||||
|
||||
tableData = append(tableData, []string{
|
||||
key.GetId(),
|
||||
strconv.FormatUint(key.GetId(), 10),
|
||||
key.GetKey(),
|
||||
strconv.FormatBool(key.GetReusable()),
|
||||
strconv.FormatBool(key.GetEphemeral()),
|
||||
@@ -141,7 +141,7 @@ var createPreAuthKeyCmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
user, err := cmd.Flags().GetUint64("user")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
}
|
||||
@@ -206,7 +206,7 @@ var expirePreAuthKeyCmd = &cobra.Command{
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
user, err := cmd.Flags().GetUint64("user")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/rs/zerolog"
|
||||
@@ -25,6 +26,11 @@ func init() {
|
||||
return
|
||||
}
|
||||
|
||||
if slices.Contains(os.Args, "policy") && slices.Contains(os.Args, "check") {
|
||||
zerolog.SetGlobalLevel(zerolog.Disabled)
|
||||
return
|
||||
}
|
||||
|
||||
cobra.OnInitialize(initConfig)
|
||||
rootCmd.PersistentFlags().
|
||||
StringVarP(&cfgFile, "config", "c", "", "config file (default is /etc/headscale/config.yaml)")
|
||||
@@ -58,26 +64,26 @@ func initConfig() {
|
||||
zerolog.SetGlobalLevel(zerolog.Disabled)
|
||||
}
|
||||
|
||||
// logFormat := viper.GetString("log.format")
|
||||
// if logFormat == types.JSONLogFormat {
|
||||
// log.Logger = log.Output(os.Stdout)
|
||||
// }
|
||||
logFormat := viper.GetString("log.format")
|
||||
if logFormat == types.JSONLogFormat {
|
||||
log.Logger = log.Output(os.Stdout)
|
||||
}
|
||||
|
||||
disableUpdateCheck := viper.GetBool("disable_check_updates")
|
||||
if !disableUpdateCheck && !machineOutput {
|
||||
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
|
||||
Version != "dev" {
|
||||
types.Version != "dev" {
|
||||
githubTag := &latest.GithubTag{
|
||||
Owner: "juanfont",
|
||||
Repository: "headscale",
|
||||
}
|
||||
res, err := latest.Check(githubTag, Version)
|
||||
res, err := latest.Check(githubTag, types.Version)
|
||||
if err == nil && res.Outdated {
|
||||
//nolint
|
||||
log.Warn().Msgf(
|
||||
"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
|
||||
res.Current,
|
||||
Version,
|
||||
types.Version,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,266 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
"tailscale.com/net/tsaddr"
|
||||
)
|
||||
|
||||
const (
|
||||
Base10 = 10
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(routesCmd)
|
||||
listRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
routesCmd.AddCommand(listRoutesCmd)
|
||||
|
||||
enableRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
|
||||
err := enableRouteCmd.MarkFlagRequired("route")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
routesCmd.AddCommand(enableRouteCmd)
|
||||
|
||||
disableRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
|
||||
err = disableRouteCmd.MarkFlagRequired("route")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
routesCmd.AddCommand(disableRouteCmd)
|
||||
|
||||
deleteRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
|
||||
err = deleteRouteCmd.MarkFlagRequired("route")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
routesCmd.AddCommand(deleteRouteCmd)
|
||||
}
|
||||
|
||||
var routesCmd = &cobra.Command{
|
||||
Use: "routes",
|
||||
Short: "Manage the routes of Headscale",
|
||||
Aliases: []string{"r", "route"},
|
||||
}
|
||||
|
||||
var listRoutesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all routes",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
machineID, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
var routes []*v1.Route
|
||||
|
||||
if machineID == 0 {
|
||||
response, err := client.GetRoutes(ctx, &v1.GetRoutesRequest{})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetRoutes(), "", output)
|
||||
}
|
||||
|
||||
routes = response.GetRoutes()
|
||||
} else {
|
||||
response, err := client.GetNodeRoutes(ctx, &v1.GetNodeRoutesRequest{
|
||||
NodeId: machineID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get routes for node %d: %s", machineID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetRoutes(), "", output)
|
||||
}
|
||||
|
||||
routes = response.GetRoutes()
|
||||
}
|
||||
|
||||
tableData := routesToPtables(routes)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var enableRouteCmd = &cobra.Command{
|
||||
Use: "enable",
|
||||
Short: "Set a route as enabled",
|
||||
Long: `This command will make as enabled a given route.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
routeID, err := cmd.Flags().GetUint64("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.EnableRoute(ctx, &v1.EnableRouteRequest{
|
||||
RouteId: routeID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot enable route %d: %s", routeID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var disableRouteCmd = &cobra.Command{
|
||||
Use: "disable",
|
||||
Short: "Set as disabled a given route",
|
||||
Long: `This command will make as disabled a given route.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
routeID, err := cmd.Flags().GetUint64("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.DisableRoute(ctx, &v1.DisableRouteRequest{
|
||||
RouteId: routeID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot disable route %d: %s", routeID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var deleteRouteCmd = &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete a given route",
|
||||
Long: `This command will delete a given route.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
routeID, err := cmd.Flags().GetUint64("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.DeleteRoute(ctx, &v1.DeleteRouteRequest{
|
||||
RouteId: routeID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot delete route %d: %s", routeID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// routesToPtables converts the list of routes to a nice table.
|
||||
func routesToPtables(routes []*v1.Route) pterm.TableData {
|
||||
tableData := pterm.TableData{{"ID", "Node", "Prefix", "Advertised", "Enabled", "Primary"}}
|
||||
|
||||
for _, route := range routes {
|
||||
var isPrimaryStr string
|
||||
prefix, err := netip.ParsePrefix(route.GetPrefix())
|
||||
if err != nil {
|
||||
log.Printf("Error parsing prefix %s: %s", route.GetPrefix(), err)
|
||||
|
||||
continue
|
||||
}
|
||||
if tsaddr.IsExitRoute(prefix) {
|
||||
isPrimaryStr = "-"
|
||||
} else {
|
||||
isPrimaryStr = strconv.FormatBool(route.GetIsPrimary())
|
||||
}
|
||||
|
||||
tableData = append(tableData,
|
||||
[]string{
|
||||
strconv.FormatUint(route.GetId(), Base10),
|
||||
route.GetNode().GetGivenName(),
|
||||
route.GetPrefix(),
|
||||
strconv.FormatBool(route.GetAdvertised()),
|
||||
strconv.FormatBool(route.GetEnabled()),
|
||||
isPrimaryStr,
|
||||
})
|
||||
}
|
||||
|
||||
return tableData
|
||||
}
|
||||
@@ -2,10 +2,12 @@ package cli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/tailscale/squibble"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -21,6 +23,12 @@ var serveCmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app, err := newHeadscaleServerWithConfig()
|
||||
if err != nil {
|
||||
var squibbleErr squibble.ValidationError
|
||||
if errors.As(err, &squibbleErr) {
|
||||
fmt.Printf("SQLite schema failed to validate:\n")
|
||||
fmt.Println(squibbleErr.Diff)
|
||||
}
|
||||
|
||||
log.Fatal().Caller().Err(err).Msg("Error initializing")
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,8 @@ package cli
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
survey "github.com/AlecAivazis/survey/v2"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
@@ -26,10 +28,7 @@ func usernameAndIDFromFlag(cmd *cobra.Command) (uint64, string) {
|
||||
err := errors.New("--name or --identifier flag is required")
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot rename user: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
"Cannot rename user: "+status.Convert(err).Message(),
|
||||
"",
|
||||
)
|
||||
}
|
||||
@@ -40,6 +39,9 @@ func usernameAndIDFromFlag(cmd *cobra.Command) (uint64, string) {
|
||||
func init() {
|
||||
rootCmd.AddCommand(userCmd)
|
||||
userCmd.AddCommand(createUserCmd)
|
||||
createUserCmd.Flags().StringP("display-name", "d", "", "Display name")
|
||||
createUserCmd.Flags().StringP("email", "e", "", "Email")
|
||||
createUserCmd.Flags().StringP("picture-url", "p", "", "Profile picture URL")
|
||||
userCmd.AddCommand(listUsersCmd)
|
||||
usernameAndIDFlag(listUsersCmd)
|
||||
listUsersCmd.Flags().StringP("email", "e", "", "Email")
|
||||
@@ -83,15 +85,34 @@ var createUserCmd = &cobra.Command{
|
||||
|
||||
request := &v1.CreateUserRequest{Name: userName}
|
||||
|
||||
if displayName, _ := cmd.Flags().GetString("display-name"); displayName != "" {
|
||||
request.DisplayName = displayName
|
||||
}
|
||||
|
||||
if email, _ := cmd.Flags().GetString("email"); email != "" {
|
||||
request.Email = email
|
||||
}
|
||||
|
||||
if pictureURL, _ := cmd.Flags().GetString("picture-url"); pictureURL != "" {
|
||||
if _, err := url.Parse(pictureURL); err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Invalid Picture URL: %s",
|
||||
err,
|
||||
),
|
||||
output,
|
||||
)
|
||||
}
|
||||
request.PictureUrl = pictureURL
|
||||
}
|
||||
|
||||
log.Trace().Interface("request", request).Msg("Sending CreateUser request")
|
||||
response, err := client.CreateUser(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot create user: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
"Cannot create user: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
@@ -121,16 +142,16 @@ var destroyUserCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", status.Convert(err).Message()),
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if len(users.GetUsers()) != 1 {
|
||||
err := fmt.Errorf("Unable to determine user to delete, query returned multiple users, use ID")
|
||||
err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", status.Convert(err).Message()),
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
@@ -159,10 +180,7 @@ var destroyUserCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot destroy user: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
"Cannot destroy user: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
@@ -194,20 +212,17 @@ var listUsersCmd = &cobra.Command{
|
||||
switch {
|
||||
case id > 0:
|
||||
request.Id = uint64(id)
|
||||
break
|
||||
case username != "":
|
||||
request.Name = username
|
||||
break
|
||||
case email != "":
|
||||
request.Email = email
|
||||
break
|
||||
}
|
||||
|
||||
response, err := client.ListUsers(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get users: %s", status.Convert(err).Message()),
|
||||
"Cannot get users: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
@@ -221,7 +236,7 @@ var listUsersCmd = &cobra.Command{
|
||||
tableData = append(
|
||||
tableData,
|
||||
[]string{
|
||||
fmt.Sprintf("%d", user.GetId()),
|
||||
strconv.FormatUint(user.GetId(), 10),
|
||||
user.GetDisplayName(),
|
||||
user.GetName(),
|
||||
user.GetEmail(),
|
||||
@@ -261,16 +276,16 @@ var renameUserCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", status.Convert(err).Message()),
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if len(users.GetUsers()) != 1 {
|
||||
err := fmt.Errorf("Unable to determine user to delete, query returned multiple users, use ID")
|
||||
err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", status.Convert(err).Message()),
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
@@ -286,10 +301,7 @@ var renameUserCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot rename user: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
"Cannot rename user: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -27,14 +27,14 @@ func newHeadscaleServerWithConfig() (*hscontrol.Headscale, error) {
|
||||
cfg, err := types.LoadServerConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to load configuration while creating headscale instance: %w",
|
||||
"loading configuration: %w",
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
app, err := hscontrol.NewHeadscale(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("creating new headscale: %w", err)
|
||||
}
|
||||
|
||||
return app, nil
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var Version = "dev"
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(versionCmd)
|
||||
}
|
||||
@@ -16,6 +15,9 @@ var versionCmd = &cobra.Command{
    Long: "The version of headscale.",
    Run: func(cmd *cobra.Command, args []string) {
        output, _ := cmd.Flags().GetString("output")
        SuccessOutput(map[string]string{"version": Version}, Version, output)
        SuccessOutput(map[string]string{
            "version": types.Version,
            "commit":  types.GitCommitHash,
        }, types.Version, output)
    },
}
|
||||
|
||||
207    cmd/hi/cleanup.go    Normal file
@@ -0,0 +1,207 @@
package main

import (
    "context"
    "fmt"
    "strings"
    "time"

    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/image"
    "github.com/docker/docker/client"
    "github.com/docker/docker/errdefs"
)

// cleanupBeforeTest performs cleanup operations before running tests.
func cleanupBeforeTest(ctx context.Context) error {
    if err := killTestContainers(ctx); err != nil {
        return fmt.Errorf("failed to kill test containers: %w", err)
    }

    if err := pruneDockerNetworks(ctx); err != nil {
        return fmt.Errorf("failed to prune networks: %w", err)
    }

    return nil
}

// cleanupAfterTest removes the test container after completion.
func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID string) error {
    return cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
        Force: true,
    })
}

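These two helpers are meant to bracket a test run. A sketch of the calling pattern (the Docker client and container ID come from the runner in cmd/hi/docker.go further below; the variable names here are assumptions):

    if err := cleanupBeforeTest(ctx); err != nil {
        log.Printf("Warning: pre-test cleanup failed: %v", err)
    }
    // ... create, start and wait for the test-runner container, yielding cli and containerID ...
    if err := cleanupAfterTest(ctx, cli, containerID); err != nil {
        log.Printf("Warning: post-test cleanup failed: %v", err)
    }
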
// killTestContainers terminates and removes all test containers.
|
||||
func killTestContainers(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
containers, err := cli.ContainerList(ctx, container.ListOptions{
|
||||
All: true,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list containers: %w", err)
|
||||
}
|
||||
|
||||
removed := 0
|
||||
for _, cont := range containers {
|
||||
shouldRemove := false
|
||||
for _, name := range cont.Names {
|
||||
if strings.Contains(name, "headscale-test-suite") ||
|
||||
strings.Contains(name, "hs-") ||
|
||||
strings.Contains(name, "ts-") ||
|
||||
strings.Contains(name, "derp-") {
|
||||
shouldRemove = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if shouldRemove {
|
||||
// First kill the container if it's running
|
||||
if cont.State == "running" {
|
||||
_ = cli.ContainerKill(ctx, cont.ID, "KILL")
|
||||
}
|
||||
|
||||
// Then remove the container with retry logic
|
||||
if removeContainerWithRetry(ctx, cli, cont.ID) {
|
||||
removed++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if removed > 0 {
|
||||
fmt.Printf("Removed %d test containers\n", removed)
|
||||
} else {
|
||||
fmt.Println("No test containers found to remove")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeContainerWithRetry attempts to remove a container with exponential backoff retry logic.
func removeContainerWithRetry(ctx context.Context, cli *client.Client, containerID string) bool {
    maxRetries := 3
    baseDelay := 100 * time.Millisecond

    for attempt := range maxRetries {
        err := cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
            Force: true,
        })
        if err == nil {
            return true
        }

        // If this is the last attempt, don't wait
        if attempt == maxRetries-1 {
            break
        }

        // Wait with exponential backoff
        delay := baseDelay * time.Duration(1<<attempt)
        time.Sleep(delay)
    }

    return false
}

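With maxRetries = 3 and baseDelay = 100ms, the loop above makes at most three removal attempts, sleeping 100ms after the first failure and 200ms after the second, with no sleep after the final attempt (the "for attempt := range maxRetries" form ranges over an integer and needs Go 1.22 or newer). A standalone sketch of the same delay arithmetic, for illustration only:

    baseDelay := 100 * time.Millisecond
    for attempt := 0; attempt < 2; attempt++ {
        // delay after a failed attempt: 1<<0 -> 100ms, 1<<1 -> 200ms
        fmt.Printf("attempt %d failed, sleeping %v\n", attempt, baseDelay*time.Duration(1<<attempt))
    }
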
// pruneDockerNetworks removes unused Docker networks.
|
||||
func pruneDockerNetworks(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
report, err := cli.NetworksPrune(ctx, filters.Args{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prune networks: %w", err)
|
||||
}
|
||||
|
||||
if len(report.NetworksDeleted) > 0 {
|
||||
fmt.Printf("Removed %d unused networks\n", len(report.NetworksDeleted))
|
||||
} else {
|
||||
fmt.Println("No unused networks found to remove")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanOldImages removes test-related and old dangling Docker images.
|
||||
func cleanOldImages(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
images, err := cli.ImageList(ctx, image.ListOptions{
|
||||
All: true,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list images: %w", err)
|
||||
}
|
||||
|
||||
removed := 0
|
||||
for _, img := range images {
|
||||
shouldRemove := false
|
||||
for _, tag := range img.RepoTags {
|
||||
if strings.Contains(tag, "hs-") ||
|
||||
strings.Contains(tag, "headscale-integration") ||
|
||||
strings.Contains(tag, "tailscale") {
|
||||
shouldRemove = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(img.RepoTags) == 0 && time.Unix(img.Created, 0).Before(time.Now().Add(-7*24*time.Hour)) {
|
||||
shouldRemove = true
|
||||
}
|
||||
|
||||
if shouldRemove {
|
||||
_, err := cli.ImageRemove(ctx, img.ID, image.RemoveOptions{
|
||||
Force: true,
|
||||
})
|
||||
if err == nil {
|
||||
removed++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if removed > 0 {
|
||||
fmt.Printf("Removed %d test images\n", removed)
|
||||
} else {
|
||||
fmt.Println("No test images found to remove")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanCacheVolume removes the Docker volume used for Go module cache.
|
||||
func cleanCacheVolume(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
volumeName := "hs-integration-go-cache"
|
||||
err = cli.VolumeRemove(ctx, volumeName, true)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
fmt.Printf("Go module cache volume not found: %s\n", volumeName)
|
||||
} else if errdefs.IsConflict(err) {
|
||||
fmt.Printf("Go module cache volume is in use and cannot be removed: %s\n", volumeName)
|
||||
} else {
|
||||
fmt.Printf("Failed to remove Go module cache volume %s: %v\n", volumeName, err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Removed Go module cache volume: %s\n", volumeName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
762    cmd/hi/docker.go    Normal file
@@ -0,0 +1,762 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrTestFailed = errors.New("test failed")
|
||||
ErrUnexpectedContainerWait = errors.New("unexpected end of container wait")
|
||||
ErrNoDockerContext = errors.New("no docker context found")
|
||||
)
|
||||
|
||||
// runTestContainer executes integration tests in a Docker container.
|
||||
func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
runID := dockertestutil.GenerateRunID()
|
||||
containerName := "headscale-test-suite-" + runID
|
||||
logsDir := filepath.Join(config.LogsDir, runID)
|
||||
|
||||
if config.Verbose {
|
||||
log.Printf("Run ID: %s", runID)
|
||||
log.Printf("Container name: %s", containerName)
|
||||
log.Printf("Logs directory: %s", logsDir)
|
||||
}
|
||||
|
||||
absLogsDir, err := filepath.Abs(logsDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get absolute path for logs directory: %w", err)
|
||||
}
|
||||
|
||||
const dirPerm = 0o755
|
||||
if err := os.MkdirAll(absLogsDir, dirPerm); err != nil {
|
||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
||||
}
|
||||
|
||||
if config.CleanBefore {
|
||||
if config.Verbose {
|
||||
log.Printf("Running pre-test cleanup...")
|
||||
}
|
||||
if err := cleanupBeforeTest(ctx); err != nil && config.Verbose {
|
||||
log.Printf("Warning: pre-test cleanup failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
goTestCmd := buildGoTestCommand(config)
|
||||
if config.Verbose {
|
||||
log.Printf("Command: %s", strings.Join(goTestCmd, " "))
|
||||
}
|
||||
|
||||
imageName := "golang:" + config.GoVersion
|
||||
if err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil {
|
||||
return fmt.Errorf("failed to ensure image availability: %w", err)
|
||||
}
|
||||
|
||||
resp, err := createGoTestContainer(ctx, cli, config, containerName, absLogsDir, goTestCmd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create container: %w", err)
|
||||
}
|
||||
|
||||
if config.Verbose {
|
||||
log.Printf("Created container: %s", resp.ID)
|
||||
}
|
||||
|
||||
if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
|
||||
return fmt.Errorf("failed to start container: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("Starting test: %s", config.TestPattern)
|
||||
|
||||
// Start stats collection for container resource monitoring (if enabled)
|
||||
var statsCollector *StatsCollector
|
||||
if config.Stats {
|
||||
var err error
|
||||
statsCollector, err = NewStatsCollector()
|
||||
if err != nil {
|
||||
if config.Verbose {
|
||||
log.Printf("Warning: failed to create stats collector: %v", err)
|
||||
}
|
||||
statsCollector = nil
|
||||
}
|
||||
|
||||
if statsCollector != nil {
|
||||
defer statsCollector.Close()
|
||||
|
||||
// Start stats collection immediately - no need for complex retry logic
|
||||
// The new implementation monitors Docker events and will catch containers as they start
|
||||
if err := statsCollector.StartCollection(ctx, runID, config.Verbose); err != nil {
|
||||
if config.Verbose {
|
||||
log.Printf("Warning: failed to start stats collection: %v", err)
|
||||
}
|
||||
}
|
||||
defer statsCollector.StopCollection()
|
||||
}
|
||||
}
|
||||
|
||||
exitCode, err := streamAndWait(ctx, cli, resp.ID)
|
||||
|
||||
// Ensure all containers have finished and logs are flushed before extracting artifacts
|
||||
if waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose); waitErr != nil && config.Verbose {
|
||||
log.Printf("Warning: failed to wait for container finalization: %v", waitErr)
|
||||
}
|
||||
|
||||
// Extract artifacts from test containers before cleanup
|
||||
if err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose {
|
||||
log.Printf("Warning: failed to extract artifacts from containers: %v", err)
|
||||
}
|
||||
|
||||
// Always list control files regardless of test outcome
|
||||
listControlFiles(logsDir)
|
||||
|
||||
// Print stats summary and check memory limits if enabled
|
||||
if config.Stats && statsCollector != nil {
|
||||
violations := statsCollector.PrintSummaryAndCheckLimits(config.HSMemoryLimit, config.TSMemoryLimit)
|
||||
if len(violations) > 0 {
|
||||
log.Printf("MEMORY LIMIT VIOLATIONS DETECTED:")
|
||||
log.Printf("=================================")
|
||||
for _, violation := range violations {
|
||||
log.Printf("Container %s exceeded memory limit: %.1f MB > %.1f MB",
|
||||
violation.ContainerName, violation.MaxMemoryMB, violation.LimitMB)
|
||||
}
|
||||
return fmt.Errorf("test failed: %d container(s) exceeded memory limits", len(violations))
|
||||
}
|
||||
}
|
||||
|
||||
shouldCleanup := config.CleanAfter && (!config.KeepOnFailure || exitCode == 0)
|
||||
if shouldCleanup {
|
||||
if config.Verbose {
|
||||
log.Printf("Running post-test cleanup...")
|
||||
}
|
||||
if cleanErr := cleanupAfterTest(ctx, cli, resp.ID); cleanErr != nil && config.Verbose {
|
||||
log.Printf("Warning: post-test cleanup failed: %v", cleanErr)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("test execution failed: %w", err)
|
||||
}
|
||||
|
||||
if exitCode != 0 {
|
||||
return fmt.Errorf("%w: exit code %d", ErrTestFailed, exitCode)
|
||||
}
|
||||
|
||||
log.Printf("Test completed successfully!")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildGoTestCommand constructs the go test command arguments.
|
||||
func buildGoTestCommand(config *RunConfig) []string {
|
||||
cmd := []string{"go", "test", "./..."}
|
||||
|
||||
if config.TestPattern != "" {
|
||||
cmd = append(cmd, "-run", config.TestPattern)
|
||||
}
|
||||
|
||||
if config.FailFast {
|
||||
cmd = append(cmd, "-failfast")
|
||||
}
|
||||
|
||||
cmd = append(cmd, "-timeout", config.Timeout.String())
|
||||
cmd = append(cmd, "-v")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// createGoTestContainer creates a Docker container configured for running integration tests.
|
||||
func createGoTestContainer(ctx context.Context, cli *client.Client, config *RunConfig, containerName, logsDir string, goTestCmd []string) (container.CreateResponse, error) {
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return container.CreateResponse{}, fmt.Errorf("failed to get working directory: %w", err)
|
||||
}
|
||||
|
||||
projectRoot := findProjectRoot(pwd)
|
||||
|
||||
runID := dockertestutil.ExtractRunIDFromContainerName(containerName)
|
||||
|
||||
env := []string{
|
||||
fmt.Sprintf("HEADSCALE_INTEGRATION_POSTGRES=%d", boolToInt(config.UsePostgres)),
|
||||
"HEADSCALE_INTEGRATION_RUN_ID=" + runID,
|
||||
}
|
||||
containerConfig := &container.Config{
|
||||
Image: "golang:" + config.GoVersion,
|
||||
Cmd: goTestCmd,
|
||||
Env: env,
|
||||
WorkingDir: projectRoot + "/integration",
|
||||
Tty: true,
|
||||
Labels: map[string]string{
|
||||
"hi.run-id": runID,
|
||||
"hi.test-type": "test-runner",
|
||||
},
|
||||
}
|
||||
|
||||
// Get the correct Docker socket path from the current context
|
||||
dockerSocketPath := getDockerSocketPath()
|
||||
|
||||
if config.Verbose {
|
||||
log.Printf("Using Docker socket: %s", dockerSocketPath)
|
||||
}
|
||||
|
||||
hostConfig := &container.HostConfig{
|
||||
AutoRemove: false, // We'll remove manually for better control
|
||||
Binds: []string{
|
||||
fmt.Sprintf("%s:%s", projectRoot, projectRoot),
|
||||
dockerSocketPath + ":/var/run/docker.sock",
|
||||
logsDir + ":/tmp/control",
|
||||
},
|
||||
Mounts: []mount.Mount{
|
||||
{
|
||||
Type: mount.TypeVolume,
|
||||
Source: "hs-integration-go-cache",
|
||||
Target: "/go",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return cli.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, containerName)
|
||||
}
|
||||
|
||||
// streamAndWait streams container output and waits for completion.
|
||||
func streamAndWait(ctx context.Context, cli *client.Client, containerID string) (int, error) {
|
||||
out, err := cli.ContainerLogs(ctx, containerID, container.LogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Follow: true,
|
||||
})
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("failed to get container logs: %w", err)
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
go func() {
|
||||
_, _ = io.Copy(os.Stdout, out)
|
||||
}()
|
||||
|
||||
statusCh, errCh := cli.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("error waiting for container: %w", err)
|
||||
}
|
||||
case status := <-statusCh:
|
||||
return int(status.StatusCode), nil
|
||||
}
|
||||
|
||||
return -1, ErrUnexpectedContainerWait
|
||||
}
|
||||
|
||||
// waitForContainerFinalization ensures all test containers have properly finished and flushed their output.
|
||||
func waitForContainerFinalization(ctx context.Context, cli *client.Client, testContainerID string, verbose bool) error {
|
||||
// First, get all related test containers
|
||||
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list containers: %w", err)
|
||||
}
|
||||
|
||||
testContainers := getCurrentTestContainers(containers, testContainerID, verbose)
|
||||
|
||||
// Wait for all test containers to reach a final state
|
||||
maxWaitTime := 10 * time.Second
|
||||
checkInterval := 500 * time.Millisecond
|
||||
timeout := time.After(maxWaitTime)
|
||||
ticker := time.NewTicker(checkInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timeout:
|
||||
if verbose {
|
||||
log.Printf("Timeout waiting for container finalization, proceeding with artifact extraction")
|
||||
}
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
allFinalized := true
|
||||
|
||||
for _, testCont := range testContainers {
|
||||
inspect, err := cli.ContainerInspect(ctx, testCont.ID)
|
||||
if err != nil {
|
||||
if verbose {
|
||||
log.Printf("Warning: failed to inspect container %s: %v", testCont.name, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if container is in a final state
|
||||
if !isContainerFinalized(inspect.State) {
|
||||
allFinalized = false
|
||||
if verbose {
|
||||
log.Printf("Container %s still finalizing (state: %s)", testCont.name, inspect.State.Status)
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if allFinalized {
|
||||
if verbose {
|
||||
log.Printf("All test containers finalized, ready for artifact extraction")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// isContainerFinalized checks if a container has reached a final state where logs are flushed.
func isContainerFinalized(state *container.State) bool {
    // Container is finalized if it's not running and has a finish time
    return !state.Running && state.FinishedAt != ""
}

// findProjectRoot locates the project root by finding the directory containing go.mod.
func findProjectRoot(startPath string) string {
    current := startPath
    for {
        if _, err := os.Stat(filepath.Join(current, "go.mod")); err == nil {
            return current
        }
        parent := filepath.Dir(current)
        if parent == current {
            return startPath
        }
        current = parent
    }
}

// boolToInt converts a boolean to an integer for environment variables.
func boolToInt(b bool) int {
    if b {
        return 1
    }
    return 0
}

// DockerContext represents Docker context information.
type DockerContext struct {
    Name      string                 `json:"Name"`
    Metadata  map[string]interface{} `json:"Metadata"`
    Endpoints map[string]interface{} `json:"Endpoints"`
    Current   bool                   `json:"Current"`
}

// createDockerClient creates a Docker client with context detection.
func createDockerClient() (*client.Client, error) {
    contextInfo, err := getCurrentDockerContext()
    if err != nil {
        return client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    }

    var clientOpts []client.Opt
    clientOpts = append(clientOpts, client.WithAPIVersionNegotiation())

    if contextInfo != nil {
        if endpoints, ok := contextInfo.Endpoints["docker"]; ok {
            if endpointMap, ok := endpoints.(map[string]interface{}); ok {
                if host, ok := endpointMap["Host"].(string); ok {
                    if runConfig.Verbose {
                        log.Printf("Using Docker host from context '%s': %s", contextInfo.Name, host)
                    }
                    clientOpts = append(clientOpts, client.WithHost(host))
                }
            }
        }
    }

    if len(clientOpts) == 1 {
        clientOpts = append(clientOpts, client.FromEnv)
    }

    return client.NewClientWithOpts(clientOpts...)
}

// getCurrentDockerContext retrieves the current Docker context information.
func getCurrentDockerContext() (*DockerContext, error) {
    cmd := exec.Command("docker", "context", "inspect")
    output, err := cmd.Output()
    if err != nil {
        return nil, fmt.Errorf("failed to get docker context: %w", err)
    }

    var contexts []DockerContext
    if err := json.Unmarshal(output, &contexts); err != nil {
        return nil, fmt.Errorf("failed to parse docker context: %w", err)
    }

    if len(contexts) > 0 {
        return &contexts[0], nil
    }

    return nil, ErrNoDockerContext
}

// getDockerSocketPath returns the correct Docker socket path for the current context.
func getDockerSocketPath() string {
    // Always use the default socket path for mounting since Docker handles
    // the translation to the actual socket (e.g., colima socket) internally
    return "/var/run/docker.sock"
}
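A minimal sketch of how these helpers might be exercised on their own (assumes a reachable Docker daemon; everything except the three functions defined above is illustrative):

    func debugDockerSetup() {
        if info, err := getCurrentDockerContext(); err == nil {
            log.Printf("Docker context: %s", info.Name)
        }

        cli, err := createDockerClient()
        if err != nil {
            log.Fatalf("creating Docker client: %v", err)
        }
        defer cli.Close()

        log.Printf("Socket mounted into test containers: %s", getDockerSocketPath())
    }
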
|
||||
|
||||
// checkImageAvailableLocally checks if the specified Docker image is available locally.
|
||||
func checkImageAvailableLocally(ctx context.Context, cli *client.Client, imageName string) (bool, error) {
|
||||
_, _, err := cli.ImageInspectWithRaw(ctx, imageName)
|
||||
if err != nil {
|
||||
if client.IsErrNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("failed to inspect image %s: %w", imageName, err)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// ensureImageAvailable checks if the image is available locally first, then pulls if needed.
|
||||
func ensureImageAvailable(ctx context.Context, cli *client.Client, imageName string, verbose bool) error {
|
||||
// First check if image is available locally
|
||||
available, err := checkImageAvailableLocally(ctx, cli, imageName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check local image availability: %w", err)
|
||||
}
|
||||
|
||||
if available {
|
||||
if verbose {
|
||||
log.Printf("Image %s is available locally", imageName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Image not available locally, try to pull it
|
||||
if verbose {
|
||||
log.Printf("Image %s not found locally, pulling...", imageName)
|
||||
}
|
||||
|
||||
reader, err := cli.ImagePull(ctx, imageName, image.PullOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to pull image %s: %w", imageName, err)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
if verbose {
|
||||
_, err = io.Copy(os.Stdout, reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read pull output: %w", err)
|
||||
}
|
||||
} else {
|
||||
_, err = io.Copy(io.Discard, reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read pull output: %w", err)
|
||||
}
|
||||
log.Printf("Image %s pulled successfully", imageName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// listControlFiles displays the headscale test artifacts created in the control logs directory.
|
||||
func listControlFiles(logsDir string) {
|
||||
entries, err := os.ReadDir(logsDir)
|
||||
if err != nil {
|
||||
log.Printf("Logs directory: %s", logsDir)
|
||||
return
|
||||
}
|
||||
|
||||
var logFiles []string
|
||||
var dataFiles []string
|
||||
var dataDirs []string
|
||||
|
||||
for _, entry := range entries {
|
||||
name := entry.Name()
|
||||
// Only show headscale (hs-*) files and directories
|
||||
if !strings.HasPrefix(name, "hs-") {
|
||||
continue
|
||||
}
|
||||
|
||||
if entry.IsDir() {
|
||||
// Include directories (pprof, mapresponses)
|
||||
if strings.Contains(name, "-pprof") || strings.Contains(name, "-mapresponses") {
|
||||
dataDirs = append(dataDirs, name)
|
||||
}
|
||||
} else {
|
||||
// Include files
|
||||
switch {
|
||||
case strings.HasSuffix(name, ".stderr.log") || strings.HasSuffix(name, ".stdout.log"):
|
||||
logFiles = append(logFiles, name)
|
||||
case strings.HasSuffix(name, ".db"):
|
||||
dataFiles = append(dataFiles, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Test artifacts saved to: %s", logsDir)
|
||||
|
||||
if len(logFiles) > 0 {
|
||||
log.Printf("Headscale logs:")
|
||||
for _, file := range logFiles {
|
||||
log.Printf(" %s", file)
|
||||
}
|
||||
}
|
||||
|
||||
if len(dataFiles) > 0 || len(dataDirs) > 0 {
|
||||
log.Printf("Headscale data:")
|
||||
for _, file := range dataFiles {
|
||||
log.Printf(" %s", file)
|
||||
}
|
||||
for _, dir := range dataDirs {
|
||||
log.Printf(" %s/", dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// extractArtifactsFromContainers collects container logs and files from the specific test run.
|
||||
func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDir string, verbose bool) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
// List all containers
|
||||
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list containers: %w", err)
|
||||
}
|
||||
|
||||
// Get containers from the specific test run
|
||||
currentTestContainers := getCurrentTestContainers(containers, testContainerID, verbose)
|
||||
|
||||
extractedCount := 0
|
||||
for _, cont := range currentTestContainers {
|
||||
// Extract container logs and tar files
|
||||
if err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose); err != nil {
|
||||
if verbose {
|
||||
log.Printf("Warning: failed to extract artifacts from container %s (%s): %v", cont.name, cont.ID[:12], err)
|
||||
}
|
||||
} else {
|
||||
if verbose {
|
||||
log.Printf("Extracted artifacts from container %s (%s)", cont.name, cont.ID[:12])
|
||||
}
|
||||
extractedCount++
|
||||
}
|
||||
}
|
||||
|
||||
if verbose && extractedCount > 0 {
|
||||
log.Printf("Extracted artifacts from %d containers", extractedCount)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testContainer represents a container from the current test run.
|
||||
type testContainer struct {
|
||||
ID string
|
||||
name string
|
||||
}
|
||||
|
||||
// getCurrentTestContainers filters containers to only include those from the current test run.
|
||||
func getCurrentTestContainers(containers []container.Summary, testContainerID string, verbose bool) []testContainer {
|
||||
var testRunContainers []testContainer
|
||||
|
||||
// Find the test container to get its run ID label
|
||||
var runID string
|
||||
for _, cont := range containers {
|
||||
if cont.ID == testContainerID {
|
||||
if cont.Labels != nil {
|
||||
runID = cont.Labels["hi.run-id"]
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if runID == "" {
|
||||
log.Printf("Error: test container %s missing required hi.run-id label", testContainerID[:12])
|
||||
return testRunContainers
|
||||
}
|
||||
|
||||
if verbose {
|
||||
log.Printf("Looking for containers with run ID: %s", runID)
|
||||
}
|
||||
|
||||
// Find all containers with the same run ID
|
||||
for _, cont := range containers {
|
||||
for _, name := range cont.Names {
|
||||
containerName := strings.TrimPrefix(name, "/")
|
||||
if strings.HasPrefix(containerName, "hs-") || strings.HasPrefix(containerName, "ts-") {
|
||||
// Check if container has matching run ID label
|
||||
if cont.Labels != nil && cont.Labels["hi.run-id"] == runID {
|
||||
testRunContainers = append(testRunContainers, testContainer{
|
||||
ID: cont.ID,
|
||||
name: containerName,
|
||||
})
|
||||
if verbose {
|
||||
log.Printf("Including container %s (run ID: %s)", containerName, runID)
|
||||
}
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return testRunContainers
|
||||
}
|
||||
|
||||
// extractContainerArtifacts saves logs and tar files from a container.
|
||||
func extractContainerArtifacts(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
|
||||
// Ensure the logs directory exists
|
||||
if err := os.MkdirAll(logsDir, 0o755); err != nil {
|
||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
||||
}
|
||||
|
||||
// Extract container logs
|
||||
if err := extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
|
||||
return fmt.Errorf("failed to extract logs: %w", err)
|
||||
}
|
||||
|
||||
// Extract tar files for headscale containers only
|
||||
if strings.HasPrefix(containerName, "hs-") {
|
||||
if err := extractContainerFiles(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
|
||||
if verbose {
|
||||
log.Printf("Warning: failed to extract files from %s: %v", containerName, err)
|
||||
}
|
||||
// Don't fail the whole extraction if files are missing
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractContainerLogs saves the stdout and stderr logs from a container to files.
|
||||
func extractContainerLogs(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
|
||||
// Get container logs
|
||||
logReader, err := cli.ContainerLogs(ctx, containerID, container.LogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Timestamps: false,
|
||||
Follow: false,
|
||||
Tail: "all",
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get container logs: %w", err)
|
||||
}
|
||||
defer logReader.Close()
|
||||
|
||||
// Create log files following the headscale naming convention
|
||||
stdoutPath := filepath.Join(logsDir, containerName+".stdout.log")
|
||||
stderrPath := filepath.Join(logsDir, containerName+".stderr.log")
|
||||
|
||||
// Create buffers to capture stdout and stderr separately
|
||||
var stdoutBuf, stderrBuf bytes.Buffer
|
||||
|
||||
// Demultiplex the Docker logs stream to separate stdout and stderr
|
||||
_, err = stdcopy.StdCopy(&stdoutBuf, &stderrBuf, logReader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to demultiplex container logs: %w", err)
|
||||
}
|
||||
|
||||
// Write stdout logs
|
||||
if err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0o644); err != nil {
|
||||
return fmt.Errorf("failed to write stdout log: %w", err)
|
||||
}
|
||||
|
||||
// Write stderr logs
|
||||
if err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0o644); err != nil {
|
||||
return fmt.Errorf("failed to write stderr log: %w", err)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
log.Printf("Saved logs for %s: %s, %s", containerName, stdoutPath, stderrPath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractContainerFiles extracts database file and directories from headscale containers.
|
||||
// Note: The actual file extraction is now handled by the integration tests themselves
|
||||
// via SaveProfile, SaveMapResponses, and SaveDatabase functions in hsic.go.
|
||||
func extractContainerFiles(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
|
||||
// Files are now extracted directly by the integration tests
|
||||
// This function is kept for potential future use or other file types
|
||||
return nil
|
||||
}
|
||||
|
||||
// logExtractionError logs extraction errors with appropriate level based on error type.
|
||||
func logExtractionError(artifactType, containerName string, err error, verbose bool) {
|
||||
if errors.Is(err, ErrFileNotFoundInTar) {
|
||||
// File not found is expected and only logged in verbose mode
|
||||
if verbose {
|
||||
log.Printf("No %s found in container %s", artifactType, containerName)
|
||||
}
|
||||
} else {
|
||||
// Other errors are actual failures and should be logged as warnings
|
||||
log.Printf("Warning: failed to extract %s from %s: %v", artifactType, containerName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// extractSingleFile copies a single file from a container.
|
||||
func extractSingleFile(ctx context.Context, cli *client.Client, containerID, sourcePath, fileName, logsDir string, verbose bool) error {
|
||||
tarReader, _, err := cli.CopyFromContainer(ctx, containerID, sourcePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy %s from container: %w", sourcePath, err)
|
||||
}
|
||||
defer tarReader.Close()
|
||||
|
||||
// Extract the single file from the tar
|
||||
filePath := filepath.Join(logsDir, fileName)
|
||||
if err := extractFileFromTar(tarReader, filepath.Base(sourcePath), filePath); err != nil {
|
||||
return fmt.Errorf("failed to extract file from tar: %w", err)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
log.Printf("Extracted %s from %s", fileName, containerID[:12])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractDirectory copies a directory from a container and extracts its contents.
|
||||
func extractDirectory(ctx context.Context, cli *client.Client, containerID, sourcePath, dirName, logsDir string, verbose bool) error {
|
||||
tarReader, _, err := cli.CopyFromContainer(ctx, containerID, sourcePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy %s from container: %w", sourcePath, err)
|
||||
}
|
||||
defer tarReader.Close()
|
||||
|
||||
// Create target directory
|
||||
targetDir := filepath.Join(logsDir, dirName)
|
||||
if err := os.MkdirAll(targetDir, 0o755); err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %w", targetDir, err)
|
||||
}
|
||||
|
||||
// Extract the directory from the tar
|
||||
if err := extractDirectoryFromTar(tarReader, targetDir); err != nil {
|
||||
return fmt.Errorf("failed to extract directory from tar: %w", err)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
log.Printf("Extracted %s/ from %s", dirName, containerID[:12])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
374    cmd/hi/doctor.go    Normal file
@@ -0,0 +1,374 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var ErrSystemChecksFailed = errors.New("system checks failed")
|
||||
|
||||
// DoctorResult represents the result of a single health check.
|
||||
type DoctorResult struct {
|
||||
Name string
|
||||
Status string // "PASS", "FAIL", "WARN"
|
||||
Message string
|
||||
Suggestions []string
|
||||
}
|
||||
|
||||
// runDoctorCheck performs comprehensive pre-flight checks for integration testing.
|
||||
func runDoctorCheck(ctx context.Context) error {
|
||||
results := []DoctorResult{}
|
||||
|
||||
// Check 1: Docker binary availability
|
||||
results = append(results, checkDockerBinary())
|
||||
|
||||
// Check 2: Docker daemon connectivity
|
||||
dockerResult := checkDockerDaemon(ctx)
|
||||
results = append(results, dockerResult)
|
||||
|
||||
// If Docker is available, run additional checks
|
||||
if dockerResult.Status == "PASS" {
|
||||
results = append(results, checkDockerContext(ctx))
|
||||
results = append(results, checkDockerSocket(ctx))
|
||||
results = append(results, checkGolangImage(ctx))
|
||||
}
|
||||
|
||||
// Check 3: Go installation
|
||||
results = append(results, checkGoInstallation())
|
||||
|
||||
// Check 4: Git repository
|
||||
results = append(results, checkGitRepository())
|
||||
|
||||
// Check 5: Required files
|
||||
results = append(results, checkRequiredFiles())
|
||||
|
||||
// Display results
|
||||
displayDoctorResults(results)
|
||||
|
||||
// Return error if any critical checks failed
|
||||
for _, result := range results {
|
||||
if result.Status == "FAIL" {
|
||||
return fmt.Errorf("%w - see details above", ErrSystemChecksFailed)
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("✅ All system checks passed - ready to run integration tests!")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkDockerBinary verifies Docker binary is available.
|
||||
func checkDockerBinary() DoctorResult {
|
||||
_, err := exec.LookPath("docker")
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Binary",
|
||||
Status: "FAIL",
|
||||
Message: "Docker binary not found in PATH",
|
||||
Suggestions: []string{
|
||||
"Install Docker: https://docs.docker.com/get-docker/",
|
||||
"For macOS: consider using colima or Docker Desktop",
|
||||
"Ensure docker is in your PATH",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Docker Binary",
|
||||
Status: "PASS",
|
||||
Message: "Docker binary found",
|
||||
}
|
||||
}
|
||||
|
||||
// checkDockerDaemon verifies Docker daemon is running and accessible.
|
||||
func checkDockerDaemon(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Daemon",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot create Docker client: %v", err),
|
||||
Suggestions: []string{
|
||||
"Start Docker daemon/service",
|
||||
"Check Docker Desktop is running (if using Docker Desktop)",
|
||||
"For colima: run 'colima start'",
|
||||
"Verify DOCKER_HOST environment variable if set",
|
||||
},
|
||||
}
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
_, err = cli.Ping(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Daemon",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot ping Docker daemon: %v", err),
|
||||
Suggestions: []string{
|
||||
"Ensure Docker daemon is running",
|
||||
"Check Docker socket permissions",
|
||||
"Try: docker info",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Docker Daemon",
|
||||
Status: "PASS",
|
||||
Message: "Docker daemon is running and accessible",
|
||||
}
|
||||
}
|
||||
|
||||
// checkDockerContext verifies Docker context configuration.
|
||||
func checkDockerContext(_ context.Context) DoctorResult {
|
||||
contextInfo, err := getCurrentDockerContext()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Context",
|
||||
Status: "WARN",
|
||||
Message: "Could not detect Docker context, using default settings",
|
||||
Suggestions: []string{
|
||||
"Check: docker context ls",
|
||||
"Consider setting up a specific context if needed",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if contextInfo == nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Context",
|
||||
Status: "PASS",
|
||||
Message: "Using default Docker context",
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Docker Context",
|
||||
Status: "PASS",
|
||||
Message: "Using Docker context: " + contextInfo.Name,
|
||||
}
|
||||
}
|
||||
|
||||
// checkDockerSocket verifies Docker socket accessibility.
|
||||
func checkDockerSocket(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Socket",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot access Docker socket: %v", err),
|
||||
Suggestions: []string{
|
||||
"Check Docker socket permissions",
|
||||
"Add user to docker group: sudo usermod -aG docker $USER",
|
||||
"For colima: ensure socket is accessible",
|
||||
},
|
||||
}
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
info, err := cli.Info(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Socket",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot get Docker info: %v", err),
|
||||
Suggestions: []string{
|
||||
"Check Docker daemon status",
|
||||
"Verify socket permissions",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Docker Socket",
|
||||
Status: "PASS",
|
||||
Message: fmt.Sprintf("Docker socket accessible (Server: %s)", info.ServerVersion),
|
||||
}
|
||||
}
|
||||
|
||||
// checkGolangImage verifies the golang Docker image is available locally or can be pulled.
|
||||
func checkGolangImage(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Golang Image",
|
||||
Status: "FAIL",
|
||||
Message: "Cannot create Docker client for image check",
|
||||
}
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
goVersion := detectGoVersion()
|
||||
imageName := "golang:" + goVersion
|
||||
|
||||
// First check if image is available locally
|
||||
available, err := checkImageAvailableLocally(ctx, cli, imageName)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Golang Image",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot check golang image %s: %v", imageName, err),
|
||||
Suggestions: []string{
|
||||
"Check Docker daemon status",
|
||||
"Try: docker images | grep golang",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if available {
|
||||
return DoctorResult{
|
||||
Name: "Golang Image",
|
||||
Status: "PASS",
|
||||
Message: fmt.Sprintf("Golang image %s is available locally", imageName),
|
||||
}
|
||||
}
|
||||
|
||||
// Image not available locally, try to pull it
|
||||
err = ensureImageAvailable(ctx, cli, imageName, false)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Golang Image",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Golang image %s not available locally and cannot pull: %v", imageName, err),
|
||||
Suggestions: []string{
|
||||
"Check internet connectivity",
|
||||
"Verify Docker Hub access",
|
||||
"Try: docker pull " + imageName,
|
||||
"Or run tests offline if image was pulled previously",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Golang Image",
|
||||
Status: "PASS",
|
||||
Message: fmt.Sprintf("Golang image %s is now available", imageName),
|
||||
}
|
||||
}
|
||||
|
||||
// checkGoInstallation verifies Go is installed and working.
|
||||
func checkGoInstallation() DoctorResult {
|
||||
_, err := exec.LookPath("go")
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Go Installation",
|
||||
Status: "FAIL",
|
||||
Message: "Go binary not found in PATH",
|
||||
Suggestions: []string{
|
||||
"Install Go: https://golang.org/dl/",
|
||||
"Ensure go is in your PATH",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", "version")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Go Installation",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot get Go version: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
version := strings.TrimSpace(string(output))
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Go Installation",
|
||||
Status: "PASS",
|
||||
Message: version,
|
||||
}
|
||||
}
|
||||
|
||||
// checkGitRepository verifies we're in a git repository.
|
||||
func checkGitRepository() DoctorResult {
|
||||
cmd := exec.Command("git", "rev-parse", "--git-dir")
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Git Repository",
|
||||
Status: "FAIL",
|
||||
Message: "Not in a Git repository",
|
||||
Suggestions: []string{
|
||||
"Run from within the headscale git repository",
|
||||
"Clone the repository: git clone https://github.com/juanfont/headscale.git",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Git Repository",
|
||||
Status: "PASS",
|
||||
Message: "Running in Git repository",
|
||||
}
|
||||
}
|
||||
|
||||
// checkRequiredFiles verifies required files exist.
|
||||
func checkRequiredFiles() DoctorResult {
|
||||
requiredFiles := []string{
|
||||
"go.mod",
|
||||
"integration/",
|
||||
"cmd/hi/",
|
||||
}
|
||||
|
||||
var missingFiles []string
|
||||
for _, file := range requiredFiles {
|
||||
cmd := exec.Command("test", "-e", file)
|
||||
if err := cmd.Run(); err != nil {
|
||||
missingFiles = append(missingFiles, file)
|
||||
}
|
||||
}
|
||||
|
||||
if len(missingFiles) > 0 {
|
||||
return DoctorResult{
|
||||
Name: "Required Files",
|
||||
Status: "FAIL",
|
||||
Message: "Missing required files: " + strings.Join(missingFiles, ", "),
|
||||
Suggestions: []string{
|
||||
"Ensure you're in the headscale project root directory",
|
||||
"Check that integration/ directory exists",
|
||||
"Verify this is a complete headscale repository",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Required Files",
|
||||
Status: "PASS",
|
||||
Message: "All required files found",
|
||||
}
|
||||
}
|
||||
|
||||
// displayDoctorResults shows the results in a formatted way.
|
||||
func displayDoctorResults(results []DoctorResult) {
|
||||
log.Printf("🔍 System Health Check Results")
|
||||
log.Printf("================================")
|
||||
|
||||
for _, result := range results {
|
||||
var icon string
|
||||
switch result.Status {
|
||||
case "PASS":
|
||||
icon = "✅"
|
||||
case "WARN":
|
||||
icon = "⚠️"
|
||||
case "FAIL":
|
||||
icon = "❌"
|
||||
default:
|
||||
icon = "❓"
|
||||
}
|
||||
|
||||
log.Printf("%s %s: %s", icon, result.Name, result.Message)
|
||||
|
||||
if len(result.Suggestions) > 0 {
|
||||
for _, suggestion := range result.Suggestions {
|
||||
log.Printf(" 💡 %s", suggestion)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("================================")
|
||||
}
|
||||
93
cmd/hi/main.go
Normal file
93
cmd/hi/main.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/creachadair/command"
|
||||
"github.com/creachadair/flax"
|
||||
)
|
||||
|
||||
var runConfig RunConfig
|
||||
|
||||
func main() {
|
||||
root := command.C{
|
||||
Name: "hi",
|
||||
Help: "Headscale Integration test runner",
|
||||
Commands: []*command.C{
|
||||
{
|
||||
Name: "run",
|
||||
Help: "Run integration tests",
|
||||
Usage: "run [test-pattern] [flags]",
|
||||
SetFlags: command.Flags(flax.MustBind, &runConfig),
|
||||
Run: runIntegrationTest,
|
||||
},
|
||||
{
|
||||
Name: "doctor",
|
||||
Help: "Check system requirements for running integration tests",
|
||||
Run: func(env *command.Env) error {
|
||||
return runDoctorCheck(env.Context())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "clean",
|
||||
Help: "Clean Docker resources",
|
||||
Commands: []*command.C{
|
||||
{
|
||||
Name: "networks",
|
||||
Help: "Prune unused Docker networks",
|
||||
Run: func(env *command.Env) error {
|
||||
return pruneDockerNetworks(env.Context())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "images",
|
||||
Help: "Clean old test images",
|
||||
Run: func(env *command.Env) error {
|
||||
return cleanOldImages(env.Context())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "containers",
|
||||
Help: "Kill all test containers",
|
||||
Run: func(env *command.Env) error {
|
||||
return killTestContainers(env.Context())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "cache",
|
||||
Help: "Clean Go module cache volume",
|
||||
Run: func(env *command.Env) error {
|
||||
return cleanCacheVolume(env.Context())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "all",
|
||||
Help: "Run all cleanup operations",
|
||||
Run: func(env *command.Env) error {
|
||||
return cleanAll(env.Context())
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
command.HelpCommand(nil),
|
||||
},
|
||||
}
|
||||
|
||||
env := root.NewEnv(nil).MergeFlags(true)
|
||||
command.RunOrFail(env, os.Args[1:])
|
||||
}
|
||||
|
||||
func cleanAll(ctx context.Context) error {
|
||||
if err := killTestContainers(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pruneDockerNetworks(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cleanOldImages(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cleanCacheVolume(ctx)
|
||||
}
|
||||
125
cmd/hi/run.go
Normal file
125
cmd/hi/run.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/creachadair/command"
|
||||
)
|
||||
|
||||
var ErrTestPatternRequired = errors.New("test pattern is required as first argument or use --test flag")
|
||||
|
||||
type RunConfig struct {
|
||||
TestPattern string `flag:"test,Test pattern to run"`
|
||||
Timeout time.Duration `flag:"timeout,default=120m,Test timeout"`
|
||||
FailFast bool `flag:"failfast,default=true,Stop on first test failure"`
|
||||
UsePostgres bool `flag:"postgres,default=false,Use PostgreSQL instead of SQLite"`
|
||||
GoVersion string `flag:"go-version,Go version to use (auto-detected from go.mod)"`
|
||||
CleanBefore bool `flag:"clean-before,default=true,Clean resources before test"`
|
||||
CleanAfter bool `flag:"clean-after,default=true,Clean resources after test"`
|
||||
KeepOnFailure bool `flag:"keep-on-failure,default=false,Keep containers on test failure"`
|
||||
LogsDir string `flag:"logs-dir,default=control_logs,Control logs directory"`
|
||||
Verbose bool `flag:"verbose,default=false,Verbose output"`
|
||||
Stats bool `flag:"stats,default=false,Collect and display container resource usage statistics"`
|
||||
HSMemoryLimit float64 `flag:"hs-memory-limit,default=0,Fail test if any Headscale container exceeds this memory limit in MB (0 = disabled)"`
|
||||
TSMemoryLimit float64 `flag:"ts-memory-limit,default=0,Fail test if any Tailscale container exceeds this memory limit in MB (0 = disabled)"`
|
||||
}
|
||||
|
||||
// runIntegrationTest executes the integration test workflow.
|
||||
func runIntegrationTest(env *command.Env) error {
|
||||
args := env.Args
|
||||
if len(args) > 0 && runConfig.TestPattern == "" {
|
||||
runConfig.TestPattern = args[0]
|
||||
}
|
||||
|
||||
if runConfig.TestPattern == "" {
|
||||
return ErrTestPatternRequired
|
||||
}
|
||||
|
||||
if runConfig.GoVersion == "" {
|
||||
runConfig.GoVersion = detectGoVersion()
|
||||
}
|
||||
|
||||
// Run pre-flight checks
|
||||
if runConfig.Verbose {
|
||||
log.Printf("Running pre-flight system checks...")
|
||||
}
|
||||
if err := runDoctorCheck(env.Context()); err != nil {
|
||||
return fmt.Errorf("pre-flight checks failed: %w", err)
|
||||
}
|
||||
|
||||
if runConfig.Verbose {
|
||||
log.Printf("Running test: %s", runConfig.TestPattern)
|
||||
log.Printf("Go version: %s", runConfig.GoVersion)
|
||||
log.Printf("Timeout: %s", runConfig.Timeout)
|
||||
log.Printf("Use PostgreSQL: %t", runConfig.UsePostgres)
|
||||
}
|
||||
|
||||
return runTestContainer(env.Context(), &runConfig)
|
||||
}
|
||||
|
||||
// detectGoVersion reads the Go version from go.mod file.
|
||||
func detectGoVersion() string {
|
||||
goModPath := filepath.Join("..", "..", "go.mod")
|
||||
|
||||
if _, err := os.Stat("go.mod"); err == nil {
|
||||
goModPath = "go.mod"
|
||||
} else if _, err := os.Stat("../../go.mod"); err == nil {
|
||||
goModPath = "../../go.mod"
|
||||
}
|
||||
|
||||
content, err := os.ReadFile(goModPath)
|
||||
if err != nil {
|
||||
return "1.24"
|
||||
}
|
||||
|
||||
lines := splitLines(string(content))
|
||||
for _, line := range lines {
|
||||
if len(line) > 3 && line[:3] == "go " {
|
||||
version := line[3:]
|
||||
if idx := indexOf(version, " "); idx != -1 {
|
||||
version = version[:idx]
|
||||
}
|
||||
|
||||
return version
|
||||
}
|
||||
}
|
||||
|
||||
return "1.24"
|
||||
}
|
||||
|
||||
// splitLines splits a string into lines without using strings.Split.
|
||||
func splitLines(s string) []string {
|
||||
var lines []string
|
||||
var current string
|
||||
|
||||
for _, char := range s {
|
||||
if char == '\n' {
|
||||
lines = append(lines, current)
|
||||
current = ""
|
||||
} else {
|
||||
current += string(char)
|
||||
}
|
||||
}
|
||||
|
||||
if current != "" {
|
||||
lines = append(lines, current)
|
||||
}
|
||||
|
||||
return lines
|
||||
}
|
||||
|
||||
// indexOf finds the first occurrence of substr in s.
|
||||
func indexOf(s, substr string) int {
|
||||
for i := 0; i <= len(s)-len(substr); i++ {
|
||||
if s[i:i+len(substr)] == substr {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
468
cmd/hi/stats.go
Normal file
468
cmd/hi/stats.go
Normal file
@@ -0,0 +1,468 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// ContainerStats represents statistics for a single container
|
||||
type ContainerStats struct {
|
||||
ContainerID string
|
||||
ContainerName string
|
||||
Stats []StatsSample
|
||||
mutex sync.RWMutex
|
||||
}
|
||||
|
||||
// StatsSample represents a single stats measurement
|
||||
type StatsSample struct {
|
||||
Timestamp time.Time
|
||||
CPUUsage float64 // CPU usage percentage
|
||||
MemoryMB float64 // Memory usage in MB
|
||||
}
|
||||
|
||||
// StatsCollector manages collection of container statistics
|
||||
type StatsCollector struct {
|
||||
client *client.Client
|
||||
containers map[string]*ContainerStats
|
||||
stopChan chan struct{}
|
||||
wg sync.WaitGroup
|
||||
mutex sync.RWMutex
|
||||
collectionStarted bool
|
||||
}
|
||||
|
||||
// NewStatsCollector creates a new stats collector instance
|
||||
func NewStatsCollector() (*StatsCollector, error) {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
|
||||
return &StatsCollector{
|
||||
client: cli,
|
||||
containers: make(map[string]*ContainerStats),
|
||||
stopChan: make(chan struct{}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// StartCollection begins monitoring all containers and collecting stats for hs- and ts- containers with matching run ID
|
||||
func (sc *StatsCollector) StartCollection(ctx context.Context, runID string, verbose bool) error {
|
||||
sc.mutex.Lock()
|
||||
defer sc.mutex.Unlock()
|
||||
|
||||
if sc.collectionStarted {
|
||||
return fmt.Errorf("stats collection already started")
|
||||
}
|
||||
|
||||
sc.collectionStarted = true
|
||||
|
||||
// Start monitoring existing containers
|
||||
sc.wg.Add(1)
|
||||
go sc.monitorExistingContainers(ctx, runID, verbose)
|
||||
|
||||
// Start Docker events monitoring for new containers
|
||||
sc.wg.Add(1)
|
||||
go sc.monitorDockerEvents(ctx, runID, verbose)
|
||||
|
||||
if verbose {
|
||||
log.Printf("Started container monitoring for run ID %s", runID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopCollection stops all stats collection
|
||||
func (sc *StatsCollector) StopCollection() {
|
||||
// Check if already stopped without holding lock
|
||||
sc.mutex.RLock()
|
||||
if !sc.collectionStarted {
|
||||
sc.mutex.RUnlock()
|
||||
return
|
||||
}
|
||||
sc.mutex.RUnlock()
|
||||
|
||||
// Signal stop to all goroutines
|
||||
close(sc.stopChan)
|
||||
|
||||
// Wait for all goroutines to finish
|
||||
sc.wg.Wait()
|
||||
|
||||
// Mark as stopped
|
||||
sc.mutex.Lock()
|
||||
sc.collectionStarted = false
|
||||
sc.mutex.Unlock()
|
||||
}
|
||||
|
||||
// monitorExistingContainers checks for existing containers that match our criteria
|
||||
func (sc *StatsCollector) monitorExistingContainers(ctx context.Context, runID string, verbose bool) {
|
||||
defer sc.wg.Done()
|
||||
|
||||
containers, err := sc.client.ContainerList(ctx, container.ListOptions{})
|
||||
if err != nil {
|
||||
if verbose {
|
||||
log.Printf("Failed to list existing containers: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for _, cont := range containers {
|
||||
if sc.shouldMonitorContainer(cont, runID) {
|
||||
sc.startStatsForContainer(ctx, cont.ID, cont.Names[0], verbose)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// monitorDockerEvents listens for container start events and begins monitoring relevant containers
|
||||
func (sc *StatsCollector) monitorDockerEvents(ctx context.Context, runID string, verbose bool) {
|
||||
defer sc.wg.Done()
|
||||
|
||||
filter := filters.NewArgs()
|
||||
filter.Add("type", "container")
|
||||
filter.Add("event", "start")
|
||||
|
||||
eventOptions := events.ListOptions{
|
||||
Filters: filter,
|
||||
}
|
||||
|
||||
events, errs := sc.client.Events(ctx, eventOptions)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-sc.stopChan:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case event := <-events:
|
||||
if event.Type == "container" && event.Action == "start" {
|
||||
// Get container details
|
||||
containerInfo, err := sc.client.ContainerInspect(ctx, event.ID)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Convert to types.Container format for consistency
|
||||
cont := types.Container{
|
||||
ID: containerInfo.ID,
|
||||
Names: []string{containerInfo.Name},
|
||||
Labels: containerInfo.Config.Labels,
|
||||
}
|
||||
|
||||
if sc.shouldMonitorContainer(cont, runID) {
|
||||
sc.startStatsForContainer(ctx, cont.ID, cont.Names[0], verbose)
|
||||
}
|
||||
}
|
||||
case err := <-errs:
|
||||
if verbose {
|
||||
log.Printf("Error in Docker events stream: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// shouldMonitorContainer determines if a container should be monitored
|
||||
func (sc *StatsCollector) shouldMonitorContainer(cont types.Container, runID string) bool {
|
||||
// Check if it has the correct run ID label
|
||||
if cont.Labels == nil || cont.Labels["hi.run-id"] != runID {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if it's an hs- or ts- container
|
||||
for _, name := range cont.Names {
|
||||
containerName := strings.TrimPrefix(name, "/")
|
||||
if strings.HasPrefix(containerName, "hs-") || strings.HasPrefix(containerName, "ts-") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// startStatsForContainer begins stats collection for a specific container
|
||||
func (sc *StatsCollector) startStatsForContainer(ctx context.Context, containerID, containerName string, verbose bool) {
|
||||
containerName = strings.TrimPrefix(containerName, "/")
|
||||
|
||||
sc.mutex.Lock()
|
||||
// Check if we're already monitoring this container
|
||||
if _, exists := sc.containers[containerID]; exists {
|
||||
sc.mutex.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
sc.containers[containerID] = &ContainerStats{
|
||||
ContainerID: containerID,
|
||||
ContainerName: containerName,
|
||||
Stats: make([]StatsSample, 0),
|
||||
}
|
||||
sc.mutex.Unlock()
|
||||
|
||||
if verbose {
|
||||
log.Printf("Starting stats collection for container %s (%s)", containerName, containerID[:12])
|
||||
}
|
||||
|
||||
sc.wg.Add(1)
|
||||
go sc.collectStatsForContainer(ctx, containerID, verbose)
|
||||
}
|
||||
|
||||
// collectStatsForContainer collects stats for a specific container using Docker API streaming
|
||||
func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containerID string, verbose bool) {
|
||||
defer sc.wg.Done()
|
||||
|
||||
// Use Docker API streaming stats - much more efficient than CLI
|
||||
statsResponse, err := sc.client.ContainerStats(ctx, containerID, true)
|
||||
if err != nil {
|
||||
if verbose {
|
||||
log.Printf("Failed to get stats stream for container %s: %v", containerID[:12], err)
|
||||
}
|
||||
return
|
||||
}
|
||||
defer statsResponse.Body.Close()
|
||||
|
||||
decoder := json.NewDecoder(statsResponse.Body)
|
||||
var prevStats *container.Stats
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-sc.stopChan:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
var stats container.Stats
|
||||
if err := decoder.Decode(&stats); err != nil {
|
||||
// EOF is expected when container stops or stream ends
|
||||
if err.Error() != "EOF" && verbose {
|
||||
log.Printf("Failed to decode stats for container %s: %v", containerID[:12], err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Calculate CPU percentage (only if we have previous stats)
|
||||
var cpuPercent float64
|
||||
if prevStats != nil {
|
||||
cpuPercent = calculateCPUPercent(prevStats, &stats)
|
||||
}
|
||||
|
||||
// Calculate memory usage in MB
|
||||
memoryMB := float64(stats.MemoryStats.Usage) / (1024 * 1024)
|
||||
|
||||
// Store the sample (skip first sample since CPU calculation needs previous stats)
|
||||
if prevStats != nil {
|
||||
// Get container stats reference without holding the main mutex
|
||||
var containerStats *ContainerStats
|
||||
var exists bool
|
||||
|
||||
sc.mutex.RLock()
|
||||
containerStats, exists = sc.containers[containerID]
|
||||
sc.mutex.RUnlock()
|
||||
|
||||
if exists && containerStats != nil {
|
||||
containerStats.mutex.Lock()
|
||||
containerStats.Stats = append(containerStats.Stats, StatsSample{
|
||||
Timestamp: time.Now(),
|
||||
CPUUsage: cpuPercent,
|
||||
MemoryMB: memoryMB,
|
||||
})
|
||||
containerStats.mutex.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Save current stats for next iteration
|
||||
prevStats = &stats
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// calculateCPUPercent calculates CPU usage percentage from Docker stats
|
||||
func calculateCPUPercent(prevStats, stats *container.Stats) float64 {
|
||||
// CPU calculation based on Docker's implementation
|
||||
cpuDelta := float64(stats.CPUStats.CPUUsage.TotalUsage) - float64(prevStats.CPUStats.CPUUsage.TotalUsage)
|
||||
systemDelta := float64(stats.CPUStats.SystemUsage) - float64(prevStats.CPUStats.SystemUsage)
|
||||
|
||||
if systemDelta > 0 && cpuDelta >= 0 {
|
||||
// Calculate CPU percentage: (container CPU delta / system CPU delta) * number of CPUs * 100
|
||||
numCPUs := float64(len(stats.CPUStats.CPUUsage.PercpuUsage))
|
||||
if numCPUs == 0 {
|
||||
// Fallback: if PercpuUsage is not available, assume 1 CPU
|
||||
numCPUs = 1.0
|
||||
}
|
||||
return (cpuDelta / systemDelta) * numCPUs * 100.0
|
||||
}
|
||||
return 0.0
|
||||
}
|
||||
|
||||
// ContainerStatsSummary represents summary statistics for a container
|
||||
type ContainerStatsSummary struct {
|
||||
ContainerName string
|
||||
SampleCount int
|
||||
CPU StatsSummary
|
||||
Memory StatsSummary
|
||||
}
|
||||
|
||||
// MemoryViolation represents a container that exceeded the memory limit
|
||||
type MemoryViolation struct {
|
||||
ContainerName string
|
||||
MaxMemoryMB float64
|
||||
LimitMB float64
|
||||
}
|
||||
|
||||
// StatsSummary represents min, max, and average for a metric
|
||||
type StatsSummary struct {
|
||||
Min float64
|
||||
Max float64
|
||||
Average float64
|
||||
}
|
||||
|
||||
// GetSummary returns a summary of collected statistics
|
||||
func (sc *StatsCollector) GetSummary() []ContainerStatsSummary {
|
||||
// Take snapshot of container references without holding main lock long
|
||||
sc.mutex.RLock()
|
||||
containerRefs := make([]*ContainerStats, 0, len(sc.containers))
|
||||
for _, containerStats := range sc.containers {
|
||||
containerRefs = append(containerRefs, containerStats)
|
||||
}
|
||||
sc.mutex.RUnlock()
|
||||
|
||||
summaries := make([]ContainerStatsSummary, 0, len(containerRefs))
|
||||
|
||||
for _, containerStats := range containerRefs {
|
||||
containerStats.mutex.RLock()
|
||||
stats := make([]StatsSample, len(containerStats.Stats))
|
||||
copy(stats, containerStats.Stats)
|
||||
containerName := containerStats.ContainerName
|
||||
containerStats.mutex.RUnlock()
|
||||
|
||||
if len(stats) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
summary := ContainerStatsSummary{
|
||||
ContainerName: containerName,
|
||||
SampleCount: len(stats),
|
||||
}
|
||||
|
||||
// Calculate CPU stats
|
||||
cpuValues := make([]float64, len(stats))
|
||||
memoryValues := make([]float64, len(stats))
|
||||
|
||||
for i, sample := range stats {
|
||||
cpuValues[i] = sample.CPUUsage
|
||||
memoryValues[i] = sample.MemoryMB
|
||||
}
|
||||
|
||||
summary.CPU = calculateStatsSummary(cpuValues)
|
||||
summary.Memory = calculateStatsSummary(memoryValues)
|
||||
|
||||
summaries = append(summaries, summary)
|
||||
}
|
||||
|
||||
// Sort by container name for consistent output
|
||||
sort.Slice(summaries, func(i, j int) bool {
|
||||
return summaries[i].ContainerName < summaries[j].ContainerName
|
||||
})
|
||||
|
||||
return summaries
|
||||
}
|
||||
|
||||
// calculateStatsSummary calculates min, max, and average for a slice of values
|
||||
func calculateStatsSummary(values []float64) StatsSummary {
|
||||
if len(values) == 0 {
|
||||
return StatsSummary{}
|
||||
}
|
||||
|
||||
min := values[0]
|
||||
max := values[0]
|
||||
sum := 0.0
|
||||
|
||||
for _, value := range values {
|
||||
if value < min {
|
||||
min = value
|
||||
}
|
||||
if value > max {
|
||||
max = value
|
||||
}
|
||||
sum += value
|
||||
}
|
||||
|
||||
return StatsSummary{
|
||||
Min: min,
|
||||
Max: max,
|
||||
Average: sum / float64(len(values)),
|
||||
}
|
||||
}
|
||||
|
||||
// PrintSummary prints the statistics summary to the console
|
||||
func (sc *StatsCollector) PrintSummary() {
|
||||
summaries := sc.GetSummary()
|
||||
|
||||
if len(summaries) == 0 {
|
||||
log.Printf("No container statistics collected")
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("Container Resource Usage Summary:")
|
||||
log.Printf("================================")
|
||||
|
||||
for _, summary := range summaries {
|
||||
log.Printf("Container: %s (%d samples)", summary.ContainerName, summary.SampleCount)
|
||||
log.Printf(" CPU Usage: Min: %6.2f%% Max: %6.2f%% Avg: %6.2f%%",
|
||||
summary.CPU.Min, summary.CPU.Max, summary.CPU.Average)
|
||||
log.Printf(" Memory Usage: Min: %6.1f MB Max: %6.1f MB Avg: %6.1f MB",
|
||||
summary.Memory.Min, summary.Memory.Max, summary.Memory.Average)
|
||||
log.Printf("")
|
||||
}
|
||||
}
|
||||
|
||||
// CheckMemoryLimits checks if any containers exceeded their memory limits
|
||||
func (sc *StatsCollector) CheckMemoryLimits(hsLimitMB, tsLimitMB float64) []MemoryViolation {
|
||||
if hsLimitMB <= 0 && tsLimitMB <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
summaries := sc.GetSummary()
|
||||
var violations []MemoryViolation
|
||||
|
||||
for _, summary := range summaries {
|
||||
var limitMB float64
|
||||
if strings.HasPrefix(summary.ContainerName, "hs-") {
|
||||
limitMB = hsLimitMB
|
||||
} else if strings.HasPrefix(summary.ContainerName, "ts-") {
|
||||
limitMB = tsLimitMB
|
||||
} else {
|
||||
continue // Skip containers that don't match our patterns
|
||||
}
|
||||
|
||||
if limitMB > 0 && summary.Memory.Max > limitMB {
|
||||
violations = append(violations, MemoryViolation{
|
||||
ContainerName: summary.ContainerName,
|
||||
MaxMemoryMB: summary.Memory.Max,
|
||||
LimitMB: limitMB,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return violations
|
||||
}
|
||||
|
||||
// PrintSummaryAndCheckLimits prints the statistics summary and returns memory violations if any
|
||||
func (sc *StatsCollector) PrintSummaryAndCheckLimits(hsLimitMB, tsLimitMB float64) []MemoryViolation {
|
||||
sc.PrintSummary()
|
||||
return sc.CheckMemoryLimits(hsLimitMB, tsLimitMB)
|
||||
}
|
||||
|
||||
// Close closes the stats collector and cleans up resources
|
||||
func (sc *StatsCollector) Close() error {
|
||||
sc.StopCollection()
|
||||
return sc.client.Close()
|
||||
}
|
||||
100
cmd/hi/tar_utils.go
Normal file
100
cmd/hi/tar_utils.go
Normal file
@@ -0,0 +1,100 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrFileNotFoundInTar indicates a file was not found in the tar archive.
|
||||
var ErrFileNotFoundInTar = errors.New("file not found in tar")
|
||||
|
||||
// extractFileFromTar extracts a single file from a tar reader.
|
||||
func extractFileFromTar(tarReader io.Reader, fileName, outputPath string) error {
|
||||
tr := tar.NewReader(tarReader)
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read tar header: %w", err)
|
||||
}
|
||||
|
||||
// Check if this is the file we're looking for
|
||||
if filepath.Base(header.Name) == fileName {
|
||||
if header.Typeflag == tar.TypeReg {
|
||||
// Create the output file
|
||||
outFile, err := os.Create(outputPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create output file: %w", err)
|
||||
}
|
||||
defer outFile.Close()
|
||||
|
||||
// Copy file contents
|
||||
if _, err := io.Copy(outFile, tr); err != nil {
|
||||
return fmt.Errorf("failed to copy file contents: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("%w: %s", ErrFileNotFoundInTar, fileName)
|
||||
}
|
||||
|
||||
// extractDirectoryFromTar extracts all files from a tar reader to a target directory.
|
||||
func extractDirectoryFromTar(tarReader io.Reader, targetDir string) error {
|
||||
tr := tar.NewReader(tarReader)
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read tar header: %w", err)
|
||||
}
|
||||
|
||||
// Clean the path to prevent directory traversal
|
||||
cleanName := filepath.Clean(header.Name)
|
||||
if strings.Contains(cleanName, "..") {
|
||||
continue // Skip potentially dangerous paths
|
||||
}
|
||||
|
||||
targetPath := filepath.Join(targetDir, filepath.Base(cleanName))
|
||||
|
||||
switch header.Typeflag {
|
||||
case tar.TypeDir:
|
||||
// Create directory
|
||||
if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %w", targetPath, err)
|
||||
}
|
||||
case tar.TypeReg:
|
||||
// Create file
|
||||
outFile, err := os.Create(targetPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create file %s: %w", targetPath, err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(outFile, tr); err != nil {
|
||||
outFile.Close()
|
||||
return fmt.Errorf("failed to copy file contents: %w", err)
|
||||
}
|
||||
outFile.Close()
|
||||
|
||||
// Set file permissions
|
||||
if err := os.Chmod(targetPath, os.FileMode(header.Mode)); err != nil {
|
||||
return fmt.Errorf("failed to set file permissions: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -18,10 +18,8 @@ server_url: http://127.0.0.1:8080
|
||||
# listen_addr: 0.0.0.0:8080
|
||||
listen_addr: 127.0.0.1:8080
|
||||
|
||||
# Address to listen to /metrics, you may want
|
||||
# to keep this endpoint private to your internal
|
||||
# network
|
||||
#
|
||||
# Address to listen to /metrics and /debug, you may want
|
||||
# to keep this endpoint private to your internal network
|
||||
metrics_listen_addr: 127.0.0.1:9090
|
||||
|
||||
# Address to listen for gRPC.
|
||||
@@ -43,9 +41,9 @@ grpc_allow_insecure: false
|
||||
# The Noise section includes specific configuration for the
|
||||
# TS2021 Noise protocol
|
||||
noise:
|
||||
# The Noise private key is used to encrypt the
|
||||
# traffic between headscale and Tailscale clients when
|
||||
# using the new Noise-based protocol.
|
||||
# The Noise private key is used to encrypt the traffic between headscale and
|
||||
# Tailscale clients when using the new Noise-based protocol. A missing key
|
||||
# will be automatically generated.
|
||||
private_key_path: /var/lib/headscale/noise_private.key
|
||||
|
||||
# List of IP prefixes to allocate tailaddresses from.
|
||||
@@ -87,16 +85,17 @@ derp:
|
||||
region_code: "headscale"
|
||||
region_name: "Headscale Embedded DERP"
|
||||
|
||||
# Only allow clients associated with this server access
|
||||
verify_clients: true
|
||||
|
||||
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
|
||||
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
|
||||
#
|
||||
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
|
||||
stun_listen_addr: "0.0.0.0:3478"
|
||||
|
||||
# Private key used to encrypt the traffic between headscale DERP
|
||||
# and Tailscale clients.
|
||||
# The private key file will be autogenerated if it's missing.
|
||||
#
|
||||
# Private key used to encrypt the traffic between headscale DERP and
|
||||
# Tailscale clients. A missing key will be automatically generated.
|
||||
private_key_path: /var/lib/headscale/derp_server_private.key
|
||||
|
||||
# This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
|
||||
@@ -226,9 +225,11 @@ tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
|
||||
log:
|
||||
# Valid log levels: panic, fatal, error, warn, info, debug, trace
|
||||
level: info
|
||||
|
||||
# Output formatting for logs: text or json
|
||||
format: text
|
||||
level: info
|
||||
|
||||
## Policy
|
||||
# headscale supports Tailscale's ACL policies.
|
||||
@@ -274,6 +275,10 @@ dns:
|
||||
# `hostname.base_domain` (e.g., _myhost.example.com_).
|
||||
base_domain: example.com
|
||||
|
||||
# Whether to use the local DNS settings of a node (default) or override the
|
||||
# local DNS settings and force the use of Headscale's DNS configuration.
|
||||
override_local_dns: false
|
||||
|
||||
# List of DNS servers to expose to clients.
|
||||
nameservers:
|
||||
global:
|
||||
@@ -301,7 +306,7 @@ dns:
|
||||
search_domains: []
|
||||
|
||||
# Extra DNS records
|
||||
# so far only A-records are supported (on the tailscale side)
|
||||
# so far only A and AAAA records are supported (on the tailscale side)
|
||||
# See: docs/ref/dns.md
|
||||
extra_records: []
|
||||
# - name: "grafana.myvpn.example.com"
|
||||
@@ -310,68 +315,81 @@ dns:
|
||||
#
|
||||
# # you can also put it in one line
|
||||
# - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
|
||||
#
|
||||
# Alternatively, extra DNS records can be loaded from a JSON file.
|
||||
# Headscale processes this file on each change.
|
||||
# extra_records_path: /var/lib/headscale/extra-records.json
|
||||
|
||||
# Unix socket used for the CLI to connect without authentication
|
||||
# Note: for production you will want to set this to something like:
|
||||
unix_socket: /var/run/headscale/headscale.sock
|
||||
unix_socket_permission: "0770"
|
||||
#
|
||||
# headscale supports experimental OpenID connect support,
|
||||
# it is still being tested and might have some bugs, please
|
||||
# help us test it.
|
||||
|
||||
# OpenID Connect
|
||||
# oidc:
|
||||
# # Block startup until the identity provider is available and healthy.
|
||||
# only_start_if_oidc_is_available: true
|
||||
#
|
||||
# # OpenID Connect Issuer URL from the identity provider
|
||||
# issuer: "https://your-oidc.issuer.com/path"
|
||||
#
|
||||
# # Client ID from the identity provider
|
||||
# client_id: "your-oidc-client-id"
|
||||
#
|
||||
# # Client secret generated by the identity provider
|
||||
# # Note: client_secret and client_secret_path are mutually exclusive.
|
||||
# client_secret: "your-oidc-client-secret"
|
||||
# # Alternatively, set `client_secret_path` to read the secret from the file.
|
||||
# # It resolves environment variables, making integration to systemd's
|
||||
# # `LoadCredential` straightforward:
|
||||
# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
|
||||
# # client_secret and client_secret_path are mutually exclusive.
|
||||
#
|
||||
# # The amount of time from a node is authenticated with OpenID until it
|
||||
# # expires and needs to reauthenticate.
|
||||
# # The amount of time a node is authenticated with OpenID until it expires
|
||||
# # and needs to reauthenticate.
|
||||
# # Setting the value to "0" will mean no expiry.
|
||||
# expiry: 180d
|
||||
#
|
||||
# # Use the expiry from the token received from OpenID when the user logged
|
||||
# # in, this will typically lead to frequent need to reauthenticate and should
|
||||
# # only been enabled if you know what you are doing.
|
||||
# # in. This will typically lead to frequent need to reauthenticate and should
|
||||
# # only be enabled if you know what you are doing.
|
||||
# # Note: enabling this will cause `oidc.expiry` to be ignored.
|
||||
# use_expiry_from_token: false
|
||||
#
|
||||
# # Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
|
||||
# # parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
|
||||
# # The OIDC scopes to use, defaults to "openid", "profile" and "email".
|
||||
# # Custom scopes can be configured as needed, be sure to always include the
|
||||
# # required "openid" scope.
|
||||
# scope: ["openid", "profile", "email"]
|
||||
#
|
||||
# scope: ["openid", "profile", "email", "custom"]
|
||||
# # Provide custom key/value pairs which get sent to the identity provider's
|
||||
# # authorization endpoint.
|
||||
# extra_params:
|
||||
# domain_hint: example.com
|
||||
#
|
||||
# # List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
|
||||
# # authentication request will be rejected.
|
||||
#
|
||||
# # Only accept users whose email domain is part of the allowed_domains list.
|
||||
# allowed_domains:
|
||||
# - example.com
|
||||
# # Note: Groups from keycloak have a leading '/'
|
||||
# allowed_groups:
|
||||
# - /headscale
|
||||
#
|
||||
# # Only accept users whose email address is part of the allowed_users list.
|
||||
# allowed_users:
|
||||
# - alice@example.com
|
||||
#
|
||||
# # Map legacy users from pre-0.24.0 versions of headscale to the new OIDC users
|
||||
# # by taking the username from the legacy user and matching it with the username
|
||||
# # provided by the OIDC. This is useful when migrating from legacy users to OIDC
|
||||
# # to force them using the unique identifier from the OIDC and to give them a
|
||||
# # proper display name and picture if available.
|
||||
# # Note that this will only work if the username from the legacy user is the same
|
||||
# # and there is a possibility for account takeover should a username have changed
|
||||
# # with the provider.
|
||||
# # Disabling this feature will cause all new logins to be created as new users.
|
||||
# # Note this option will be removed in the future and should be set to false
|
||||
# # on all new installations, or when all users have logged in with OIDC once.
|
||||
# map_legacy_users: true
|
||||
# # Only accept users which are members of at least one group in the
|
||||
# # allowed_groups list.
|
||||
# allowed_groups:
|
||||
# - /headscale
|
||||
#
|
||||
# # Optional: PKCE (Proof Key for Code Exchange) configuration
|
||||
# # PKCE adds an additional layer of security to the OAuth 2.0 authorization code flow
|
||||
# # by preventing authorization code interception attacks
|
||||
# # See https://datatracker.ietf.org/doc/html/rfc7636
|
||||
# pkce:
|
||||
# # Enable or disable PKCE support (default: false)
|
||||
# enabled: false
|
||||
#
|
||||
# # PKCE method to use:
|
||||
# # - plain: Use plain code verifier
|
||||
# # - S256: Use SHA256 hashed code verifier (default, recommended)
|
||||
# method: S256
|
||||
|
||||
# Logtail configuration
|
||||
# Logtail is Tailscales logging and auditing infrastructure, it allows the control panel
|
||||
|
||||
@@ -10,7 +10,7 @@ headscale.
|
||||
| OpenBSD | Yes |
|
||||
| FreeBSD | Yes |
|
||||
| Windows | Yes (see [docs](../usage/connect/windows.md) and `/windows` on your headscale for more information) |
|
||||
| Android | Yes (see [docs](../usage/connect/android.md)) |
|
||||
| Android | Yes (see [docs](../usage/connect/android.md) for more information) |
|
||||
| macOS | Yes (see [docs](../usage/connect/apple.md#macos) and `/apple` on your headscale for more information) |
|
||||
| iOS | Yes (see [docs](../usage/connect/apple.md#ios) and `/apple` on your headscale for more information) |
|
||||
| tvOS | Yes (see [docs](../usage/connect/apple.md#tvos) and `/apple` on your headscale for more information) |
|
||||
|
||||
@@ -2,12 +2,12 @@
|
||||
|
||||
## What is the design goal of headscale?
|
||||
|
||||
Headscale aims to implement a self-hosted, open source alternative to the [Tailscale](https://tailscale.com/)
|
||||
control server.
|
||||
Headscale's goal is to provide self-hosters and hobbyists with an open-source
|
||||
server they can use for their projects and labs.
|
||||
It implements a narrow scope, a _single_ Tailnet, suitable for a personal use, or a small
|
||||
open-source organisation.
|
||||
Headscale aims to implement a self-hosted, open source alternative to the
|
||||
[Tailscale](https://tailscale.com/) control server. Headscale's goal is to
|
||||
provide self-hosters and hobbyists with an open-source server they can use for
|
||||
their projects and labs. It implements a narrow scope, a _single_ Tailscale
|
||||
network (tailnet), suitable for a personal use, or a small open-source
|
||||
organisation.
|
||||
|
||||
## How can I contribute?
|
||||
|
||||
@@ -40,22 +40,77 @@ official releases](../setup/install/official.md) for more information.
|
||||
In addition to that, you may use packages provided by the community or from distributions. Learn more in the
|
||||
[installation guide using community packages](../setup/install/community.md).
|
||||
|
||||
For convenience, we also [build Docker images with headscale](../setup/install/container.md). But **please be aware that
|
||||
For convenience, we also [build container images with headscale](../setup/install/container.md). But **please be aware that
|
||||
we don't officially support deploying headscale using Docker**. On our [Discord server](https://discord.gg/c84AZQhmpx)
|
||||
we have a "docker-issues" channel where you can ask for Docker-specific help to the community.
|
||||
|
||||
## Scaling / How many clients does Headscale support?
|
||||
|
||||
It depends. As often stated, Headscale is not enterprise software and our focus
|
||||
is homelabbers and self-hosters. Of course, we do not prevent people from using
|
||||
it in a commercial/professional setting and often get questions about scaling.
|
||||
|
||||
Please note that when Headscale is developed, performance is not part of the
|
||||
consideration as the main audience is considered to be users with a modest
|
||||
amount of devices. We focus on correctness and feature parity with Tailscale
|
||||
SaaS over time.
|
||||
|
||||
To understand if you might be able to use Headscale for your use case, I will
|
||||
describe two scenarios in an effort to explain what is the central bottleneck
|
||||
of Headscale:
|
||||
|
||||
1. An environment with 1000 servers
|
||||
|
||||
- they rarely "move" (change their endpoints)
|
||||
- new nodes are added rarely
|
||||
|
||||
2. An environment with 80 laptops/phones (end user devices)
|
||||
|
||||
- nodes move often, e.g. switching from home to office
|
||||
|
||||
Headscale calculates a map of all nodes that need to talk to each other,
|
||||
creating this "world map" requires a lot of CPU time. When an event that
|
||||
requires changes to this map happens, the whole "world" is recalculated, and a
|
||||
new "world map" is created for every node in the network.
|
||||
|
||||
This means that under certain conditions, Headscale can likely handle 100s
|
||||
of devices (maybe more), if there is _little to no change_ happening in the
|
||||
network. For example, in Scenario 1, the process of computing the world map is
|
||||
extremely demanding due to the size of the network, but when the map has been
|
||||
created and the nodes are not changing, the Headscale instance will likely
|
||||
return to a very low resource usage until the next time there is an event
|
||||
requiring the new map.
|
||||
|
||||
In the case of Scenario 2, the process of computing the world map is less
|
||||
demanding due to the smaller size of the network, however, the type of nodes
|
||||
will likely change frequently, which would lead to a constant resource usage.
|
||||
|
||||
Headscale will start to struggle when the two scenarios overlap, e.g. many nodes
|
||||
with frequent changes will cause the resource usage to remain constantly high.
|
||||
In the worst case scenario, the queue of nodes waiting for their map will grow
|
||||
to a point where Headscale never will be able to catch up, and nodes will never
|
||||
learn about the current state of the world.
|
||||
|
||||
We expect that the performance will improve over time as we improve the code
|
||||
base, but it is not a focus. In general, we will never make the tradeoff to make
|
||||
things faster on the cost of less maintainable or readable code. We are a small
|
||||
team and have to optimise for maintainability.
|
||||
|
||||
## Which database should I use?
|
||||
|
||||
We recommend the use of SQLite as database for headscale:
|
||||
|
||||
- SQLite is simple to setup and easy to use
|
||||
- It scales well for all of headscale's usecases
|
||||
- It scales well for all of headscale's use cases
|
||||
- Development and testing happens primarily on SQLite
|
||||
- PostgreSQL is still supported, but is considered to be in "maintenance mode"
|
||||
|
||||
The headscale project itself does not provide a tool to migrate from PostgreSQL to SQLite. Please have a look at [the
|
||||
related tools documentation](../ref/integration/tools.md) for migration tooling provided by the community.
|
||||
|
||||
The choice of database has little to no impact on the performance of the server,
|
||||
see [Scaling / How many clients does Headscale support?](#scaling-how-many-clients-does-headscale-support) for understanding how Headscale spends its resources.
|
||||
|
||||
## Why is my reverse proxy not working with headscale?
|
||||
|
||||
We don't know. We don't use reverse proxies with headscale ourselves, so we don't have any experience with them. We have
|
||||
@@ -66,3 +121,16 @@ help to the community.
|
||||
## Can I use headscale and tailscale on the same machine?
|
||||
|
||||
Running headscale on a machine that is also in the tailnet can cause problems with subnet routers, traffic relay nodes, and MagicDNS. It might work, but it is not supported.
|
||||
|
||||
## Why do two nodes see each other in their status, even if an ACL allows traffic only in one direction?
|
||||
|
||||
A frequent use case is to allow traffic only from one node to another, but not the other way around. For example, the
|
||||
workstation of an administrator should be able to connect to all nodes but the nodes themselves shouldn't be able to
|
||||
connect back to the administrator's node. Why do all nodes see the administrator's workstation in the output of
|
||||
`tailscale status`?
|
||||
|
||||
This is essentially how Tailscale works. If traffic is allowed to flow in one direction, then both nodes see each other
|
||||
in their output of `tailscale status`. Traffic is still filtered according to the ACL, with the exception of `tailscale
|
||||
ping` which is always allowed in either direction.
|
||||
|
||||
See also <https://tailscale.com/kb/1087/device-visibility>.
|
||||
|
||||
@@ -2,30 +2,36 @@
|
||||
|
||||
Headscale aims to implement a self-hosted, open source alternative to the Tailscale control server. Headscale's goal is
|
||||
to provide self-hosters and hobbyists with an open-source server they can use for their projects and labs. This page
|
||||
provides on overview of headscale's feature and compatibility with the Tailscale control server:
|
||||
provides on overview of Headscale's feature and compatibility with the Tailscale control server:
|
||||
|
||||
- [x] Full "base" support of Tailscale's features
|
||||
- [x] Node registration
|
||||
- [x] Interactive
|
||||
- [x] Pre authenticated key
|
||||
- [x] [DNS](https://tailscale.com/kb/1054/dns)
|
||||
- [x] [DNS](../ref/dns.md)
|
||||
- [x] [MagicDNS](https://tailscale.com/kb/1081/magicdns)
|
||||
- [x] [Global and restricted nameservers (split DNS)](https://tailscale.com/kb/1054/dns#nameservers)
|
||||
- [x] [search domains](https://tailscale.com/kb/1054/dns#search-domains)
|
||||
- [x] [Extra DNS records (headscale only)](../ref/dns.md#setting-custom-dns-records)
|
||||
- [x] [Extra DNS records (Headscale only)](../ref/dns.md#setting-extra-dns-records)
|
||||
- [x] [Taildrop (File Sharing)](https://tailscale.com/kb/1106/taildrop)
|
||||
- [x] Routing advertising (including exit nodes)
|
||||
- [x] [Routes](../ref/routes.md)
|
||||
- [x] [Subnet routers](../ref/routes.md#subnet-router)
|
||||
- [x] [Exit nodes](../ref/routes.md#exit-node)
|
||||
- [x] Dual stack (IPv4 and IPv6)
|
||||
- [x] Ephemeral nodes
|
||||
- [x] Embedded [DERP server](https://tailscale.com/kb/1232/derp-servers)
|
||||
- [x] Access control lists ([GitHub label "policy"](https://github.com/juanfont/headscale/labels/policy%20%F0%9F%93%9D))
|
||||
- [x] ACL management via API
|
||||
- [x] `autogroup:internet`
|
||||
- [ ] `autogroup:self`
|
||||
- [ ] `autogroup:member`
|
||||
* [ ] Node registration using Single-Sign-On (OpenID Connect) ([GitHub label "OIDC"](https://github.com/juanfont/headscale/labels/OIDC))
|
||||
- [x] Some [Autogroups](https://tailscale.com/kb/1396/targets#autogroups), currently: `autogroup:internet`,
|
||||
`autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`
|
||||
- [x] [Auto approvers](https://tailscale.com/kb/1337/acl-syntax#auto-approvers) for [subnet
|
||||
routers](../ref/routes.md#automatically-approve-routes-of-a-subnet-router) and [exit
|
||||
nodes](../ref/routes.md#automatically-approve-an-exit-node-with-auto-approvers)
|
||||
- [x] [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh)
|
||||
* [x] [Node registration using Single-Sign-On (OpenID Connect)](../ref/oidc.md) ([GitHub label "OIDC"](https://github.com/juanfont/headscale/labels/OIDC))
|
||||
- [x] Basic registration
|
||||
- [ ] Dynamic ACL support
|
||||
- [x] Update user profile from identity provider
|
||||
- [ ] OIDC groups cannot be used in ACLs
|
||||
- [ ] [Funnel](https://tailscale.com/kb/1223/funnel) ([#1040](https://github.com/juanfont/headscale/issues/1040))
|
||||
- [ ] [Serve](https://tailscale.com/kb/1312/serve) ([#1234](https://github.com/juanfont/headscale/issues/1921))
|
||||
- [ ] [Network flow logs](https://tailscale.com/kb/1219/network-flow-logs) ([#1687](https://github.com/juanfont/headscale/issues/1687))
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
|
||||
All headscale releases are available on the [GitHub release page](https://github.com/juanfont/headscale/releases). Those
|
||||
releases are available as binaries for various platforms and architectures, packages for Debian based systems and source
|
||||
code archives. Container images are available on [Docker Hub](https://hub.docker.com/r/headscale/headscale).
|
||||
code archives. Container images are available on [Docker Hub](https://hub.docker.com/r/headscale/headscale) and
|
||||
[GitHub Container Registry](https://github.com/juanfont/headscale/pkgs/container/headscale).
|
||||
|
||||
An Atom/RSS feed of headscale releases is available [here](https://github.com/juanfont/headscale/releases.atom).
|
||||
|
||||
|
||||
@@ -14,12 +14,12 @@ Join our [Discord server](https://discord.gg/c84AZQhmpx) for a chat and communit
|
||||
|
||||
## Design goal
|
||||
|
||||
Headscale aims to implement a self-hosted, open source alternative to the Tailscale
|
||||
control server.
|
||||
Headscale's goal is to provide self-hosters and hobbyists with an open-source
|
||||
server they can use for their projects and labs.
|
||||
It implements a narrower scope, a single Tailnet, suitable for a personal use, or a small
|
||||
open-source organisation.
|
||||
Headscale aims to implement a self-hosted, open source alternative to the
|
||||
[Tailscale](https://tailscale.com/) control server. Headscale's goal is to
|
||||
provide self-hosters and hobbyists with an open-source server they can use for
|
||||
their projects and labs. It implements a narrow scope, a _single_ Tailscale
|
||||
network (tailnet), suitable for a personal use, or a small open-source
|
||||
organisation.
|
||||
|
||||
## Supporting headscale
|
||||
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
# Packaging
|
||||
|
||||
We use [nFPM](https://nfpm.goreleaser.com/) for making `.deb`, `.rpm` and `.apk`.
|
||||
|
||||
This folder contains files we need to package with these releases.
|
||||
@@ -1,88 +0,0 @@
|
||||
#!/bin/sh
|
||||
# Determine OS platform
|
||||
# shellcheck source=/dev/null
|
||||
. /etc/os-release
|
||||
|
||||
HEADSCALE_EXE="/usr/bin/headscale"
|
||||
BSD_HIER=""
|
||||
HEADSCALE_RUN_DIR="/var/run/headscale"
|
||||
HEADSCALE_HOME_DIR="/var/lib/headscale"
|
||||
HEADSCALE_USER="headscale"
|
||||
HEADSCALE_GROUP="headscale"
|
||||
HEADSCALE_SHELL="/usr/sbin/nologin"
|
||||
|
||||
ensure_sudo() {
|
||||
if [ "$(id -u)" = "0" ]; then
|
||||
echo "Sudo permissions detected"
|
||||
else
|
||||
echo "No sudo permission detected, please run as sudo"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
ensure_headscale_path() {
|
||||
if [ ! -f "$HEADSCALE_EXE" ]; then
|
||||
echo "headscale not in default path, exiting..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
printf "Found headscale %s\n" "$HEADSCALE_EXE"
|
||||
}
|
||||
|
||||
create_headscale_user() {
|
||||
printf "PostInstall: Adding headscale user %s\n" "$HEADSCALE_USER"
|
||||
useradd -s "$HEADSCALE_SHELL" -d "$HEADSCALE_HOME_DIR" -c "headscale default user" "$HEADSCALE_USER"
|
||||
}
|
||||
|
||||
create_headscale_group() {
|
||||
if command -V systemctl >/dev/null 2>&1; then
|
||||
printf "PostInstall: Adding headscale group %s\n" "$HEADSCALE_GROUP"
|
||||
groupadd "$HEADSCALE_GROUP"
|
||||
|
||||
printf "PostInstall: Adding headscale user %s to group %s\n" "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||
usermod -a -G "$HEADSCALE_GROUP" "$HEADSCALE_USER"
|
||||
fi
|
||||
|
||||
if [ "$ID" = "alpine" ]; then
|
||||
printf "PostInstall: Adding headscale group %s\n" "$HEADSCALE_GROUP"
|
||||
addgroup "$HEADSCALE_GROUP"
|
||||
|
||||
printf "PostInstall: Adding headscale user %s to group %s\n" "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||
addgroup "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||
fi
|
||||
}
|
||||
|
||||
create_run_dir() {
|
||||
printf "PostInstall: Creating headscale run directory \n"
|
||||
mkdir -p "$HEADSCALE_RUN_DIR"
|
||||
|
||||
printf "PostInstall: Modifying group ownership of headscale run directory \n"
|
||||
chown "$HEADSCALE_USER":"$HEADSCALE_GROUP" "$HEADSCALE_RUN_DIR"
|
||||
}
|
||||
|
||||
summary() {
|
||||
echo "----------------------------------------------------------------------"
|
||||
echo " headscale package has been successfully installed."
|
||||
echo ""
|
||||
echo " Please follow the next steps to start the software:"
|
||||
echo ""
|
||||
echo " sudo systemctl enable headscale"
|
||||
echo " sudo systemctl start headscale"
|
||||
echo ""
|
||||
echo " Configuration settings can be adjusted here:"
|
||||
echo " ${BSD_HIER}/etc/headscale/config.yaml"
|
||||
echo ""
|
||||
echo "----------------------------------------------------------------------"
|
||||
}
|
||||
|
||||
#
|
||||
# Main body of the script
|
||||
#
|
||||
{
|
||||
ensure_sudo
|
||||
ensure_headscale_path
|
||||
create_headscale_user
|
||||
create_headscale_group
|
||||
create_run_dir
|
||||
summary
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
#!/bin/sh
|
||||
# Determine OS platform
|
||||
# shellcheck source=/dev/null
|
||||
. /etc/os-release
|
||||
|
||||
if command -V systemctl >/dev/null 2>&1; then
|
||||
echo "Stop and disable headscale service"
|
||||
systemctl stop headscale >/dev/null 2>&1 || true
|
||||
systemctl disable headscale >/dev/null 2>&1 || true
|
||||
echo "Running daemon-reload"
|
||||
systemctl daemon-reload || true
|
||||
fi
|
||||
|
||||
echo "Removing run directory"
|
||||
rm -rf "/var/run/headscale.sock"
|
||||
@@ -40,9 +40,6 @@ servers.
|
||||
|
||||
## ACL setup
|
||||
|
||||
Note: Users will be created automatically when users authenticate with the
|
||||
headscale server.
|
||||
|
||||
ACLs have to be written in [huJSON](https://github.com/tailscale/hujson).
|
||||
|
||||
When [registering the servers](../usage/getting-started.md#register-a-node) we
|
||||
@@ -52,19 +49,25 @@ tags to a server they can register, the check of the tags is done on headscale
|
||||
server and only valid tags are applied. A tag is valid if the user that is
|
||||
registering it is allowed to do it.
|
||||
|
||||
To use ACLs in headscale, you must edit your `config.yaml` file. In there you will find a `policy.path` parameter. This will need to point to your ACL file. More info on how these policies are written can be found [here](https://tailscale.com/kb/1018/acls/).
|
||||
To use ACLs in headscale, you must edit your `config.yaml` file. In there you will find a `policy.path` parameter. This
|
||||
will need to point to your ACL file. More info on how these policies are written can be found
|
||||
[here](https://tailscale.com/kb/1018/acls/).
|
||||
|
||||
Please reload or restart Headscale after updating the ACL file. Headscale may be reloaded either via its systemd service
|
||||
(`sudo systemctl reload headscale`) or by sending a SIGHUP signal (`sudo kill -HUP $(pidof headscale)`) to the main
|
||||
process. Headscale logs the result of ACL policy processing after each reload.
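For example, after editing the ACL file:

```console
$ sudo systemctl reload headscale
$ # or, when not running under systemd:
$ sudo kill -HUP $(pidof headscale)
```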
|
||||
|
||||
Here are the ACLs to implement the same permissions as above:
|
||||
|
||||
```json
|
||||
```json title="acl.json"
|
||||
{
|
||||
// groups are collections of users having a common scope. A user can be in multiple groups
|
||||
// groups cannot be composed of groups
|
||||
"groups": {
|
||||
"group:boss": ["boss"],
|
||||
"group:dev": ["dev1", "dev2"],
|
||||
"group:admin": ["admin1"],
|
||||
"group:intern": ["intern1"]
|
||||
"group:boss": ["boss@"],
|
||||
"group:dev": ["dev1@", "dev2@"],
|
||||
"group:admin": ["admin1@"],
|
||||
"group:intern": ["intern1@"]
|
||||
},
|
||||
// tagOwners in tailscale is an association between a TAG and the people allowed to set this TAG on a server.
|
||||
// This is documented [here](https://tailscale.com/kb/1068/acl-tags#defining-a-tag)
|
||||
@@ -146,13 +149,11 @@ Here are the ACL's to implement the same permissions as above:
|
||||
},
|
||||
// developers have access to the internal network through the router.
|
||||
// the internal network is composed of HTTPS endpoints and Postgresql
|
||||
// database servers. There's an additional rule to allow traffic to be
|
||||
// forwarded to the internal subnet, 10.20.0.0/16. See this issue
|
||||
// https://github.com/juanfont/headscale/issues/502
|
||||
// database servers.
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:dev"],
|
||||
"dst": ["10.20.0.0/16:443,5432", "router.internal:0"]
|
||||
"dst": ["10.20.0.0/16:443,5432"]
|
||||
},
|
||||
|
||||
// servers should be able to talk to database in tcp/5432. Database should not be able to initiate connections to
|
||||
@@ -178,11 +179,11 @@ Here are the ACL's to implement the same permissions as above:
|
||||
|
||||
// We still have to allow internal users communications since nothing guarantees that each user has
|
||||
// their own users.
|
||||
{ "action": "accept", "src": ["boss"], "dst": ["boss:*"] },
|
||||
{ "action": "accept", "src": ["dev1"], "dst": ["dev1:*"] },
|
||||
{ "action": "accept", "src": ["dev2"], "dst": ["dev2:*"] },
|
||||
{ "action": "accept", "src": ["admin1"], "dst": ["admin1:*"] },
|
||||
{ "action": "accept", "src": ["intern1"], "dst": ["intern1:*"] }
|
||||
{ "action": "accept", "src": ["boss@"], "dst": ["boss@:*"] },
|
||||
{ "action": "accept", "src": ["dev1@"], "dst": ["dev1@:*"] },
|
||||
{ "action": "accept", "src": ["dev2@"], "dst": ["dev2@:*"] },
|
||||
{ "action": "accept", "src": ["admin1@"], "dst": ["admin1@:*"] },
|
||||
{ "action": "accept", "src": ["intern1@"], "dst": ["intern1@:*"] }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
@@ -5,7 +5,9 @@
|
||||
- `/etc/headscale`
|
||||
- `$HOME/.headscale`
|
||||
- the current working directory
|
||||
- Use the command line flag `-c`, `--config` to load the configuration from a different path
|
||||
- To load the configuration from a different path, use:
|
||||
- the command line flag `-c`, `--config`
|
||||
- the environment variable `HEADSCALE_CONFIG`
|
||||
- Validate the configuration file with: `headscale configtest`
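For example, a quick sketch of validating a configuration kept at a non-default path (the path is illustrative, and it assumes the global `--config` flag combines with the `configtest` subcommand as usual):

```console
$ headscale --config /etc/headscale/config.yaml configtest
$ # or via the environment variable:
$ HEADSCALE_CONFIG=/etc/headscale/config.yaml headscale configtest
```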
|
||||
|
||||
!!! example "Get the [example configuration from the GitHub repository](https://github.com/juanfont/headscale/blob/main/config-example.yaml)"
|
||||
|
||||
docs/ref/debug.md (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
# Debugging and troubleshooting
|
||||
|
||||
Headscale and Tailscale provide debug and introspection capabilities that can be helpful when things don't work as
|
||||
expected. This page explains some debugging techniques to help pinpoint problems.
|
||||
|
||||
Please also have a look at [Tailscale's Troubleshooting guide](https://tailscale.com/kb/1023/troubleshooting). It offers
many tips and suggestions to troubleshoot common issues.
|
||||
|
||||
## Tailscale
|
||||
|
||||
The Tailscale client itself offers many commands to introspect its state as well as the state of the network:
|
||||
|
||||
- [Check local network conditions](https://tailscale.com/kb/1080/cli#netcheck): `tailscale netcheck`
|
||||
- [Get the client status](https://tailscale.com/kb/1080/cli#status): `tailscale status --json`
|
||||
- [Get DNS status](https://tailscale.com/kb/1080/cli#dns): `tailscale dns status --all`
|
||||
- Client logs: `tailscale debug daemon-logs`
|
||||
- Client netmap: `tailscale debug netmap`
|
||||
- Test DERP connection: `tailscale debug derp headscale`
|
||||
- And many more, see: `tailscale debug --help`
|
||||
|
||||
Many of the commands are helpful when trying to understand differences between Headscale and Tailscale SaaS.
|
||||
|
||||
## Headscale
|
||||
|
||||
### Application logging
|
||||
|
||||
The log levels `debug` and `trace` can be useful to get more information from Headscale.
|
||||
|
||||
```yaml hl_lines="3"
|
||||
log:
|
||||
# Valid log levels: panic, fatal, error, warn, info, debug, trace
|
||||
level: debug
|
||||
```
|
||||
|
||||
### Database logging
|
||||
|
||||
The database debug mode logs all database queries. Enable it to see how Headscale interacts with its database. This also
|
||||
requires the application log level to be set to either `debug` or `trace`.
|
||||
|
||||
```yaml hl_lines="3 7"
|
||||
database:
|
||||
# Enable debug mode. This setting requires the log.level to be set to "debug" or "trace".
|
||||
debug: false
|
||||
|
||||
log:
|
||||
# Valid log levels: panic, fatal, error, warn, info, debug, trace
|
||||
level: debug
|
||||
```
|
||||
|
||||
### Metrics and debug endpoint
|
||||
|
||||
Headscale provides a metrics and debug endpoint. It allows you to introspect different aspects such as:
|
||||
|
||||
- Information about the Go runtime, memory usage and statistics
|
||||
- Connected nodes and pending registrations
|
||||
- Active ACLs, filters and SSH policy
|
||||
- Current DERPMap
|
||||
- Prometheus metrics
|
||||
|
||||
!!! warning "Keep the metrics and debug endpoint private"
|
||||
|
||||
The listen address and port can be configured with the `metrics_listen_addr` variable in the [configuration
|
||||
file](./configuration.md). By default it listens on localhost, port 9090.
|
||||
|
||||
Keep the metrics and debug endpoint private to your internal network and don't expose it to the Internet.
|
||||
|
||||
Query metrics via <http://localhost:9090/metrics> and get an overview of available debug information via
<http://localhost:9090/debug/>. Metrics may be queried from outside localhost, but the debug interface is subject to
additional protection even when it listens on all interfaces.
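For example, Prometheus metrics can be scraped locally with:

```console
curl http://localhost:9090/metrics
```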
|
||||
|
||||
=== "Direct access"
|
||||
|
||||
Access the debug interface directly on the server where Headscale is installed.
|
||||
|
||||
```console
|
||||
curl http://localhost:9090/debug/
|
||||
```
|
||||
|
||||
=== "SSH port forwarding"
|
||||
|
||||
Use SSH port forwarding to forward Headscale's metrics and debug port to your device.
|
||||
|
||||
```console
|
||||
ssh <HEADSCALE_SERVER> -L 9090:localhost:9090
|
||||
```
|
||||
|
||||
Access the debug interface on your device by opening <http://localhost:9090/debug/> in your web browser.
|
||||
|
||||
=== "Via debug key"
|
||||
|
||||
The access control of the debug interface supports the use of a debug key. Traffic is accepted if the path to a
|
||||
debug key is set via the environment variable `TS_DEBUG_KEY_PATH` and the debug key sent as value for `debugkey`
|
||||
parameter with each request.
|
||||
|
||||
```console
|
||||
openssl rand -hex 32 | tee debugkey.txt
|
||||
export TS_DEBUG_KEY_PATH=debugkey.txt
|
||||
headscale serve
|
||||
```
|
||||
|
||||
Access the debug interface on your device by opening `http://<IP_OF_HEADSCALE>:9090/debug/?debugkey=<DEBUG_KEY>` in
|
||||
your web browser. The `debugkey` parameter must be sent with every request.
|
||||
|
||||
=== "Via debug IP address"
|
||||
|
||||
The debug endpoint expects traffic from localhost. A different debug IP address may be configured by setting the
|
||||
`TS_ALLOW_DEBUG_IP` environment variable before starting Headscale. The debug IP address is ignored when the HTTP
|
||||
header `X-Forwarded-For` is present.
|
||||
|
||||
```console
|
||||
export TS_ALLOW_DEBUG_IP=192.168.0.10 # IP address of your device
|
||||
headscale serve
|
||||
```
|
||||
|
||||
Access the debug interface on your device by opening `http://<IP_OF_HEADSCALE>:9090/debug/` in your web browser.
|
||||
@@ -1,57 +1,88 @@
|
||||
# DNS
|
||||
|
||||
Headscale supports [most DNS features](../about/features.md) from Tailscale and DNS releated settings can be configured
|
||||
in the [configuration file](./configuration.md) within the `dns` section.
|
||||
Headscale supports [most DNS features](../about/features.md) from Tailscale. DNS related settings can be configured
|
||||
within `dns` section of the [configuration file](./configuration.md).
|
||||
|
||||
## Setting custom DNS records
|
||||
## Setting extra DNS records
|
||||
|
||||
!!! warning "Community documentation"
|
||||
Headscale allows setting extra DNS records, which are made available via
|
||||
[MagicDNS](https://tailscale.com/kb/1081/magicdns). Extra DNS records can be configured either via static entries in the
|
||||
[configuration file](./configuration.md) or from a JSON file that Headscale continuously watches for changes:
|
||||
|
||||
This page is not actively maintained by the headscale authors and is
|
||||
written by community members. It is _not_ verified by headscale developers.
|
||||
- Use the `dns.extra_records` option in the [configuration file](./configuration.md) for entries that are static and
don't change while Headscale is running. Those entries are processed when Headscale starts up; changes to the
configuration require a restart of Headscale.
- For dynamic DNS records that may be added, updated or removed while Headscale is running, or for DNS records that are
generated by scripts, the option `dns.extra_records_path` in the [configuration file](./configuration.md) is useful.
Set it to the absolute path of the JSON file containing DNS records; Headscale processes this file as it detects
changes.
|
||||
|
||||
**It might be outdated and it might miss necessary steps**.
|
||||
|
||||
Headscale allows to set custom DNS records which are made available via
|
||||
[MagicDNS](https://tailscale.com/kb/1081/magicdns). An example use case is to serve multiple apps on the same host via a
|
||||
reverse proxy like NGINX, in this case a Prometheus monitoring stack. This allows to nicely access the service with
|
||||
"http://grafana.myvpn.example.com" instead of the hostname and port combination
|
||||
"http://hostname-in-magic-dns.myvpn.example.com:3000".
|
||||
An example use case is to serve multiple apps on the same host via a reverse proxy like NGINX, in this case a Prometheus
monitoring stack. This allows accessing the service nicely via "http://grafana.myvpn.example.com" instead of the
hostname and port combination "http://hostname-in-magic-dns.myvpn.example.com:3000".
|
||||
|
||||
!!! warning "Limitations"
|
||||
|
||||
[Not all types of records are supported](https://github.com/tailscale/tailscale/blob/6edf357b96b28ee1be659a70232c0135b2ffedfd/ipn/ipnlocal/local.go#L2989-L3007), especially no CNAME records.
|
||||
Currently, [only A and AAAA records are processed by Tailscale](https://github.com/tailscale/tailscale/blob/v1.78.3/ipn/ipnlocal/local.go#L4461-L4479).
|
||||
|
||||
1. Update the [configuration file](./configuration.md) to contain the desired records like so:
|
||||
1. Configure extra DNS records using one of the available configuration options:
|
||||
|
||||
```yaml
|
||||
dns:
|
||||
...
|
||||
extra_records:
|
||||
- name: "prometheus.myvpn.example.com"
|
||||
type: "A"
|
||||
value: "100.64.0.3"
|
||||
=== "Static entries, via `dns.extra_records`"
|
||||
|
||||
- name: "grafana.myvpn.example.com"
|
||||
type: "A"
|
||||
value: "100.64.0.3"
|
||||
...
|
||||
```
|
||||
```yaml title="config.yaml"
|
||||
dns:
|
||||
...
|
||||
extra_records:
|
||||
- name: "grafana.myvpn.example.com"
|
||||
type: "A"
|
||||
value: "100.64.0.3"
|
||||
|
||||
1. Restart your headscale instance.
|
||||
- name: "prometheus.myvpn.example.com"
|
||||
type: "A"
|
||||
value: "100.64.0.3"
|
||||
...
|
||||
```
|
||||
|
||||
Restart your headscale instance.
|
||||
|
||||
=== "Dynamic entries, via `dns.extra_records_path`"
|
||||
|
||||
```json title="extra-records.json"
|
||||
[
|
||||
{
|
||||
"name": "grafana.myvpn.example.com",
|
||||
"type": "A",
|
||||
"value": "100.64.0.3"
|
||||
},
|
||||
{
|
||||
"name": "prometheus.myvpn.example.com",
|
||||
"type": "A",
|
||||
"value": "100.64.0.3"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Headscale picks up changes to the above JSON file automatically.
|
||||
|
||||
!!! tip "Good to know"
|
||||
|
||||
* The `dns.extra_records_path` option in the [configuration file](./configuration.md) needs to reference the
|
||||
JSON file containing extra DNS records.
|
||||
* Be sure to "sort keys" and produce a stable output in case you generate the JSON file with a script.
|
||||
Headscale uses a checksum to detect changes to the file and a stable output avoids unnecessary processing.
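For example, one way to produce a stable, key-sorted file from a generator script (a minimal sketch; `my-dns-generator` and the output path are illustrative, and `jq -S` sorts object keys):

```console
$ # "my-dns-generator" stands in for your own script that emits DNS records as JSON
$ my-dns-generator | jq -S . > /var/lib/headscale/extra-records.json
```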
|
||||
|
||||
1. Verify that DNS records are properly set using the DNS querying tool of your choice:
|
||||
|
||||
=== "Query with dig"
|
||||
|
||||
```shell
|
||||
```console
|
||||
dig +short grafana.myvpn.example.com
|
||||
100.64.0.3
|
||||
```
|
||||
|
||||
=== "Query with drill"
|
||||
|
||||
```shell
|
||||
```console
|
||||
drill -Q grafana.myvpn.example.com
|
||||
100.64.0.3
|
||||
```
|
||||
@@ -61,7 +92,7 @@ reverse proxy like NGINX, in this case a Prometheus monitoring stack. This allow
|
||||
The motivating example here was to be able to access internal monitoring services on the same host without
|
||||
specifying a port, depicted as NGINX configuration snippet:
|
||||
|
||||
```
|
||||
```nginx title="nginx.conf"
|
||||
server {
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
# Exit Nodes
|
||||
|
||||
## On the node
|
||||
|
||||
Register the node and make it advertise itself as an exit node:
|
||||
|
||||
```console
|
||||
$ sudo tailscale up --login-server https://headscale.example.com --advertise-exit-node
|
||||
```
|
||||
|
||||
If the node is already registered, it can advertise exit capabilities like this:
|
||||
|
||||
```console
|
||||
$ sudo tailscale set --advertise-exit-node
|
||||
```
|
||||
|
||||
To use a node as an exit node, IP forwarding must be enabled on the node. Check the official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to enable IP forwarding.
|
||||
|
||||
## On the control server
|
||||
|
||||
```console
|
||||
$ # list nodes
|
||||
$ headscale routes list
|
||||
ID | Node | Prefix | Advertised | Enabled | Primary
|
||||
1 | | 0.0.0.0/0 | false | false | -
|
||||
2 | | ::/0 | false | false | -
|
||||
3 | phobos | 0.0.0.0/0 | true | false | -
|
||||
4 | phobos | ::/0 | true | false | -
|
||||
|
||||
$ # enable routes for phobos
|
||||
$ headscale routes enable -r 3
|
||||
$ headscale routes enable -r 4
|
||||
|
||||
$ # Check node list again. The routes are now enabled.
|
||||
$ headscale routes list
|
||||
ID | Node | Prefix | Advertised | Enabled | Primary
|
||||
1 | | 0.0.0.0/0 | false | false | -
|
||||
2 | | ::/0 | false | false | -
|
||||
3 | phobos | 0.0.0.0/0 | true | true | -
|
||||
4 | phobos | ::/0 | true | true | -
|
||||
```
|
||||
|
||||
## On the client
|
||||
|
||||
The exit node can now be used with:
|
||||
|
||||
```console
|
||||
$ sudo tailscale set --exit-node phobos
|
||||
```
|
||||
|
||||
Check the official [Tailscale documentation](https://tailscale.com/kb/1103/exit-nodes#use-the-exit-node) for how to do it on your device.
|
||||
@@ -23,7 +23,7 @@ Running headscale behind a cloudflare proxy or cloudflare tunnel is not supporte
|
||||
|
||||
Headscale can be configured not to use TLS, leaving it to the reverse proxy to handle. Add the following configuration values to your headscale config file.
|
||||
|
||||
```yaml
|
||||
```yaml title="config.yaml"
|
||||
server_url: https://<YOUR_SERVER_NAME> # This should be the FQDN at which headscale will be served
|
||||
listen_addr: 0.0.0.0:8080
|
||||
metrics_listen_addr: 0.0.0.0:9090
|
||||
@@ -35,7 +35,7 @@ tls_key_path: ""
|
||||
|
||||
The following example configuration can be used in your nginx setup, substituting values as necessary. `<IP:PORT>` should be the IP address and port where headscale is running. In most cases, this will be `http://localhost:8080`.
|
||||
|
||||
```Nginx
|
||||
```nginx title="nginx.conf"
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
@@ -113,7 +113,7 @@ spec:
|
||||
|
||||
The following Caddyfile is all that is necessary to use Caddy as a reverse proxy for headscale, in combination with the `config.yaml` specifications above to disable headscale's built-in TLS. Replace values as necessary - `<YOUR_SERVER_NAME>` should be the FQDN at which headscale will be served, and `<IP:PORT>` should be the IP address and port where headscale is running. In most cases, this will be `localhost:8080`.
|
||||
|
||||
```
|
||||
```none title="Caddyfile"
|
||||
<YOUR_SERVER_NAME> {
|
||||
reverse_proxy <IP:PORT>
|
||||
}
|
||||
@@ -127,7 +127,7 @@ For a slightly more complex configuration which utilizes Docker containers to ma
|
||||
|
||||
The following minimal Apache config will proxy traffic to the headscale instance on `<IP:PORT>`. Note that `upgrade=any` is required as a parameter for `ProxyPass` so that WebSockets traffic whose `Upgrade` header value is not equal to `WebSocket` (i.e. Tailscale Control Protocol) is forwarded correctly. See the [Apache docs](https://httpd.apache.org/docs/2.4/mod/mod_proxy_wstunnel.html) for more information on this.
|
||||
|
||||
```
|
||||
```apache title="apache.conf"
|
||||
<VirtualHost *:443>
|
||||
ServerName <YOUR_SERVER_NAME>
|
||||
|
||||
|
||||
@@ -5,9 +5,11 @@
|
||||
This page contains community contributions. The projects listed here are not
|
||||
maintained by the headscale authors and are written by community members.
|
||||
|
||||
This page collects third-party tools and scripts related to headscale.
|
||||
This page collects third-party tools, client libraries, and scripts related to headscale.
|
||||
|
||||
| Name | Repository Link | Description |
|
||||
| --------------------- | --------------------------------------------------------------- | ------------------------------------------------- |
|
||||
| tailscale-manager | [Github](https://github.com/singlestore-labs/tailscale-manager) | Dynamically manage Tailscale route advertisements |
|
||||
| headscalebacktosqlite | [Github](https://github.com/bigbozza/headscalebacktosqlite) | Migrate headscale from PostgreSQL back to SQLite |
|
||||
| Name | Repository Link | Description |
|
||||
| --------------------- | --------------------------------------------------------------- | -------------------------------------------------------------------- |
|
||||
| tailscale-manager | [Github](https://github.com/singlestore-labs/tailscale-manager) | Dynamically manage Tailscale route advertisements |
|
||||
| headscalebacktosqlite | [Github](https://github.com/bigbozza/headscalebacktosqlite) | Migrate headscale from PostgreSQL back to SQLite |
|
||||
| headscale-pf | [Github](https://github.com/YouSysAdmin/headscale-pf) | Populates user groups based on user groups in Jumpcloud or Authentik |
|
||||
| headscale-client-go | [Github](https://github.com/hibare/headscale-client-go) | A Go client implementation for the Headscale HTTP API. |
|
||||
|
||||
@@ -7,13 +7,14 @@
|
||||
|
||||
Headscale doesn't provide a built-in web interface but users may pick one from the available options.
|
||||
|
||||
| Name | Repository Link | Description |
|
||||
| --------------- | ------------------------------------------------------- | ----------------------------------------------------------------------------------- |
|
||||
| headscale-webui | [Github](https://github.com/ifargle/headscale-webui) | A simple headscale web UI for small-scale deployments. |
|
||||
| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server |
|
||||
| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend enviroment required |
|
||||
| Headplane | [GitHub](https://github.com/tale/headplane) | An advanced Tailscale inspired frontend for headscale |
|
||||
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for headscale |
|
||||
| ouroboros | [Github](https://github.com/yellowsink/ouroboros) | Ouroboros is designed for users to manage their own devices, rather than for admins |
|
||||
| Name | Repository Link | Description |
|
||||
| ---------------------- | ----------------------------------------------------------- | -------------------------------------------------------------------------------------------- |
|
||||
| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server |
|
||||
| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend environment required |
|
||||
| Headplane | [GitHub](https://github.com/tale/headplane) | An advanced Tailscale inspired frontend for headscale |
|
||||
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for headscale |
|
||||
| ouroboros | [Github](https://github.com/yellowsink/ouroboros) | Ouroboros is designed for users to manage their own devices, rather than for admins |
|
||||
| unraid-headscale-admin | [Github](https://github.com/ich777/unraid-headscale-admin) | A simple headscale admin UI for Unraid, it offers Local (`docker exec`) and API Mode |
|
||||
| headscale-console | [Github](https://github.com/rickli-cloud/headscale-console) | WebAssembly-based client supporting SSH, VNC and RDP with optional self-service capabilities |
|
||||
|
||||
You can ask for support on our [Discord server](https://discord.gg/c84AZQhmpx) in the "web-interfaces" channel.
|
||||
|
||||
docs/ref/oidc.md (410 lines)
@@ -1,156 +1,272 @@
|
||||
# Configuring headscale to use OIDC authentication
|
||||
# OpenID Connect
|
||||
|
||||
In order to authenticate users through a centralized solution one must enable the OIDC integration.
|
||||
Headscale supports authentication via external identity providers using OpenID Connect (OIDC). It features:
|
||||
|
||||
Known limitations:
|
||||
- Auto configuration via OpenID Connect Discovery Protocol
|
||||
- [Proof Key for Code Exchange (PKCE) code verification](#enable-pkce-recommended)
|
||||
- [Authorization based on a user's domain, email address or group membership](#authorize-users-with-filters)
|
||||
- Synchronization of [standard OIDC claims](#supported-oidc-claims)
|
||||
|
||||
- No dynamic ACL support
|
||||
- OIDC groups cannot be used in ACLs
|
||||
Please see [limitations](#limitations) for known issues and limitations.
|
||||
|
||||
## Basic configuration
|
||||
## Configuration
|
||||
|
||||
In your `config.yaml`, customize this to your liking:
|
||||
OpenID requires configuration in Headscale and your identity provider:
|
||||
|
||||
```yaml
|
||||
oidc:
|
||||
# Block further startup until the OIDC provider is healthy and available
|
||||
only_start_if_oidc_is_available: true
|
||||
# Specified by your OIDC provider
|
||||
issuer: "https://your-oidc.issuer.com/path"
|
||||
# Specified/generated by your OIDC provider
|
||||
client_id: "your-oidc-client-id"
|
||||
client_secret: "your-oidc-client-secret"
|
||||
# alternatively, set `client_secret_path` to read the secret from the file.
|
||||
# It resolves environment variables, making integration to systemd's
|
||||
# `LoadCredential` straightforward:
|
||||
#client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
|
||||
# as third option, it's also possible to load the oidc secret from environment variables
|
||||
# set HEADSCALE_OIDC_CLIENT_SECRET to the required value
|
||||
- Headscale: The `oidc` section of the Headscale [configuration](configuration.md) contains all available configuration
|
||||
options along with a description and their default values.
|
||||
- Identity provider: Please refer to the official documentation of your identity provider for specific instructions.
|
||||
Additionally, there might be some useful hints in the [Identity provider specific
|
||||
configuration](#identity-provider-specific-configuration) section below.
|
||||
|
||||
# Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
|
||||
# parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
|
||||
scope: ["openid", "profile", "email", "custom"]
|
||||
# Optional: Passed on to the browser login request – used to tweak behaviour for the OIDC provider
|
||||
extra_params:
|
||||
domain_hint: example.com
|
||||
### Basic configuration
|
||||
|
||||
# Optional: List allowed principal domains and/or users. If an authenticated user's domain is not in this list,
|
||||
# the authentication request will be rejected.
|
||||
allowed_domains:
|
||||
- example.com
|
||||
# Optional. Note that groups from Keycloak have a leading '/'.
|
||||
allowed_groups:
|
||||
- /headscale
|
||||
# Optional.
|
||||
allowed_users:
|
||||
- alice@example.com
|
||||
A basic configuration connects Headscale to an identity provider and typically requires:
|
||||
|
||||
# If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
|
||||
# This will transform `first-name.last-name@example.com` to the user `first-name.last-name`
|
||||
# If `strip_email_domain` is set to `false` the domain part will NOT be removed resulting to the following
|
||||
# user: `first-name.last-name.example.com`
|
||||
strip_email_domain: true
|
||||
```
|
||||
- OpenID Connect Issuer URL from the identity provider. Headscale uses the OpenID Connect Discovery Protocol 1.0 to
|
||||
automatically obtain OpenID configuration parameters (example: `https://sso.example.com`).
|
||||
- Client ID from the identity provider (example: `headscale`).
|
||||
- Client secret generated by the identity provider (example: `generated-secret`).
|
||||
- Redirect URI for your identity provider (example: `https://headscale.example.com/oidc/callback`).
|
||||
|
||||
## Azure AD example
|
||||
=== "Headscale"
|
||||
|
||||
In order to integrate headscale with Azure Active Directory, we'll need to provision an App Registration with the correct scopes and redirect URI. Here with Terraform:
|
||||
```yaml
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
```
|
||||
|
||||
```hcl
|
||||
resource "azuread_application" "headscale" {
|
||||
display_name = "Headscale"
|
||||
=== "Identity provider"
|
||||
|
||||
sign_in_audience = "AzureADMyOrg"
|
||||
fallback_public_client_enabled = false
|
||||
* Create a new confidential client (`Client ID`, `Client secret`)
|
||||
* Add Headscale's OIDC callback URL as valid redirect URL: `https://headscale.example.com/oidc/callback`
|
||||
* Configure additional parameters to improve user experience such as: name, description, logo, …
|
||||
|
||||
required_resource_access {
|
||||
// Microsoft Graph
|
||||
resource_app_id = "00000003-0000-0000-c000-000000000000"
|
||||
### Enable PKCE (recommended)
|
||||
|
||||
resource_access {
|
||||
// scope: profile
|
||||
id = "14dad69e-099b-42c9-810b-d002981feec1"
|
||||
type = "Scope"
|
||||
}
|
||||
resource_access {
|
||||
// scope: openid
|
||||
id = "37f7f235-527c-4136-accd-4a02d197296e"
|
||||
type = "Scope"
|
||||
}
|
||||
resource_access {
|
||||
// scope: email
|
||||
id = "64a6cdd6-aab1-4aaf-94b8-3cc8405e90d0"
|
||||
type = "Scope"
|
||||
}
|
||||
}
|
||||
web {
|
||||
# Points at your running headscale instance
|
||||
redirect_uris = ["https://headscale.example.com/oidc/callback"]
|
||||
Proof Key for Code Exchange (PKCE) adds an additional layer of security to the OAuth 2.0 authorization code flow by
|
||||
preventing authorization code interception attacks, see: <https://datatracker.ietf.org/doc/html/rfc7636>. PKCE is
|
||||
recommended and needs to be configured for Headscale and the identity provider alike:
|
||||
|
||||
implicit_grant {
|
||||
access_token_issuance_enabled = false
|
||||
id_token_issuance_enabled = true
|
||||
}
|
||||
}
|
||||
=== "Headscale"
|
||||
|
||||
group_membership_claims = ["SecurityGroup"]
|
||||
optional_claims {
|
||||
# Expose group memberships
|
||||
id_token {
|
||||
name = "groups"
|
||||
}
|
||||
}
|
||||
}
|
||||
```yaml hl_lines="5-6"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
pkce:
|
||||
enabled: true
|
||||
```
|
||||
|
||||
resource "azuread_application_password" "headscale-application-secret" {
|
||||
display_name = "Headscale Server"
|
||||
application_object_id = azuread_application.headscale.object_id
|
||||
}
|
||||
=== "Identity provider"
|
||||
|
||||
resource "azuread_service_principal" "headscale" {
|
||||
application_id = azuread_application.headscale.application_id
|
||||
}
|
||||
* Enable PKCE for the headscale client
|
||||
* Set the PKCE challenge method to "S256"
|
||||
|
||||
resource "azuread_service_principal_password" "headscale" {
|
||||
service_principal_id = azuread_service_principal.headscale.id
|
||||
end_date_relative = "44640h"
|
||||
}
|
||||
### Authorize users with filters
|
||||
|
||||
output "headscale_client_id" {
|
||||
value = azuread_application.headscale.application_id
|
||||
}
|
||||
Headscale allows filtering for allowed users based on their domain, email address or group membership. These filters can
be helpful to apply additional restrictions and control which users are allowed to join. Filters are disabled by
default; users are allowed to join once the authentication with the identity provider succeeds. In case multiple filters
are configured, a user needs to pass all of them.
|
||||
|
||||
output "headscale_client_secret" {
|
||||
value = azuread_application_password.headscale-application-secret.value
|
||||
}
|
||||
```
|
||||
=== "Allowed domains"
|
||||
|
||||
And in your headscale `config.yaml`:
|
||||
* Check the email domain of each authenticating user against the list of allowed domains and only authorize users
|
||||
whose email domain matches `example.com`.
|
||||
* Access allowed: `alice@example.com`
|
||||
* Access denied: `bob@example.net`
|
||||
|
||||
```yaml
|
||||
oidc:
|
||||
issuer: "https://login.microsoftonline.com/<tenant-UUID>/v2.0"
|
||||
client_id: "<client-id-from-terraform>"
|
||||
client_secret: "<client-secret-from-terraform>"
|
||||
```yaml hl_lines="5-6"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
allowed_domains:
|
||||
- "example.com"
|
||||
```
|
||||
|
||||
# Optional: add "groups"
|
||||
scope: ["openid", "profile", "email"]
|
||||
extra_params:
|
||||
# Use your own domain, associated with Azure AD
|
||||
domain_hint: example.com
|
||||
# Optional: Force the Azure AD account picker
|
||||
prompt: select_account
|
||||
```
|
||||
=== "Allowed users/emails"
|
||||
|
||||
## Google OAuth Example
|
||||
* Check the email address of each authenticating user against the list of allowed email addresses and only authorize
|
||||
users whose email is part of the `allowed_users` list.
|
||||
* Access allowed: `alice@example.com`, `bob@example.net`
|
||||
* Access denied: `mallory@example.net`
|
||||
|
||||
In order to integrate headscale with Google, you'll need to have a [Google Cloud Console](https://console.cloud.google.com) account.
|
||||
```yaml hl_lines="5-7"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
allowed_users:
|
||||
- "alice@example.com"
|
||||
- "bob@example.net"
|
||||
```
|
||||
|
||||
Google OAuth has a [verification process](https://support.google.com/cloud/answer/9110914?hl=en) if you need to have users authenticate who are outside of your domain. If you only need to authenticate users from your domain name (ie `@example.com`), you don't need to go through the verification process.
|
||||
=== "Allowed groups"
|
||||
|
||||
However if you don't have a domain, or need to add users outside of your domain, you can manually add emails via Google Console.
|
||||
* Use the OIDC `groups` claim of each authenticating user to get their group membership and only authorize users
|
||||
which are members in at least one of the referenced groups.
|
||||
* Access allowed: users in the `headscale_users` group
|
||||
* Access denied: users without groups, users with other groups
|
||||
|
||||
### Steps
|
||||
```yaml hl_lines="5-7"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
scope: ["openid", "profile", "email", "groups"]
|
||||
allowed_groups:
|
||||
- "headscale_users"
|
||||
```
|
||||
|
||||
### Customize node expiration
|
||||
|
||||
The node expiration is the amount of time a node is authenticated with OpenID Connect until it expires and needs to
|
||||
reauthenticate. The default node expiration is 180 days. This can either be customized or set to the expiration from the
|
||||
Access Token.
|
||||
|
||||
=== "Customize node expiration"
|
||||
|
||||
```yaml hl_lines="5"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
expiry: 30d # Use 0 to disable node expiration
|
||||
```
|
||||
|
||||
=== "Use expiration from Access Token"
|
||||
|
||||
Please keep in mind that the Access Token is typically a short-lived token that expires within a few minutes. You
|
||||
will have to configure token expiration in your identity provider to avoid frequent re-authentication.
|
||||
|
||||
|
||||
```yaml hl_lines="5"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
use_expiry_from_token: true
|
||||
```
|
||||
|
||||
!!! tip "Expire a node and force re-authentication"
|
||||
|
||||
A node can be expired immediately via:
|
||||
```console
|
||||
headscale node expire -i <NODE_ID>
|
||||
```
|
||||
|
||||
### Reference a user in the policy
|
||||
|
||||
You may refer to users in the Headscale policy via:
|
||||
|
||||
- Email address
|
||||
- Username
|
||||
- Provider identifier (only available in the database or from your identity provider)
|
||||
|
||||
!!! note "A user identifier in the policy must contain a single `@`"
|
||||
|
||||
The Headscale policy requires a single `@` to reference a user. If the username or provider identifier doesn't
|
||||
already contain a single `@`, it needs to be appended at the end. For example: the username `ssmith` has to be
|
||||
written as `ssmith@` to be correctly identified as a user within the policy.
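To illustrate, a minimal (hypothetical) policy snippet referencing the username `ssmith` from the example above:

```json title="acl.json"
{
  "acls": [
    // "ssmith" is the example username; note the trailing "@"
    { "action": "accept", "src": ["ssmith@"], "dst": ["ssmith@:*"] }
  ]
}
```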
|
||||
|
||||
!!! warning "Email address or username might be updated by users"
|
||||
|
||||
Many identity providers allow users to update their own profile. Depending on the identity provider and its
|
||||
configuration, the values for username or email address might change over time. This might have unexpected
|
||||
consequences for Headscale where a policy might no longer work or a user might obtain more access by hijacking an
|
||||
existing username or email address.
|
||||
|
||||
## Supported OIDC claims
|
||||
|
||||
Headscale uses [the standard OIDC claims](https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims) to
|
||||
populate and update its local user profile on each login. OIDC claims are read from the ID Token or from the UserInfo
|
||||
endpoint.
|
||||
|
||||
| Headscale profile | OIDC claim | Notes / examples |
|
||||
| ------------------- | -------------------- | ------------------------------------------------------------------------------------------------- |
|
||||
| email address | `email` | Only used when `email_verified: true` |
|
||||
| display name | `name` | eg: `Sam Smith` |
|
||||
| username | `preferred_username` | Depends on identity provider, eg: `ssmith`, `ssmith@idp.example.com`, `\\example.com\ssmith` |
|
||||
| profile picture | `picture` | URL to a profile picture or avatar |
|
||||
| provider identifier | `iss`, `sub` | A stable and unique identifier for a user, typically a combination of `iss` and `sub` OIDC claims |
|
||||
| | `groups` | [Only used to filter for allowed groups](#authorize-users-with-filters) |
|
||||
|
||||
## Limitations
|
||||
|
||||
- Support for OpenID Connect aims to be generic and vendor independent. It offers only limited support for quirks of
|
||||
specific identity providers.
|
||||
- OIDC groups cannot be used in ACLs.
|
||||
- The username provided by the identity provider needs to adhere to this pattern:
|
||||
- The username must be at least two characters long.
|
||||
- It must only contain letters, digits, hyphens, dots, underscores, and up to a single `@`.
|
||||
- The username must start with a letter.
|
||||
- A user's email address is only synchronized to the local user profile when the identity provider marks the email
|
||||
address as verified (`email_verified: true`).
|
||||
|
||||
Please see the [GitHub label "OIDC"](https://github.com/juanfont/headscale/labels/OIDC) for OIDC related issues.
|
||||
|
||||
## Identity provider specific configuration
|
||||
|
||||
!!! warning "Third-party software and services"
|
||||
|
||||
This section of the documentation is specific for third-party software and services. We recommend users read the
|
||||
third-party documentation on how to configure and integrate an OIDC client. Please see the [Configuration
|
||||
section](#configuration) for a description of Headscale's OIDC related configuration settings.
|
||||
|
||||
Any identity provider with OpenID Connect support should "just work" with Headscale. The following identity providers
|
||||
are known to work:
|
||||
|
||||
- [Authelia](#authelia)
|
||||
- [Authentik](#authentik)
|
||||
- [Kanidm](#kanidm)
|
||||
- [Keycloak](#keycloak)
|
||||
|
||||
### Authelia
|
||||
|
||||
Authelia is fully supported by Headscale.
|
||||
|
||||
#### Additional configuration to authorize users based on filters
|
||||
|
||||
Authelia (4.39.0 or newer) no longer provides standard OIDC claims such as `email` or `groups` via the ID Token. The
|
||||
OIDC `email` and `groups` claims are used to [authorize users with filters](#authorize-users-with-filters). This extra
|
||||
configuration step is **only** needed if you need to authorize access based on one of the following user properties:
|
||||
|
||||
- domain
|
||||
- email address
|
||||
- group membership
|
||||
|
||||
Please follow the instructions from Authelia's documentation on how to [Restore Functionality Prior to Claims
|
||||
Parameter](https://www.authelia.com/integration/openid-connect/openid-connect-1.0-claims/#restore-functionality-prior-to-claims-parameter).
|
||||
|
||||
### Authentik
|
||||
|
||||
- Authentik is fully supported by Headscale.
|
||||
- [Headscale does not support JSON Web Encryption](https://github.com/juanfont/headscale/issues/2446). Leave the field
`Encryption Key` in the providers section unset.
|
||||
|
||||
### Google OAuth
|
||||
|
||||
!!! warning "No username due to missing preferred_username"
|
||||
|
||||
Google OAuth does not send the `preferred_username` claim when the scope `profile` is requested. The username in
|
||||
Headscale will be blank/not set.
|
||||
|
||||
In order to integrate Headscale with Google, you'll need to have a [Google Cloud
|
||||
Console](https://console.cloud.google.com) account.
|
||||
|
||||
Google OAuth has a [verification process](https://support.google.com/cloud/answer/9110914?hl=en) if you need to have
|
||||
users authenticate who are outside of your domain. If you only need to authenticate users from your domain name (i.e.
|
||||
`@example.com`), you don't need to go through the verification process.
|
||||
|
||||
However, if you don't have a domain, or need to add users outside of your domain, you can manually add emails via Google
|
||||
Console.
|
||||
|
||||
#### Steps
|
||||
|
||||
1. Go to [Google Console](https://console.cloud.google.com) and login or create an account if you don't have one.
|
||||
2. Create a project (if you don't already have one).
|
||||
@@ -158,16 +274,44 @@ However if you don't have a domain, or need to add users outside of your domain,
|
||||
4. Click `Create Credentials` -> `OAuth client ID`
|
||||
5. Under `Application Type`, choose `Web Application`
|
||||
6. For `Name`, enter whatever you like
|
||||
7. Under `Authorised redirect URIs`, use `https://example.com/oidc/callback`, replacing example.com with your headscale URL.
|
||||
7. Under `Authorised redirect URIs`, add Headscale's OIDC callback URL: `https://headscale.example.com/oidc/callback`
|
||||
8. Click `Save` at the bottom of the form
|
||||
9. Take note of the `Client ID` and `Client secret`, you can also download it for reference if you need it.
|
||||
10. Edit your headscale config, under `oidc`, filling in your `client_id` and `client_secret`:
|
||||
```yaml
|
||||
oidc:
|
||||
issuer: "https://accounts.google.com"
|
||||
client_id: ""
|
||||
client_secret: ""
|
||||
scope: ["openid", "profile", "email"]
|
||||
```
|
||||
10. [Configure Headscale following the "Basic configuration" steps](#basic-configuration). The issuer URL for Google
|
||||
OAuth is: `https://accounts.google.com`.
|
||||
|
||||
You can also use `allowed_domains` and `allowed_users` to restrict the users who can authenticate.
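A minimal sketch of the resulting `oidc` section, using the issuer URL above and the credentials from the previous steps (placeholders shown):

```yaml title="config.yaml"
oidc:
  issuer: "https://accounts.google.com"
  client_id: "<CLIENT_ID>"
  client_secret: "<CLIENT_SECRET>"
```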
|
||||
### Kanidm
|
||||
|
||||
- Kanidm is fully supported by Headscale.
|
||||
- Groups for the [allowed groups filter](#authorize-users-with-filters) need to be specified with their full SPN, for
|
||||
example: `headscale_users@sso.example.com`.
|
||||
|
||||
### Keycloak
|
||||
|
||||
Keycloak is fully supported by Headscale.
|
||||
|
||||
#### Additional configuration to use the allowed groups filter
|
||||
|
||||
Keycloak has no built-in client scope for the OIDC `groups` claim. This extra configuration step is **only** needed if
|
||||
you need to [authorize access based on group membership](#authorize-users-with-filters).
|
||||
|
||||
- Create a new client scope `groups` for OpenID Connect:
|
||||
- Configure a `Group Membership` mapper with name `groups` and the token claim name `groups`.
|
||||
- Enable the mapper for the ID Token, Access Token and UserInfo endpoint.
|
||||
- Configure the new client scope for your Headscale client:
|
||||
- Edit the Headscale client.
|
||||
- Search for the client scope `groups`.
|
||||
- Add it with assigned type `Default`.
|
||||
- [Configure the allowed groups in Headscale](#authorize-users-with-filters). Keep in mind that groups in Keycloak start
|
||||
with a leading `/`.
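For example, assuming a Keycloak group named `headscale` (a hypothetical group name), the corresponding filter in Headscale could look like:

```yaml title="config.yaml"
oidc:
  scope: ["openid", "profile", "email", "groups"]
  allowed_groups:
    - "/headscale"
```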
|
||||
|
||||
### Microsoft Entra ID
|
||||
|
||||
In order to integrate Headscale with Microsoft Entra ID, you'll need to provision an App Registration with the correct
|
||||
scopes and redirect URI.
|
||||
|
||||
[Configure Headscale following the "Basic configuration" steps](#basic-configuration). The issuer URL for Microsoft
|
||||
Entra ID is: `https://login.microsoftonline.com/<tenant-UUID>/v2.0`. The following `extra_params` might be useful:
|
||||
|
||||
- `domain_hint: example.com` to use your own domain
|
||||
- `prompt: select_account` to force an account picker during login
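Putting this together, a sketch of the `oidc` section for Entra ID (tenant UUID, client ID and client secret are placeholders):

```yaml title="config.yaml"
oidc:
  issuer: "https://login.microsoftonline.com/<tenant-UUID>/v2.0"
  client_id: "<CLIENT_ID>"
  client_secret: "<CLIENT_SECRET>"
  extra_params:
    domain_hint: example.com
    prompt: select_account
```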
|
||||
|
||||
@@ -54,7 +54,7 @@ headscale apikeys expire --prefix "<PREFIX>"
|
||||
|
||||
=== "Minimal YAML configuration file"
|
||||
|
||||
```yaml
|
||||
```yaml title="config.yaml"
|
||||
cli:
|
||||
address: <HEADSCALE_ADDRESS>:<PORT>
|
||||
api_key: <API_KEY_FROM_PREVIOUS_STEP>
|
||||
@@ -69,7 +69,7 @@ headscale apikeys expire --prefix "<PREFIX>"
|
||||
|
||||
!!! bug
|
||||
|
||||
Headscale 0.23.0 requires at least an empty configuration file when environment variables are used to
|
||||
Headscale currently requires at least an empty configuration file when environment variables are used to
|
||||
specify connection details. See [issue 2193](https://github.com/juanfont/headscale/issues/2193) for more
|
||||
information.
|
||||
|
||||
|
||||
docs/ref/routes.md (new file, 271 lines)
@@ -0,0 +1,271 @@
|
||||
# Routes
|
||||
|
||||
Headscale supports route advertising and can be used to manage [subnet routers](https://tailscale.com/kb/1019/subnets)
|
||||
and [exit nodes](https://tailscale.com/kb/1103/exit-nodes) for a tailnet.
|
||||
|
||||
- [Subnet routers](#subnet-router) may be used to connect an existing network such as a virtual
|
||||
private cloud or an on-premise network with your tailnet. Use a subnet router to access devices where Tailscale can't
|
||||
be installed or to gradually roll out Tailscale.
|
||||
- [Exit nodes](#exit-node) can be used to route all Internet traffic for another Tailscale
|
||||
node. Use it to securely access the Internet on an untrusted Wi-Fi or to access online services that expect traffic
|
||||
from a specific IP address.
|
||||
|
||||
## Subnet router
|
||||
|
||||
The setup of a subnet router requires double opt-in, once from a subnet router and once on the control server to allow
|
||||
its use within the tailnet. Optionally, use [`autoApprovers` to automatically approve routes from a subnet
|
||||
router](#automatically-approve-routes-of-a-subnet-router).
|
||||
|
||||
### Setup a subnet router
|
||||
|
||||
#### Configure a node as subnet router
|
||||
|
||||
Register a node and advertise the routes it should handle as a comma-separated list:
|
||||
|
||||
```console
|
||||
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-routes=10.0.0.0/8,192.168.0.0/24
|
||||
```
|
||||
|
||||
If the node is already registered, it can advertise new routes or update previously announced routes with:
|
||||
|
||||
```console
|
||||
$ sudo tailscale set --advertise-routes=10.0.0.0/8,192.168.0.0/24
|
||||
```
|
||||
|
||||
Finally, [enable IP forwarding](#enable-ip-forwarding) to route traffic.
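On a typical Linux host this amounts to something like the following sketch (see the linked section for complete, distribution-specific instructions):

```console
$ echo 'net.ipv4.ip_forward = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
$ echo 'net.ipv6.conf.all.forwarding = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
$ sudo sysctl -p /etc/sysctl.d/99-tailscale.conf
```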
|
||||
|
||||
#### Enable the subnet router on the control server
|
||||
|
||||
The routes of a tailnet can be displayed with the `headscale nodes list-routes` command. A subnet router with the
|
||||
hostname `myrouter` announced the IPv4 networks `10.0.0.0/8` and `192.168.0.0/24`. Those need to be approved before they
|
||||
can be used.
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | | 10.0.0.0/8, 192.168.0.0/24 |
|
||||
```
|
||||
|
||||
Approve all desired routes of a subnet router by specifying them as a comma-separated list:
|
||||
|
||||
```console
|
||||
$ headscale nodes approve-routes --identifier 1 --routes 10.0.0.0/8,192.168.0.0/24
|
||||
Node updated
|
||||
```
|
||||
|
||||
The node `myrouter` can now route the IPv4 networks `10.0.0.0/8` and `192.168.0.0/24` for the tailnet.
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | 10.0.0.0/8, 192.168.0.0/24 | 10.0.0.0/8, 192.168.0.0/24 | 10.0.0.0/8, 192.168.0.0/24
|
||||
```
|
||||
|
||||
#### Use the subnet router
|
||||
|
||||
To accept routes advertised by a subnet router on a node:
|
||||
|
||||
```console
|
||||
$ sudo tailscale set --accept-routes
|
||||
```
|
||||
|
||||
Please refer to the official [Tailscale
|
||||
documentation](https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-devices) for how to use a subnet
|
||||
router on different operating systems.
|
||||
|
||||
### Restrict the use of a subnet router with ACL
|
||||
|
||||
The routes announced by subnet routers are available to the nodes in a tailnet. By default, without an ACL enabled, all
|
||||
nodes can accept and use such routes. Configure an ACL to explicitly manage who can use routes.
|
||||
|
||||
The ACL snippet below defines three hosts: a subnet router `router`, a regular node `node`, and `service.example.net`, an
internal service that can be reached via a route on the subnet router `router`. It allows the node `node` to access
`service.example.net` on ports 80 and 443, which is reachable via the subnet router. Access to the subnet router itself is
denied.
|
||||
|
||||
```json title="Access the routes of a subnet router without the subnet router itself"
|
||||
{
|
||||
"hosts": {
|
||||
// the router is not referenced but announces 192.168.0.0/24
|
||||
"router": "100.64.0.1/32",
|
||||
"node": "100.64.0.2/32",
|
||||
"service.example.net": "192.168.0.1/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["node"],
|
||||
"dst": ["service.example.net:80,443"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Automatically approve routes of a subnet router
|
||||
|
||||
The initial setup of a subnet router usually requires manual approval of its announced routes on the control server
before they can be used by a node in a tailnet. Headscale supports the `autoApprovers` section of an ACL to automate the
approval of routes served by a subnet router.
|
||||
|
||||
The ACL snippet below defines the tag `tag:router` owned by the user `alice`. This tag is used for `routes` in the
|
||||
`autoApprovers` section. The IPv4 route `192.168.0.0/24` is automatically approved once announced by a subnet router
|
||||
owned by the user `alice` and that also advertises the tag `tag:router`.
|
||||
|
||||
```json title="Subnet routers owned by alice and tagged with tag:router are automatically approved"
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:router": ["alice@"]
|
||||
},
|
||||
"autoApprovers": {
|
||||
"routes": {
|
||||
"192.168.0.0/24": ["tag:router"]
|
||||
}
|
||||
},
|
||||
"acls": [
|
||||
// more rules
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Advertise the route `192.168.0.0/24` from a subnet router that also advertises the tag `tag:router` when joining the tailnet:
|
||||
|
||||
```console
|
||||
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:router --advertise-routes 192.168.0.0/24
|
||||
```
|
||||
|
||||
Please see the [official Tailscale documentation](https://tailscale.com/kb/1337/acl-syntax#autoapprovers) for more
information on auto approvers.

## Exit node

The setup of an exit node requires double opt-in, once from an exit node and once on the control server to allow its use
within the tailnet. Optionally, use [`autoApprovers` to automatically approve an exit
node](#automatically-approve-an-exit-node-with-auto-approvers).

### Setup an exit node

#### Configure a node as exit node

Register a node and make it advertise itself as an exit node:

```console
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-exit-node
```

If the node is already registered, it can advertise exit capabilities like this:

```console
$ sudo tailscale set --advertise-exit-node
```

Finally, [enable IP forwarding](#enable-ip-forwarding) to route traffic.

#### Enable the exit node on the control server

The routes of a tailnet can be displayed with the `headscale nodes list-routes` command. An exit node can be recognized
by its announced routes: `0.0.0.0/0` for IPv4 and `::/0` for IPv6. The exit node with the hostname `myexit` is already
available, but needs to be approved:

```console
$ headscale nodes list-routes
ID | Hostname | Approved | Available       | Serving (Primary)
1  | myexit   |          | 0.0.0.0/0, ::/0 |
```

For exit nodes, it is sufficient to approve either the IPv4 or IPv6 route. The other will be approved automatically.

```console
$ headscale nodes approve-routes --identifier 1 --routes 0.0.0.0/0
Node updated
```

The node `myexit` is now approved as an exit node for the tailnet:

```console
$ headscale nodes list-routes
ID | Hostname | Approved        | Available       | Serving (Primary)
1  | myexit   | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0
```

#### Use the exit node

The exit node can now be used on a node with:

```console
$ sudo tailscale set --exit-node myexit
```

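To stop routing traffic through the exit node again, clear the selection. This is standard Tailscale client behaviour, shown here as a sketch:

```console
$ sudo tailscale set --exit-node=
```
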
Please refer to the official [Tailscale documentation](https://tailscale.com/kb/1103/exit-nodes#use-the-exit-node) for
how to use an exit node on different operating systems.

### Restrict the use of an exit node with ACL

An exit node is offered to all nodes in a tailnet. By default, without an ACL enabled, all nodes in a tailnet can select
and use an exit node. Configure `autogroup:internet` in an ACL rule to restrict who can use _any_ of the available exit
nodes.

```json title="Example use of autogroup:internet"
{
  "acls": [
    {
      "action": "accept",
      "src": ["..."],
      "dst": ["autogroup:internet:*"]
    }
  ]
}
```

### Automatically approve an exit node with auto approvers

The initial setup of an exit node usually requires manual approval on the control server before it can be used by a node
in a tailnet. Headscale supports the `autoApprovers` section of an ACL to automate the approval of a new exit node as
soon as it joins the tailnet.

The ACL snippet below defines the tag `tag:exit` owned by the user `alice`. This tag is used for `exitNode` in the
`autoApprovers` section. A new exit node that is owned by the user `alice` and also advertises the tag `tag:exit`
is automatically approved:

```json title="Exit nodes owned by alice and tagged with tag:exit are automatically approved"
{
  "tagOwners": {
    "tag:exit": ["alice@"]
  },
  "autoApprovers": {
    "exitNode": ["tag:exit"]
  },
  "acls": [
    // more rules
  ]
}
```

Advertise a node as an exit node and also advertise the tag `tag:exit` when joining the tailnet:

```console
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:exit --advertise-exit-node
```

Please see the [official Tailscale documentation](https://tailscale.com/kb/1337/acl-syntax#autoapprovers) for more
information on auto approvers.

## High availability

Headscale has limited support for high availability routing. Multiple subnet routers with overlapping routes or multiple
exit nodes can be used to provide high availability for users. If one router node goes offline, another one can serve
the same routes to clients. Please see the official [Tailscale documentation on high
availability](https://tailscale.com/kb/1115/high-availability#subnet-router-high-availability) for details.

!!! bug

    In certain situations it might take up to 16 minutes for Headscale to detect a node as offline. A failover node
    might not be selected fast enough if such a node is used as a subnet router or exit node, causing service
    interruptions for clients. See [issue 2129](https://github.com/juanfont/headscale/issues/2129) for more information.

## Troubleshooting

### Enable IP forwarding

A subnet router or exit node is routing traffic on behalf of other nodes and thus requires IP forwarding. Check the
official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to
enable IP forwarding.
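
On Linux, enabling IP forwarding typically comes down to two sysctls. A minimal sketch, assuming a distribution that reads `/etc/sysctl.d/` (the file name is an example):

```console
$ echo 'net.ipv4.ip_forward = 1' | sudo tee /etc/sysctl.d/99-ip-forwarding.conf
$ echo 'net.ipv6.conf.all.forwarding = 1' | sudo tee -a /etc/sysctl.d/99-ip-forwarding.conf
$ sudo sysctl -p /etc/sysctl.d/99-ip-forwarding.conf
```
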
@@ -2,9 +2,9 @@

## Bring your own certificate

Headscale can be configured to expose its web service via TLS. To configure the certificate and key file manually, set the `tls_cert_path` and `tls_cert_path` configuration parameters. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.
Headscale can be configured to expose its web service via TLS. To configure the certificate and key file manually, set the `tls_cert_path` and `tls_key_path` configuration parameters. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.

```yaml
```yaml title="config.yaml"
tls_cert_path: ""
tls_key_path: ""
```
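
If you want to sanity-check the files before pointing headscale at them, the certificate can be inspected with `openssl`; the path below is a placeholder:

```console
$ openssl x509 -in /etc/headscale/tls/cert.pem -noout -subject -enddate
```
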
@@ -15,7 +15,7 @@ The certificate should contain the full chain, else some clients, like the Tails

To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.

```yaml
```yaml title="config.yaml"
tls_letsencrypt_hostname: ""
tls_letsencrypt_listen: ":http"
tls_letsencrypt_cache_dir: ".cache"
@@ -52,7 +52,7 @@ If you want to validate that certificate renewal completed successfully, this ca
1. Open the URL for your headscale server in your browser of choice, and manually inspecting the expiry date of the certificate you receive.
2. Or, check remotely from CLI using `openssl`:

```bash
```console
$ openssl s_client -servername [hostname] -connect [hostname]:443 | openssl x509 -noout -dates
(...)
notBefore=Feb 8 09:48:26 2024 GMT

@@ -1,25 +0,0 @@
|
||||
# Running headscale in a cloud
|
||||
|
||||
!!! warning "Community documentation"
|
||||
|
||||
This page is not actively maintained by the headscale authors and is
|
||||
written by community members. It is _not_ verified by headscale developers.
|
||||
|
||||
**It might be outdated and it might miss necessary steps**.
|
||||
|
||||
## Sealos
|
||||
|
||||
[Deploy headscale as service on Sealos.](https://icloudnative.io/en/posts/how-to-set-up-or-migrate-headscale/)
|
||||
|
||||
1. Click the following prebuilt template:
|
||||
|
||||
[](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dheadscale)
|
||||
|
||||
2. Click "Deploy Application" on the template page to start deployment. Upon completion, two applications appear: headscale, and one of its [web interfaces](../../ref/integration/web-ui.md).
|
||||
3. Once deployment concludes, click 'Details' on the headscale application page to navigate to the application's details.
|
||||
4. Wait for the application's status to switch to running. For accessing the headscale server, the Public Address associated with port 8080 is the address of the headscale server. To access the headscale console, simply append `/admin/` to the headscale public URL.
|
||||
|
||||
!!! tip "Remote CLI"
|
||||
|
||||
Headscale can be managed remotely via its remote CLI support. See our [Controlling headscale with remote
|
||||
CLI](../../ref/remote-cli.md) documentation for details.
|
||||
@@ -4,7 +4,7 @@ Several Linux distributions and community members provide packages for headscale
|
||||
the [official releases](./official.md) provided by the headscale maintainers. Such packages offer improved integration
|
||||
for their targeted operating system and usually:
|
||||
|
||||
- setup a dedicated user account to run headscale
|
||||
- setup a dedicated local user account to run headscale
|
||||
- provide a default configuration
|
||||
- install headscale as system service
|
||||
|
||||
|
||||
@@ -7,65 +7,64 @@
|
||||
|
||||
**It might be outdated and it might miss necessary steps**.
|
||||
|
||||
This documentation has the goal of showing a user how-to set up and run headscale in a container.
|
||||
[Docker](https://www.docker.com) is used as the reference container implementation, but there is no reason that it should
|
||||
not work with alternatives like [Podman](https://podman.io). The Docker image can be found on Docker Hub [here](https://hub.docker.com/r/headscale/headscale).
|
||||
This documentation has the goal of showing a user how-to set up and run headscale in a container. A container runtime
|
||||
such as [Docker](https://www.docker.com) or [Podman](https://podman.io) is required. The container image can be found on
|
||||
[Docker Hub](https://hub.docker.com/r/headscale/headscale) and [GitHub Container
|
||||
Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The container image URLs are:
|
||||
|
||||
- [Docker Hub](https://hub.docker.com/r/headscale/headscale): `docker.io/headscale/headscale:<VERSION>`
|
||||
- [GitHub Container Registry](https://github.com/juanfont/headscale/pkgs/container/headscale):
|
||||
`ghcr.io/juanfont/headscale:<VERSION>`
|
||||
|
||||
## Configure and run headscale
|
||||
|
||||
1. Prepare a directory on the host Docker node in your directory of choice, used to hold headscale configuration and the [SQLite](https://www.sqlite.org/) database:
|
||||
1. Create a directory on the Docker host to store headscale's [configuration](../../ref/configuration.md) and the [SQLite](https://www.sqlite.org/) database:
|
||||
|
||||
```shell
|
||||
mkdir -p ./headscale/config
|
||||
mkdir -p ./headscale/{config,lib,run}
|
||||
cd ./headscale
|
||||
```
|
||||
|
||||
1. Download the example configuration for your chosen version and save it as: `/etc/headscale/config.yaml`. Adjust the
|
||||
1. Download the example configuration for your chosen version and save it as: `$(pwd)/config/config.yaml`. Adjust the
|
||||
configuration to suit your local environment. See [Configuration](../../ref/configuration.md) for details.
|
||||
|
||||
```shell
|
||||
sudo mkdir -p /etc/headscale
|
||||
sudo nano /etc/headscale/config.yaml
|
||||
```
|
||||
|
||||
Alternatively, you can mount `/var/lib` and `/var/run` from your host system by adding
|
||||
`--volume $(pwd)/lib:/var/lib/headscale` and `--volume $(pwd)/run:/var/run/headscale`
|
||||
in the next step.
|
||||
|
||||
1. Start the headscale server while working in the host headscale directory:
|
||||
1. Start headscale from within the previously created `./headscale` directory:
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
--name headscale \
|
||||
--detach \
|
||||
--volume $(pwd)/config:/etc/headscale/ \
|
||||
--volume "$(pwd)/config:/etc/headscale" \
|
||||
--volume "$(pwd)/lib:/var/lib/headscale" \
|
||||
--volume "$(pwd)/run:/var/run/headscale" \
|
||||
--publish 127.0.0.1:8080:8080 \
|
||||
--publish 127.0.0.1:9090:9090 \
|
||||
headscale/headscale:<VERSION> \
|
||||
docker.io/headscale/headscale:<VERSION> \
|
||||
serve
|
||||
```
|
||||
|
||||
Note: use `0.0.0.0:8080:8080` instead of `127.0.0.1:8080:8080` if you want to expose the container externally.
|
||||
|
||||
This command will mount `config/` under `/etc/headscale`, forward port 8080 out of the container so the
|
||||
headscale instance becomes available and then detach so headscale runs in the background.
|
||||
This command mounts the local directories inside the container, forwards port 8080 and 9090 out of the container so
|
||||
the headscale instance becomes available and then detaches so headscale runs in the background.
|
||||
|
||||
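
To confirm that the container actually came up, the usual Docker commands can be used; this is a sketch assuming the container name `headscale` from the command above:

```console
$ docker ps --filter name=headscale
$ docker logs --tail 20 headscale
```
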
Example `docker-compose.yaml`
|
||||
|
||||
```yaml
|
||||
version: "3.7"
|
||||
A similar configuration for `docker-compose`:
|
||||
|
||||
```yaml title="docker-compose.yaml"
|
||||
services:
|
||||
headscale:
|
||||
image: headscale/headscale:<VERSION>
|
||||
image: docker.io/headscale/headscale:<VERSION>
|
||||
restart: unless-stopped
|
||||
container_name: headscale
|
||||
ports:
|
||||
- "127.0.0.1:8080:8080"
|
||||
- "127.0.0.1:9090:9090"
|
||||
volumes:
|
||||
# Please change <CONFIG_PATH> to the fullpath of the config folder just created
|
||||
- <CONFIG_PATH>:/etc/headscale
|
||||
# Please set <HEADSCALE_PATH> to the absolute path
|
||||
# of the previously created headscale directory.
|
||||
- <HEADSCALE_PATH>/config:/etc/headscale
|
||||
- <HEADSCALE_PATH>/lib:/var/lib/headscale
|
||||
- <HEADSCALE_PATH>/run:/var/run/headscale
|
||||
command: serve
|
||||
```
|
||||
|
||||
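
The compose file can then be started the usual way; a sketch, assuming a Docker version with the `docker compose` plugin (older standalone installations use `docker-compose` instead):

```console
$ docker compose up --detach
```
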
@@ -89,7 +88,7 @@ not work with alternatives like [Podman](https://podman.io). The Docker image ca
|
||||
curl http://127.0.0.1:9090/metrics
|
||||
```
|
||||
|
||||
1. Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)):
|
||||
1. Create a headscale user:
|
||||
|
||||
```shell
|
||||
docker exec -it headscale \
|
||||
@@ -98,7 +97,7 @@ not work with alternatives like [Podman](https://podman.io). The Docker image ca
|
||||
|
||||
### Register a machine (normal login)
|
||||
|
||||
On a client machine, execute the `tailscale` login command:
|
||||
On a client machine, execute the `tailscale up` command to login:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server YOUR_HEADSCALE_URL
|
||||
@@ -111,16 +110,16 @@ docker exec -it headscale \
|
||||
headscale nodes register --user myfirstuser --key <YOUR_MACHINE_KEY>
|
||||
```
|
||||
|
||||
### Register machine using a pre authenticated key
|
||||
### Register a machine using a pre authenticated key
|
||||
|
||||
Generate a key using the command line:
|
||||
Generate a key using the command line for the user with ID 1:
|
||||
|
||||
```shell
|
||||
docker exec -it headscale \
|
||||
headscale preauthkeys create --user myfirstuser --reusable --expiration 24h
|
||||
headscale preauthkeys create --user 1 --reusable --expiration 24h
|
||||
```
|
||||
|
||||
This will return a pre-authenticated key that can be used to connect a node to headscale during the `tailscale` command:
|
||||
This will return a pre-authenticated key that can be used to connect a node to headscale with the `tailscale up` command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
@@ -128,11 +127,11 @@ tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
|
||||
## Debugging headscale running in Docker
|
||||
|
||||
The `headscale/headscale` Docker container is based on a "distroless" image that does not contain a shell or any other debug tools. If you need to debug your application running in the Docker container, you can use the `-debug` variant, for example `headscale/headscale:x.x.x-debug`.
|
||||
The Headscale container image is based on a "distroless" image that does not contain a shell or any other debug tools. If you need to debug headscale running in the Docker container, you can use the `-debug` variant, for example `docker.io/headscale/headscale:x.x.x-debug`.
|
||||
|
||||
### Running the debug Docker container
|
||||
|
||||
To run the debug Docker container, use the exact same commands as above, but replace `headscale/headscale:x.x.x` with `headscale/headscale:x.x.x-debug` (`x.x.x` is the version of headscale). The two containers are compatible with each other, so you can alternate between them.
|
||||
To run the debug Docker container, use the exact same commands as above, but replace `docker.io/headscale/headscale:x.x.x` with `docker.io/headscale/headscale:x.x.x-debug` (`x.x.x` is the version of headscale). The two containers are compatible with each other, so you can alternate between them.
|
||||
|
||||
### Executing commands in the debug container
|
||||
|
||||
@@ -142,14 +141,14 @@ Additionally, the debug container includes a minimalist Busybox shell.
|
||||
|
||||
To launch a shell in the container, use:
|
||||
|
||||
```
|
||||
docker run -it headscale/headscale:x.x.x-debug sh
|
||||
```shell
|
||||
docker run -it docker.io/headscale/headscale:x.x.x-debug sh
|
||||
```
|
||||
|
||||
You can also execute commands directly, such as `ls /ko-app` in this example:
|
||||
|
||||
```
|
||||
docker run headscale/headscale:x.x.x-debug ls /ko-app
|
||||
```shell
|
||||
docker run docker.io/headscale/headscale:x.x.x-debug ls /ko-app
|
||||
```

Using `docker exec -it` allows you to run commands in an existing container.

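For example, to list the nodes registered on the headscale instance from a running container:

```console
$ docker exec -it headscale \
  headscale nodes list
```
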
@@ -6,8 +6,8 @@ Both are available on the [GitHub releases page](https://github.com/juanfont/hea
|
||||
## Using packages for Debian/Ubuntu (recommended)
|
||||
|
||||
It is recommended to use our DEB packages to install headscale on a Debian based system as those packages configure a
|
||||
user to run headscale, provide a default configuration and ship with a systemd service file. Supported distributions are
|
||||
Ubuntu 20.04 or newer, Debian 11 or newer.
|
||||
local user to run headscale, provide a default configuration and ship with a systemd service file. Supported
|
||||
distributions are Ubuntu 22.04 or newer, Debian 11 or newer.
|
||||
|
||||
1. Download the [latest headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).
|
||||
|
||||
@@ -46,13 +46,13 @@ Ubuntu 20.04 or newer, Debian 11 or newer.
|
||||
|
||||
!!! warning "Advanced"
|
||||
|
||||
This installation method is considered advanced as one needs to take care of the headscale user and the systemd
|
||||
This installation method is considered advanced as one needs to take care of the local user and the systemd
|
||||
service themselves. If possible, use the [DEB packages](#using-packages-for-debianubuntu-recommended) or a
|
||||
[community package](./community.md) instead.
|
||||
|
||||
This section describes the installation of headscale according to the [Requirements and
|
||||
assumptions](../requirements.md#assumptions). Headscale is run by a dedicated user and the service itself is managed by
|
||||
systemd.
|
||||
assumptions](../requirements.md#assumptions). Headscale is run by a dedicated local user and the service itself is
|
||||
managed by systemd.
|
||||
|
||||
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
|
||||
|
||||
@@ -67,7 +67,7 @@ systemd.
|
||||
sudo chmod +x /usr/local/bin/headscale
|
||||
```
|
||||
|
||||
1. Add a dedicated user to run headscale:
|
||||
1. Add a dedicated local user to run headscale:
|
||||
|
||||
```shell
|
||||
sudo useradd \
|
||||
@@ -87,14 +87,14 @@ systemd.
|
||||
sudo nano /etc/headscale/config.yaml
|
||||
```
|
||||
|
||||
1. Copy [headscale's systemd service file](../../packaging/headscale.systemd.service) to
|
||||
`/etc/systemd/system/headscale.service` and adjust it to suit your local setup. The following parameters likely need
|
||||
1. Copy [headscale's systemd service file](https://github.com/juanfont/headscale/blob/main/packaging/systemd/headscale.service)
|
||||
to `/etc/systemd/system/headscale.service` and adjust it to suit your local setup. The following parameters likely need
|
||||
to be modified: `ExecStart`, `WorkingDirectory`, `ReadWritePaths`.
|
||||
|
||||
1. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the
|
||||
`headscale` user or group:
|
||||
|
||||
```yaml
|
||||
```yaml title="config.yaml"
|
||||
unix_socket: /var/run/headscale/headscale.sock
|
||||
```
|
||||
|
||||
|
||||
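
With the unit file and configuration in place, the service is usually reloaded, enabled and started via systemd. A sketch of the typical commands, not a verbatim continuation of this guide:

```console
$ sudo systemctl daemon-reload
$ sudo systemctl enable --now headscale
$ systemctl status headscale
```
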
@@ -16,8 +16,8 @@ README](https://github.com/juanfont/headscale#contributing) for more information
|
||||
### Install from source
|
||||
|
||||
```shell
|
||||
# Install prerequistes
|
||||
pkg_add go
|
||||
# Install prerequisites
|
||||
pkg_add go git
|
||||
|
||||
git clone https://github.com/juanfont/headscale.git
|
||||
|
||||
@@ -30,7 +30,7 @@ latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)
|
||||
|
||||
git checkout $latestTag
|
||||
|
||||
go build -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$latestTag" github.com/juanfont/headscale
|
||||
go build -ldflags="-s -w -X github.com/juanfont/headscale/hscontrol/types.Version=$latestTag -X github.com/juanfont/headscale/hscontrol/types.GitCommitHash=HASH" github.com/juanfont/headscale
|
||||
|
||||
# make it executable
|
||||
chmod a+x headscale
|
||||
@@ -42,7 +42,7 @@ cp headscale /usr/local/sbin
|
||||
### Install from source via cross compile
|
||||
|
||||
```shell
|
||||
# Install prerequistes
|
||||
# Install prerequisites
|
||||
# 1. go v1.20+: headscale newer than 0.21 needs go 1.20+ to compile
|
||||
# 2. gmake: Makefile in the headscale repo is written in GNU make syntax
|
||||
|
||||
|
||||
@@ -6,14 +6,14 @@ Headscale should just work as long as the following requirements are met:
|
||||
recommended.
|
||||
- Headscale is served via HTTPS on port 443[^1].
|
||||
- A reasonably modern Linux or BSD based operating system.
|
||||
- A dedicated user account to run headscale.
|
||||
- A dedicated local user account to run headscale.
|
||||
- A little bit of command line knowledge to configure and operate headscale.
|
||||
|
||||
## Assumptions
|
||||
|
||||
The headscale documentation and the provided examples are written with a few assumptions in mind:
|
||||
|
||||
- Headscale is running as system service via a dedicated user `headscale`.
|
||||
- Headscale is running as system service via a dedicated local user `headscale`.
|
||||
- The [configuration](../ref/configuration.md) is loaded from `/etc/headscale/config.yaml`.
|
||||
- SQLite is used as database.
|
||||
- The data directory for headscale (used for private keys, ACLs, SQLite database, …) is located in `/var/lib/headscale`.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Upgrade an existing installation
|
||||
|
||||
An existing headscale installation can be updated to a new version:
|
||||
Update an existing headscale installation to a new version:
|
||||
|
||||
- Read the announcement on the [GitHub releases](https://github.com/juanfont/headscale/releases) page for the new
|
||||
version. It lists the changes of the release along with possible breaking changes.
|
||||
|
||||
@@ -15,14 +15,10 @@ Install the official Tailscale iOS client from the [App Store](https://apps.appl
|
||||
|
||||
### Configuring the headscale URL
|
||||
|
||||
- Open Tailscale and make sure you are _not_ logged in to any account
|
||||
- Open Settings on the iOS device
|
||||
- Scroll down to the `third party apps` section, under `Game Center` or `TV Provider`
|
||||
- Find Tailscale and select it
|
||||
- If the iOS device was previously logged into Tailscale, switch the `Reset Keychain` toggle to `on`
|
||||
- Enter the URL of your headscale instance (e.g `https://headscale.example.com`) under `Alternate Coordination Server URL`
|
||||
- Restart the app by closing it from the iOS app switcher, open the app and select the regular sign in option
|
||||
_(non-SSO)_. It should open up to the headscale authentication page.
|
||||
- Open the Tailscale app
|
||||
- Click the account icon in the top-right corner and select `Log in…`.
|
||||
- Tap the top-right options menu button and select `Use custom coordination server`.
|
||||
- Enter your instance url (e.g `https://headscale.example.com`)
|
||||
- Enter your credentials and log in. Headscale should now be working on your iOS device.
|
||||
|
||||
## macOS
|
||||
@@ -43,7 +39,7 @@ tailscale login --login-server <YOUR_HEADSCALE_URL>
|
||||
|
||||
#### GUI
|
||||
|
||||
- ALT + Click the Tailscale icon in the menu and hover over the Debug menu
|
||||
- Option + Click the Tailscale icon in the menu and hover over the Debug menu
|
||||
- Under `Custom Login Server`, select `Add Account...`
|
||||
- Enter the URL of your headscale instance (e.g `https://headscale.example.com`) and press `Add Account`
|
||||
- Follow the login procedure in the browser
|
||||
|
||||
@@ -41,13 +41,14 @@ options, run:
|
||||
headscale <COMMAND> --help
|
||||
```
|
||||
|
||||
## Manage users
|
||||
## Manage headscale users
|
||||
|
||||
In headscale, a node (also known as machine or device) is always assigned to a specific user, a
|
||||
[tailnet](https://tailscale.com/kb/1136/tailnet/). Such users can be managed with the `headscale users` command. Invoke
|
||||
the built-in help for more information: `headscale users --help`.
|
||||
In headscale, a node (also known as machine or device) is always assigned to a
|
||||
headscale user. Such a headscale user may have many nodes assigned to them and
|
||||
can be managed with the `headscale users` command. Invoke the built-in help for
|
||||
more information: `headscale users --help`.
|
||||
|
||||
### Create a user
|
||||
### Create a headscale user
|
||||
|
||||
=== "Native"
|
||||
|
||||
@@ -62,7 +63,7 @@ the built-in help for more information: `headscale users --help`.
|
||||
headscale users create <USER>
|
||||
```
|
||||
|
||||
### List existing users
|
||||
### List existing headscale users
|
||||
|
||||
=== "Native"
|
||||
|
||||
@@ -116,14 +117,14 @@ headscale instance. By default, the key is valid for one hour and can only be us
|
||||
=== "Native"
|
||||
|
||||
```shell
|
||||
headscale preauthkeys create --user <USER>
|
||||
headscale preauthkeys create --user <USER_ID>
|
||||
```
|
||||
|
||||
=== "Container"
|
||||
|
||||
```shell
|
||||
docker exec -it headscale \
|
||||
headscale preauthkeys create --user <USER>
|
||||
headscale preauthkeys create --user <USER_ID>
|
||||
```
|
||||
|
||||
The command returns the preauthkey on success which is used to connect a node to the headscale instance via the
|
||||
|
||||
flake.lock (generated, 6 lines changed)
@@ -20,11 +20,11 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1733376361,
|
||||
"narHash": "sha256-aLJxoTDDSqB+/3orsulE6/qdlX6MzDLIITLZqdgMpqo=",
|
||||
"lastModified": 1752012998,
|
||||
"narHash": "sha256-Q82Ms+FQmgOBkdoSVm+FBpuFoeUAffNerR5yVV7SgT8=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "929116e316068c7318c54eb4d827f7d9756d5e9c",
|
||||
"rev": "2a2130494ad647f953593c4e84ea4df839fbd68c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
flake.nix (50 lines changed)
@@ -12,17 +12,16 @@
|
||||
flake-utils,
|
||||
...
|
||||
}: let
|
||||
headscaleVersion =
|
||||
if (self ? shortRev)
|
||||
then self.shortRev
|
||||
else "dev";
|
||||
headscaleVersion = self.shortRev or self.dirtyShortRev;
|
||||
commitHash = self.rev or self.dirtyRev;
|
||||
in
|
||||
{
|
||||
overlay = _: prev: let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.system};
|
||||
buildGo = pkgs.buildGo123Module;
|
||||
buildGo = pkgs.buildGo124Module;
|
||||
vendorHash = "sha256-83L2NMyOwKCHWqcowStJ7Ze/U9CJYhzleDRLrJNhX2g=";
|
||||
in {
|
||||
headscale = buildGo rec {
|
||||
headscale = buildGo {
|
||||
pname = "headscale";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
@@ -31,12 +30,28 @@
|
||||
checkFlags = ["-short"];
|
||||
|
||||
# When updating go.mod or go.sum, a new sha will need to be calculated,
|
||||
# update this if you have a mismatch after doing a change to thos files.
|
||||
vendorHash = "sha256-NyXMSIVcmPlUhE3LmEsYZQxJdz+e435r+GZC8umQKqQ=";
|
||||
# update this if you have a mismatch after doing a change to those files.
|
||||
inherit vendorHash;
|
||||
|
||||
subPackages = ["cmd/headscale"];
|
||||
|
||||
ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"];
|
||||
ldflags = [
|
||||
"-s"
|
||||
"-w"
|
||||
"-X github.com/juanfont/headscale/hscontrol/types.Version=${headscaleVersion}"
|
||||
"-X github.com/juanfont/headscale/hscontrol/types.GitCommitHash=${commitHash}"
|
||||
];
|
||||
};
|
||||
|
||||
hi = buildGo {
|
||||
pname = "hi";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
|
||||
checkFlags = ["-short"];
|
||||
inherit vendorHash;
|
||||
|
||||
subPackages = ["cmd/hi"];
|
||||
};
|
||||
|
||||
protoc-gen-grpc-gateway = buildGo rec {
|
||||
@@ -78,6 +93,9 @@
|
||||
# golangci-lint = prev.golangci-lint.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
# golangci-lint-langserver = prev.golangci-lint.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
|
||||
goreleaser = prev.goreleaser.override {
|
||||
buildGoModule = buildGo;
|
||||
@@ -94,6 +112,10 @@
|
||||
gofumpt = prev.gofumpt.override {
|
||||
buildGoModule = buildGo;
|
||||
};
|
||||
|
||||
# gopls = prev.gopls.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
};
|
||||
}
|
||||
// flake-utils.lib.eachDefaultSystem
|
||||
@@ -102,11 +124,12 @@
|
||||
overlays = [self.overlay];
|
||||
inherit system;
|
||||
};
|
||||
buildDeps = with pkgs; [git go_1_23 gnumake];
|
||||
buildDeps = with pkgs; [git go_1_24 gnumake];
|
||||
devDeps = with pkgs;
|
||||
buildDeps
|
||||
++ [
|
||||
golangci-lint
|
||||
golangci-lint-langserver
|
||||
golines
|
||||
nodePackages.prettier
|
||||
goreleaser
|
||||
@@ -114,6 +137,7 @@
|
||||
gotestsum
|
||||
gotests
|
||||
gofumpt
|
||||
gopls
|
||||
ksh
|
||||
ko
|
||||
yq-go
|
||||
@@ -132,7 +156,11 @@
|
||||
buf
|
||||
clang-tools # clang-format
|
||||
protobuf-language-server
|
||||
];
|
||||
|
||||
# Add hi to make it even easier to use ci runner.
|
||||
hi
|
||||
]
|
||||
++ lib.optional pkgs.stdenv.isLinux [traceroute];
|
||||
|
||||
# Add entry to build a docker image with headscale
|
||||
# caveat: only works on Linux
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.1
|
||||
// protoc-gen-go v1.36.6
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/apikey.proto
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -22,15 +23,14 @@ const (
|
||||
)
|
||||
|
||||
type ApiKey struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
LastSeen *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
LastSeen *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ApiKey) Reset() {
|
||||
@@ -99,11 +99,10 @@ func (x *ApiKey) GetLastSeen() *timestamppb.Timestamp {
|
||||
}
|
||||
|
||||
type CreateApiKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *CreateApiKeyRequest) Reset() {
|
||||
@@ -144,11 +143,10 @@ func (x *CreateApiKeyRequest) GetExpiration() *timestamppb.Timestamp {
|
||||
}
|
||||
|
||||
type CreateApiKeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
ApiKey string `protobuf:"bytes,1,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ApiKey string `protobuf:"bytes,1,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *CreateApiKeyResponse) Reset() {
|
||||
@@ -189,11 +187,10 @@ func (x *CreateApiKeyResponse) GetApiKey() string {
|
||||
}
|
||||
|
||||
type ExpireApiKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ExpireApiKeyRequest) Reset() {
|
||||
@@ -234,9 +231,9 @@ func (x *ExpireApiKeyRequest) GetPrefix() string {
|
||||
}
|
||||
|
||||
type ExpireApiKeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ExpireApiKeyResponse) Reset() {
|
||||
@@ -270,9 +267,9 @@ func (*ExpireApiKeyResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type ListApiKeysRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ListApiKeysRequest) Reset() {
|
||||
@@ -306,11 +303,10 @@ func (*ListApiKeysRequest) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type ListApiKeysResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
ApiKeys []*ApiKey `protobuf:"bytes,1,rep,name=api_keys,json=apiKeys,proto3" json:"api_keys,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ApiKeys []*ApiKey `protobuf:"bytes,1,rep,name=api_keys,json=apiKeys,proto3" json:"api_keys,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ListApiKeysResponse) Reset() {
|
||||
@@ -351,11 +347,10 @@ func (x *ListApiKeysResponse) GetApiKeys() []*ApiKey {
|
||||
}
|
||||
|
||||
type DeleteApiKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *DeleteApiKeyRequest) Reset() {
|
||||
@@ -396,9 +391,9 @@ func (x *DeleteApiKeyRequest) GetPrefix() string {
|
||||
}
|
||||
|
||||
type DeleteApiKeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *DeleteApiKeyResponse) Reset() {
|
||||
@@ -433,62 +428,42 @@ func (*DeleteApiKeyResponse) Descriptor() ([]byte, []int) {
|
||||
|
||||
var File_headscale_v1_apikey_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_apikey_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x01, 0x0a, 0x06, 0x41,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3a, 0x0a,
|
||||
0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65,
|
||||
0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65,
|
||||
0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
|
||||
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74,
|
||||
0x65, 0x64, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65,
|
||||
0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
|
||||
0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x22, 0x51, 0x0a,
|
||||
0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x22, 0x2f, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x61, 0x70, 0x69, 0x5f,
|
||||
0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x70, 0x69, 0x4b, 0x65,
|
||||
0x79, 0x22, 0x2d, 0x0a, 0x13, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65,
|
||||
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66,
|
||||
0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
|
||||
0x22, 0x16, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x46,
|
||||
0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79,
|
||||
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x07, 0x61,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x2d, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a,
|
||||
0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70,
|
||||
0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x29, 0x5a,
|
||||
0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e,
|
||||
0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67,
|
||||
0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
const file_headscale_v1_apikey_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x19headscale/v1/apikey.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\xe0\x01\n" +
|
||||
"\x06ApiKey\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x16\n" +
|
||||
"\x06prefix\x18\x02 \x01(\tR\x06prefix\x12:\n" +
|
||||
"\n" +
|
||||
"expiration\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
|
||||
"expiration\x129\n" +
|
||||
"\n" +
|
||||
"created_at\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x127\n" +
|
||||
"\tlast_seen\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\blastSeen\"Q\n" +
|
||||
"\x13CreateApiKeyRequest\x12:\n" +
|
||||
"\n" +
|
||||
"expiration\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
|
||||
"expiration\"/\n" +
|
||||
"\x14CreateApiKeyResponse\x12\x17\n" +
|
||||
"\aapi_key\x18\x01 \x01(\tR\x06apiKey\"-\n" +
|
||||
"\x13ExpireApiKeyRequest\x12\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
|
||||
"\x14ExpireApiKeyResponse\"\x14\n" +
|
||||
"\x12ListApiKeysRequest\"F\n" +
|
||||
"\x13ListApiKeysResponse\x12/\n" +
|
||||
"\bapi_keys\x18\x01 \x03(\v2\x14.headscale.v1.ApiKeyR\aapiKeys\"-\n" +
|
||||
"\x13DeleteApiKeyRequest\x12\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
|
||||
"\x14DeleteApiKeyResponseB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
|
||||
var (
|
||||
file_headscale_v1_apikey_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_apikey_proto_rawDescData = file_headscale_v1_apikey_proto_rawDesc
|
||||
file_headscale_v1_apikey_proto_rawDescData []byte
|
||||
)
|
||||
|
||||
func file_headscale_v1_apikey_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_apikey_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_apikey_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_apikey_proto_rawDescData)
|
||||
file_headscale_v1_apikey_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_apikey_proto_rawDesc), len(file_headscale_v1_apikey_proto_rawDesc)))
|
||||
})
|
||||
return file_headscale_v1_apikey_proto_rawDescData
|
||||
}
|
||||
@@ -528,7 +503,7 @@ func file_headscale_v1_apikey_proto_init() {
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_apikey_proto_rawDesc,
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_apikey_proto_rawDesc), len(file_headscale_v1_apikey_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
@@ -539,7 +514,6 @@ func file_headscale_v1_apikey_proto_init() {
|
||||
MessageInfos: file_headscale_v1_apikey_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_apikey_proto = out.File
|
||||
file_headscale_v1_apikey_proto_rawDesc = nil
|
||||
file_headscale_v1_apikey_proto_goTypes = nil
|
||||
file_headscale_v1_apikey_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.1
|
||||
// protoc-gen-go v1.36.6
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/device.proto
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -22,12 +23,11 @@ const (
|
||||
)
|
||||
|
||||
type Latency struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
LatencyMs float32 `protobuf:"fixed32,1,opt,name=latency_ms,json=latencyMs,proto3" json:"latency_ms,omitempty"`
|
||||
Preferred bool `protobuf:"varint,2,opt,name=preferred,proto3" json:"preferred,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
LatencyMs float32 `protobuf:"fixed32,1,opt,name=latency_ms,json=latencyMs,proto3" json:"latency_ms,omitempty"`
|
||||
Preferred bool `protobuf:"varint,2,opt,name=preferred,proto3" json:"preferred,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *Latency) Reset() {
|
||||
@@ -75,16 +75,15 @@ func (x *Latency) GetPreferred() bool {
|
||||
}
|
||||
|
||||
type ClientSupports struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
HairPinning bool `protobuf:"varint,1,opt,name=hair_pinning,json=hairPinning,proto3" json:"hair_pinning,omitempty"`
|
||||
Ipv6 bool `protobuf:"varint,2,opt,name=ipv6,proto3" json:"ipv6,omitempty"`
|
||||
Pcp bool `protobuf:"varint,3,opt,name=pcp,proto3" json:"pcp,omitempty"`
|
||||
Pmp bool `protobuf:"varint,4,opt,name=pmp,proto3" json:"pmp,omitempty"`
|
||||
Udp bool `protobuf:"varint,5,opt,name=udp,proto3" json:"udp,omitempty"`
|
||||
Upnp bool `protobuf:"varint,6,opt,name=upnp,proto3" json:"upnp,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
HairPinning bool `protobuf:"varint,1,opt,name=hair_pinning,json=hairPinning,proto3" json:"hair_pinning,omitempty"`
|
||||
Ipv6 bool `protobuf:"varint,2,opt,name=ipv6,proto3" json:"ipv6,omitempty"`
|
||||
Pcp bool `protobuf:"varint,3,opt,name=pcp,proto3" json:"pcp,omitempty"`
|
||||
Pmp bool `protobuf:"varint,4,opt,name=pmp,proto3" json:"pmp,omitempty"`
|
||||
Udp bool `protobuf:"varint,5,opt,name=udp,proto3" json:"udp,omitempty"`
|
||||
Upnp bool `protobuf:"varint,6,opt,name=upnp,proto3" json:"upnp,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ClientSupports) Reset() {
|
||||
@@ -160,15 +159,14 @@ func (x *ClientSupports) GetUpnp() bool {
|
||||
}
|
||||
|
||||
type ClientConnectivity struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Endpoints []string `protobuf:"bytes,1,rep,name=endpoints,proto3" json:"endpoints,omitempty"`
|
||||
Derp string `protobuf:"bytes,2,opt,name=derp,proto3" json:"derp,omitempty"`
|
||||
MappingVariesByDestIp bool `protobuf:"varint,3,opt,name=mapping_varies_by_dest_ip,json=mappingVariesByDestIp,proto3" json:"mapping_varies_by_dest_ip,omitempty"`
|
||||
Latency map[string]*Latency `protobuf:"bytes,4,rep,name=latency,proto3" json:"latency,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
ClientSupports *ClientSupports `protobuf:"bytes,5,opt,name=client_supports,json=clientSupports,proto3" json:"client_supports,omitempty"`
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Endpoints []string `protobuf:"bytes,1,rep,name=endpoints,proto3" json:"endpoints,omitempty"`
|
||||
Derp string `protobuf:"bytes,2,opt,name=derp,proto3" json:"derp,omitempty"`
|
||||
MappingVariesByDestIp bool `protobuf:"varint,3,opt,name=mapping_varies_by_dest_ip,json=mappingVariesByDestIp,proto3" json:"mapping_varies_by_dest_ip,omitempty"`
|
||||
Latency map[string]*Latency `protobuf:"bytes,4,rep,name=latency,proto3" json:"latency,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
ClientSupports *ClientSupports `protobuf:"bytes,5,opt,name=client_supports,json=clientSupports,proto3" json:"client_supports,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ClientConnectivity) Reset() {
|
||||
@@ -237,11 +235,10 @@ func (x *ClientConnectivity) GetClientSupports() *ClientSupports {
|
||||
}
|
||||
|
||||
type GetDeviceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *GetDeviceRequest) Reset() {
|
||||
@@ -282,10 +279,7 @@ func (x *GetDeviceRequest) GetId() string {
|
||||
}
|
||||
|
||||
type GetDeviceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"`
|
||||
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"`
|
||||
@@ -306,6 +300,8 @@ type GetDeviceResponse struct {
|
||||
EnabledRoutes []string `protobuf:"bytes,18,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
AdvertisedRoutes []string `protobuf:"bytes,19,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
ClientConnectivity *ClientConnectivity `protobuf:"bytes,20,opt,name=client_connectivity,json=clientConnectivity,proto3" json:"client_connectivity,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *GetDeviceResponse) Reset() {
|
||||
@@ -479,11 +475,10 @@ func (x *GetDeviceResponse) GetClientConnectivity() *ClientConnectivity {
|
||||
}
|
||||
|
||||
type DeleteDeviceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *DeleteDeviceRequest) Reset() {
|
||||
@@ -524,9 +519,9 @@ func (x *DeleteDeviceRequest) GetId() string {
|
||||
}
|
||||
|
||||
type DeleteDeviceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *DeleteDeviceResponse) Reset() {
|
||||
@@ -560,11 +555,10 @@ func (*DeleteDeviceResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type GetDeviceRoutesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *GetDeviceRoutesRequest) Reset() {
|
||||
@@ -605,12 +599,11 @@ func (x *GetDeviceRoutesRequest) GetId() string {
|
||||
}
|
||||
|
||||
type GetDeviceRoutesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *GetDeviceRoutesResponse) Reset() {
|
||||
@@ -658,12 +651,11 @@ func (x *GetDeviceRoutesResponse) GetAdvertisedRoutes() []string {
|
||||
}
|
||||
|
||||
type EnableDeviceRoutesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Routes []string `protobuf:"bytes,2,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Routes []string `protobuf:"bytes,2,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *EnableDeviceRoutesRequest) Reset() {
|
||||
@@ -711,12 +703,11 @@ func (x *EnableDeviceRoutesRequest) GetRoutes() []string {
|
||||
}
|
||||
|
||||
type EnableDeviceRoutesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *EnableDeviceRoutesResponse) Reset() {
|
||||
@@ -765,139 +756,80 @@ func (x *EnableDeviceRoutesResponse) GetAdvertisedRoutes() []string {
|
||||
|
||||
var File_headscale_v1_device_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_device_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x64,
|
||||
0x65, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x46, 0x0a, 0x07, 0x4c, 0x61,
|
||||
0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79,
|
||||
0x5f, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e,
|
||||
0x63, 0x79, 0x4d, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65,
|
||||
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72,
|
||||
0x65, 0x64, 0x22, 0x91, 0x01, 0x0a, 0x0e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x75, 0x70,
|
||||
0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x61, 0x69, 0x72, 0x5f, 0x70, 0x69,
|
||||
0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x68, 0x61, 0x69,
|
||||
0x72, 0x50, 0x69, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x69, 0x70, 0x76, 0x36, 0x12, 0x10, 0x0a, 0x03,
|
||||
0x70, 0x63, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x63, 0x70, 0x12, 0x10,
|
||||
0x0a, 0x03, 0x70, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x6d, 0x70,
|
||||
0x12, 0x10, 0x0a, 0x03, 0x75, 0x64, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x75,
|
||||
0x64, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x70, 0x6e, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
|
||||
0x52, 0x04, 0x75, 0x70, 0x6e, 0x70, 0x22, 0xe3, 0x02, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e,
|
||||
0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x1c, 0x0a,
|
||||
0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
|
||||
0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64,
|
||||
0x65, 0x72, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, 0x72, 0x70, 0x12,
|
||||
0x38, 0x0a, 0x19, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x65,
|
||||
0x73, 0x5f, 0x62, 0x79, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x08, 0x52, 0x15, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x72, 0x69, 0x65,
|
||||
0x73, 0x42, 0x79, 0x44, 0x65, 0x73, 0x74, 0x49, 0x70, 0x12, 0x47, 0x0a, 0x07, 0x6c, 0x61, 0x74,
|
||||
0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
|
||||
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2e, 0x4c, 0x61, 0x74,
|
||||
0x65, 0x6e, 0x63, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6c, 0x61, 0x74, 0x65, 0x6e,
|
||||
0x63, 0x79, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x75, 0x70,
|
||||
0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e,
|
||||
0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
|
||||
0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x1a, 0x51, 0x0a, 0x0c, 0x4c, 0x61, 0x74,
|
||||
0x65, 0x6e, 0x63, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63,
|
||||
0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x22, 0x0a, 0x10,
|
||||
0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64,
|
||||
0x22, 0xa0, 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
|
||||
0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65,
|
||||
0x73, 0x73, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08,
|
||||
0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
|
||||
0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6c, 0x69, 0x65,
|
||||
0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
|
||||
0x29, 0x0a, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x75, 0x70, 0x64, 0x61, 0x74,
|
||||
0x65, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73,
|
||||
0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x63, 0x72,
|
||||
0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
|
||||
0x12, 0x37, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x18, 0x0a, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
|
||||
0x08, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x6b, 0x65, 0x79,
|
||||
0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64,
|
||||
0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x6b, 0x65, 0x79, 0x45, 0x78, 0x70, 0x69, 0x72,
|
||||
0x79, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12,
|
||||
0x1e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x18, 0x0d, 0x20,
|
||||
0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x12,
|
||||
0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x0e,
|
||||
0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
|
||||
0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18,
|
||||
0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4b, 0x65,
|
||||
0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x1b,
|
||||
0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x5f,
|
||||
0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28,
|
||||
0x08, 0x52, 0x19, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e,
|
||||
0x67, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e,
|
||||
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x12,
|
||||
0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65,
|
||||
0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10,
|
||||
0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x12, 0x51, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
|
||||
0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69,
|
||||
0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52,
|
||||
0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76,
|
||||
0x69, 0x74, 0x79, 0x22, 0x25, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x65, 0x76,
|
||||
0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x28, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02,
|
||||
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x6d, 0x0a, 0x17,
|
||||
0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c,
|
||||
0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
|
||||
0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2b,
|
||||
0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x64, 0x76, 0x65, 0x72,
|
||||
0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x19, 0x45,
|
||||
0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x22, 0x70, 0x0a, 0x1a, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25,
|
||||
0x0a, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69,
|
||||
0x73, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
|
||||
0x52, 0x10, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
const file_headscale_v1_device_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x19headscale/v1/device.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"F\n" +
|
||||
"\aLatency\x12\x1d\n" +
|
||||
"\n" +
|
||||
"latency_ms\x18\x01 \x01(\x02R\tlatencyMs\x12\x1c\n" +
|
||||
"\tpreferred\x18\x02 \x01(\bR\tpreferred\"\x91\x01\n" +
|
||||
"\x0eClientSupports\x12!\n" +
|
||||
"\fhair_pinning\x18\x01 \x01(\bR\vhairPinning\x12\x12\n" +
|
||||
"\x04ipv6\x18\x02 \x01(\bR\x04ipv6\x12\x10\n" +
|
||||
"\x03pcp\x18\x03 \x01(\bR\x03pcp\x12\x10\n" +
|
||||
"\x03pmp\x18\x04 \x01(\bR\x03pmp\x12\x10\n" +
|
||||
"\x03udp\x18\x05 \x01(\bR\x03udp\x12\x12\n" +
|
||||
"\x04upnp\x18\x06 \x01(\bR\x04upnp\"\xe3\x02\n" +
|
||||
"\x12ClientConnectivity\x12\x1c\n" +
|
||||
"\tendpoints\x18\x01 \x03(\tR\tendpoints\x12\x12\n" +
|
||||
"\x04derp\x18\x02 \x01(\tR\x04derp\x128\n" +
|
||||
"\x19mapping_varies_by_dest_ip\x18\x03 \x01(\bR\x15mappingVariesByDestIp\x12G\n" +
|
||||
"\alatency\x18\x04 \x03(\v2-.headscale.v1.ClientConnectivity.LatencyEntryR\alatency\x12E\n" +
|
||||
"\x0fclient_supports\x18\x05 \x01(\v2\x1c.headscale.v1.ClientSupportsR\x0eclientSupports\x1aQ\n" +
|
||||
"\fLatencyEntry\x12\x10\n" +
|
||||
"\x03key\x18\x01 \x01(\tR\x03key\x12+\n" +
|
||||
"\x05value\x18\x02 \x01(\v2\x15.headscale.v1.LatencyR\x05value:\x028\x01\"\"\n" +
|
||||
"\x10GetDeviceRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\tR\x02id\"\xa0\x06\n" +
|
||||
"\x11GetDeviceResponse\x12\x1c\n" +
|
||||
"\taddresses\x18\x01 \x03(\tR\taddresses\x12\x0e\n" +
|
||||
"\x02id\x18\x02 \x01(\tR\x02id\x12\x12\n" +
|
||||
"\x04user\x18\x03 \x01(\tR\x04user\x12\x12\n" +
|
||||
"\x04name\x18\x04 \x01(\tR\x04name\x12\x1a\n" +
|
||||
"\bhostname\x18\x05 \x01(\tR\bhostname\x12%\n" +
|
||||
"\x0eclient_version\x18\x06 \x01(\tR\rclientVersion\x12)\n" +
|
||||
"\x10update_available\x18\a \x01(\bR\x0fupdateAvailable\x12\x0e\n" +
|
||||
"\x02os\x18\b \x01(\tR\x02os\x124\n" +
|
||||
"\acreated\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\acreated\x127\n" +
|
||||
"\tlast_seen\x18\n" +
|
||||
" \x01(\v2\x1a.google.protobuf.TimestampR\blastSeen\x12.\n" +
|
||||
"\x13key_expiry_disabled\x18\v \x01(\bR\x11keyExpiryDisabled\x124\n" +
|
||||
"\aexpires\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\aexpires\x12\x1e\n" +
|
||||
"\n" +
|
||||
"authorized\x18\r \x01(\bR\n" +
|
||||
"authorized\x12\x1f\n" +
|
||||
"\vis_external\x18\x0e \x01(\bR\n" +
|
||||
"isExternal\x12\x1f\n" +
|
||||
"\vmachine_key\x18\x0f \x01(\tR\n" +
|
||||
"machineKey\x12\x19\n" +
|
||||
"\bnode_key\x18\x10 \x01(\tR\anodeKey\x12>\n" +
|
||||
"\x1bblocks_incoming_connections\x18\x11 \x01(\bR\x19blocksIncomingConnections\x12%\n" +
|
||||
"\x0eenabled_routes\x18\x12 \x03(\tR\renabledRoutes\x12+\n" +
|
||||
"\x11advertised_routes\x18\x13 \x03(\tR\x10advertisedRoutes\x12Q\n" +
|
||||
"\x13client_connectivity\x18\x14 \x01(\v2 .headscale.v1.ClientConnectivityR\x12clientConnectivity\"%\n" +
|
||||
"\x13DeleteDeviceRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\tR\x02id\"\x16\n" +
|
||||
"\x14DeleteDeviceResponse\"(\n" +
|
||||
"\x16GetDeviceRoutesRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\tR\x02id\"m\n" +
|
||||
"\x17GetDeviceRoutesResponse\x12%\n" +
|
||||
"\x0eenabled_routes\x18\x01 \x03(\tR\renabledRoutes\x12+\n" +
|
||||
"\x11advertised_routes\x18\x02 \x03(\tR\x10advertisedRoutes\"C\n" +
|
||||
"\x19EnableDeviceRoutesRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\tR\x02id\x12\x16\n" +
|
||||
"\x06routes\x18\x02 \x03(\tR\x06routes\"p\n" +
|
||||
"\x1aEnableDeviceRoutesResponse\x12%\n" +
|
||||
"\x0eenabled_routes\x18\x01 \x03(\tR\renabledRoutes\x12+\n" +
|
||||
"\x11advertised_routes\x18\x02 \x03(\tR\x10advertisedRoutesB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var (
	file_headscale_v1_device_proto_rawDescOnce sync.Once
	file_headscale_v1_device_proto_rawDescData = file_headscale_v1_device_proto_rawDesc
	file_headscale_v1_device_proto_rawDescData []byte
)

func file_headscale_v1_device_proto_rawDescGZIP() []byte {
	file_headscale_v1_device_proto_rawDescOnce.Do(func() {
		file_headscale_v1_device_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_device_proto_rawDescData)
		file_headscale_v1_device_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_device_proto_rawDesc), len(file_headscale_v1_device_proto_rawDesc)))
	})
	return file_headscale_v1_device_proto_rawDescData
}
@@ -942,7 +874,7 @@ func file_headscale_v1_device_proto_init() {
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_headscale_v1_device_proto_rawDesc,
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_device_proto_rawDesc), len(file_headscale_v1_device_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   12,
			NumExtensions: 0,
@@ -953,7 +885,6 @@ func file_headscale_v1_device_proto_init() {
		MessageInfos:      file_headscale_v1_device_proto_msgTypes,
	}.Build()
	File_headscale_v1_device_proto = out.File
	file_headscale_v1_device_proto_rawDesc = nil
	file_headscale_v1_device_proto_goTypes = nil
	file_headscale_v1_device_proto_depIdxs = nil
}

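The regenerated file above switches the raw descriptor from a `[]byte` literal to a string constant and only materializes a byte slice on demand via `unsafe.Slice(unsafe.StringData(...), len(...))`, as seen in `rawDescGZIP` and the `RawDescriptor` field. Below is a minimal, hypothetical sketch of that zero-copy conversion pattern; the `bytesOfString` helper and the truncated descriptor literal are illustrative and not part of the generated code.

```go
package main

import (
	"fmt"
	"unsafe"
)

// bytesOfString returns a []byte that aliases the backing data of s
// without copying. The slice must be treated as read-only, since string
// data is immutable; this mirrors the contract the generated code relies
// on when it hands the raw descriptor to the protoimpl type builder.
func bytesOfString(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	// Truncated stand-in for a serialized FileDescriptorProto.
	const rawDesc = "\n\x19headscale/v1/device.proto"

	b := bytesOfString(rawDesc)
	fmt.Println(len(b), b[0]) // 27 10: literal length and the leading tag byte
}
```

The apparent benefit of the string form is that the descriptor data can stay in the binary's read-only section and be viewed as bytes without an extra allocation or copy when the file is first initialized.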
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.35.1
// protoc-gen-go v1.36.6
// protoc (unknown)
// source: headscale/v1/headscale.proto

@@ -11,6 +11,7 @@ import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
	unsafe "unsafe"
)

const (
@@ -22,291 +23,90 @@ const (

var File_headscale_v1_headscale_proto protoreflect.FileDescriptor

var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x0a, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x1a, 0x17, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xe9, 0x19, 0x0a,
|
||||
0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
|
||||
0x65, 0x12, 0x68, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12,
|
||||
0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43,
|
||||
0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x3a, 0x01, 0x2a, 0x22, 0x0c, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a, 0x0a,
|
||||
0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, 0x27, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75,
|
||||
0x73, 0x65, 0x72, 0x2f, 0x7b, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x65, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x6a,
|
||||
0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x2a, 0x11, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x75, 0x73, 0x65, 0x72, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x12, 0x62, 0x0a, 0x09, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0e,
|
||||
0x12, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80,
|
||||
0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a, 0x22, 0x12, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65,
|
||||
0x79, 0x12, 0x87, 0x01, 0x0a, 0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41,
|
||||
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41,
|
||||
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01, 0x2a,
|
||||
0x22, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74,
|
||||
0x68, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x7a, 0x0a, 0x0f, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x24,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b,
|
||||
0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65,
|
||||
0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x12, 0x7d, 0x0a, 0x0f, 0x44, 0x65, 0x62, 0x75, 0x67,
|
||||
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43,
|
||||
0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a,
|
||||
0x01, 0x2a, 0x22, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75,
|
||||
0x67, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x66, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64,
|
||||
0x65, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47,
|
||||
0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x6e,
|
||||
0x0a, 0x07, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01,
|
||||
0x2a, 0x22, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
|
||||
0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x74, 0x61, 0x67, 0x73, 0x12, 0x74,
|
||||
0x0a, 0x0c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
|
||||
0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x22, 0x15, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x72, 0x65, 0x67, 0x69,
|
||||
0x73, 0x74, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f,
|
||||
0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x2a, 0x16, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x76, 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e,
|
||||
0x6f, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x22, 0x1d,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f,
|
||||
0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x81, 0x01,
|
||||
0x0a, 0x0a, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x22, 0x28, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f,
|
||||
0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x7d, 0x12, 0x62, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1e,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x14, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0e, 0x12, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x71, 0x0a, 0x08, 0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64,
|
||||
0x65, 0x12, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, 0x1b, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
|
||||
0x69, 0x64, 0x7d, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x42, 0x61, 0x63,
|
||||
0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x12, 0x24, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b,
|
||||
0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x1a, 0x22, 0x18, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
|
||||
0x62, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x69, 0x70, 0x73, 0x12, 0x64, 0x0a, 0x09, 0x47,
|
||||
0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x22, 0x20, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12,
|
||||
0x80, 0x01, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22,
|
||||
0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f,
|
||||
0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x64, 0x69, 0x73, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x12, 0x7f, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f,
|
||||
0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x75, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a,
|
||||
0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f,
|
||||
0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72,
|
||||
0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65,
|
||||
0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x22, 0x0e, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x77, 0x0a, 0x0c,
|
||||
0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69,
|
||||
0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45,
|
||||
0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65,
|
||||
0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x6a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69,
|
||||
0x4b, 0x65, 0x79, 0x73, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65,
|
||||
0x79, 0x12, 0x76, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65,
|
||||
0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19,
|
||||
0x2a, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79,
|
||||
0x2f, 0x7b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x7d, 0x12, 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74,
|
||||
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12,
|
||||
0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
|
||||
0x67, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1e, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x50,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x50,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x1a, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f,
|
||||
0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
const file_headscale_v1_headscale_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto2\xa3\x16\n" +
|
||||
"\x10HeadscaleService\x12h\n" +
|
||||
"\n" +
|
||||
"CreateUser\x12\x1f.headscale.v1.CreateUserRequest\x1a .headscale.v1.CreateUserResponse\"\x17\x82\xd3\xe4\x93\x02\x11:\x01*\"\f/api/v1/user\x12\x80\x01\n" +
|
||||
"\n" +
|
||||
"RenameUser\x12\x1f.headscale.v1.RenameUserRequest\x1a .headscale.v1.RenameUserResponse\"/\x82\xd3\xe4\x93\x02)\"'/api/v1/user/{old_id}/rename/{new_name}\x12j\n" +
|
||||
"\n" +
|
||||
"DeleteUser\x12\x1f.headscale.v1.DeleteUserRequest\x1a .headscale.v1.DeleteUserResponse\"\x19\x82\xd3\xe4\x93\x02\x13*\x11/api/v1/user/{id}\x12b\n" +
|
||||
"\tListUsers\x12\x1e.headscale.v1.ListUsersRequest\x1a\x1f.headscale.v1.ListUsersResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/user\x12\x80\x01\n" +
|
||||
"\x10CreatePreAuthKey\x12%.headscale.v1.CreatePreAuthKeyRequest\x1a&.headscale.v1.CreatePreAuthKeyResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/preauthkey\x12\x87\x01\n" +
|
||||
"\x10ExpirePreAuthKey\x12%.headscale.v1.ExpirePreAuthKeyRequest\x1a&.headscale.v1.ExpirePreAuthKeyResponse\"$\x82\xd3\xe4\x93\x02\x1e:\x01*\"\x19/api/v1/preauthkey/expire\x12z\n" +
|
||||
"\x0fListPreAuthKeys\x12$.headscale.v1.ListPreAuthKeysRequest\x1a%.headscale.v1.ListPreAuthKeysResponse\"\x1a\x82\xd3\xe4\x93\x02\x14\x12\x12/api/v1/preauthkey\x12}\n" +
|
||||
"\x0fDebugCreateNode\x12$.headscale.v1.DebugCreateNodeRequest\x1a%.headscale.v1.DebugCreateNodeResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/debug/node\x12f\n" +
|
||||
"\aGetNode\x12\x1c.headscale.v1.GetNodeRequest\x1a\x1d.headscale.v1.GetNodeResponse\"\x1e\x82\xd3\xe4\x93\x02\x18\x12\x16/api/v1/node/{node_id}\x12n\n" +
|
||||
"\aSetTags\x12\x1c.headscale.v1.SetTagsRequest\x1a\x1d.headscale.v1.SetTagsResponse\"&\x82\xd3\xe4\x93\x02 :\x01*\"\x1b/api/v1/node/{node_id}/tags\x12\x96\x01\n" +
|
||||
"\x11SetApprovedRoutes\x12&.headscale.v1.SetApprovedRoutesRequest\x1a'.headscale.v1.SetApprovedRoutesResponse\"0\x82\xd3\xe4\x93\x02*:\x01*\"%/api/v1/node/{node_id}/approve_routes\x12t\n" +
|
||||
"\fRegisterNode\x12!.headscale.v1.RegisterNodeRequest\x1a\".headscale.v1.RegisterNodeResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\"\x15/api/v1/node/register\x12o\n" +
|
||||
"\n" +
|
||||
"DeleteNode\x12\x1f.headscale.v1.DeleteNodeRequest\x1a .headscale.v1.DeleteNodeResponse\"\x1e\x82\xd3\xe4\x93\x02\x18*\x16/api/v1/node/{node_id}\x12v\n" +
|
||||
"\n" +
|
||||
"ExpireNode\x12\x1f.headscale.v1.ExpireNodeRequest\x1a .headscale.v1.ExpireNodeResponse\"%\x82\xd3\xe4\x93\x02\x1f\"\x1d/api/v1/node/{node_id}/expire\x12\x81\x01\n" +
|
||||
"\n" +
|
||||
"RenameNode\x12\x1f.headscale.v1.RenameNodeRequest\x1a .headscale.v1.RenameNodeResponse\"0\x82\xd3\xe4\x93\x02*\"(/api/v1/node/{node_id}/rename/{new_name}\x12b\n" +
|
||||
"\tListNodes\x12\x1e.headscale.v1.ListNodesRequest\x1a\x1f.headscale.v1.ListNodesResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/node\x12q\n" +
|
||||
"\bMoveNode\x12\x1d.headscale.v1.MoveNodeRequest\x1a\x1e.headscale.v1.MoveNodeResponse\"&\x82\xd3\xe4\x93\x02 :\x01*\"\x1b/api/v1/node/{node_id}/user\x12\x80\x01\n" +
|
||||
"\x0fBackfillNodeIPs\x12$.headscale.v1.BackfillNodeIPsRequest\x1a%.headscale.v1.BackfillNodeIPsResponse\" \x82\xd3\xe4\x93\x02\x1a\"\x18/api/v1/node/backfillips\x12p\n" +
|
||||
"\fCreateApiKey\x12!.headscale.v1.CreateApiKeyRequest\x1a\".headscale.v1.CreateApiKeyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\"\x0e/api/v1/apikey\x12w\n" +
|
||||
"\fExpireApiKey\x12!.headscale.v1.ExpireApiKeyRequest\x1a\".headscale.v1.ExpireApiKeyResponse\" \x82\xd3\xe4\x93\x02\x1a:\x01*\"\x15/api/v1/apikey/expire\x12j\n" +
|
||||
"\vListApiKeys\x12 .headscale.v1.ListApiKeysRequest\x1a!.headscale.v1.ListApiKeysResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/apikey\x12v\n" +
|
||||
"\fDeleteApiKey\x12!.headscale.v1.DeleteApiKeyRequest\x1a\".headscale.v1.DeleteApiKeyResponse\"\x1f\x82\xd3\xe4\x93\x02\x19*\x17/api/v1/apikey/{prefix}\x12d\n" +
|
||||
"\tGetPolicy\x12\x1e.headscale.v1.GetPolicyRequest\x1a\x1f.headscale.v1.GetPolicyResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/policy\x12g\n" +
|
||||
"\tSetPolicy\x12\x1e.headscale.v1.SetPolicyRequest\x1a\x1f.headscale.v1.SetPolicyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\x1a\x0e/api/v1/policyB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
|
||||
var file_headscale_v1_headscale_proto_goTypes = []any{
|
||||
(*CreateUserRequest)(nil), // 0: headscale.v1.CreateUserRequest
|
||||
(*RenameUserRequest)(nil), // 1: headscale.v1.RenameUserRequest
|
||||
(*DeleteUserRequest)(nil), // 2: headscale.v1.DeleteUserRequest
|
||||
(*ListUsersRequest)(nil), // 3: headscale.v1.ListUsersRequest
|
||||
(*CreatePreAuthKeyRequest)(nil), // 4: headscale.v1.CreatePreAuthKeyRequest
|
||||
(*ExpirePreAuthKeyRequest)(nil), // 5: headscale.v1.ExpirePreAuthKeyRequest
|
||||
(*ListPreAuthKeysRequest)(nil), // 6: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateNodeRequest)(nil), // 7: headscale.v1.DebugCreateNodeRequest
|
||||
(*GetNodeRequest)(nil), // 8: headscale.v1.GetNodeRequest
|
||||
(*SetTagsRequest)(nil), // 9: headscale.v1.SetTagsRequest
|
||||
(*RegisterNodeRequest)(nil), // 10: headscale.v1.RegisterNodeRequest
|
||||
(*DeleteNodeRequest)(nil), // 11: headscale.v1.DeleteNodeRequest
|
||||
(*ExpireNodeRequest)(nil), // 12: headscale.v1.ExpireNodeRequest
|
||||
(*RenameNodeRequest)(nil), // 13: headscale.v1.RenameNodeRequest
|
||||
(*ListNodesRequest)(nil), // 14: headscale.v1.ListNodesRequest
|
||||
(*MoveNodeRequest)(nil), // 15: headscale.v1.MoveNodeRequest
|
||||
(*BackfillNodeIPsRequest)(nil), // 16: headscale.v1.BackfillNodeIPsRequest
|
||||
(*GetRoutesRequest)(nil), // 17: headscale.v1.GetRoutesRequest
|
||||
(*EnableRouteRequest)(nil), // 18: headscale.v1.EnableRouteRequest
|
||||
(*DisableRouteRequest)(nil), // 19: headscale.v1.DisableRouteRequest
|
||||
(*GetNodeRoutesRequest)(nil), // 20: headscale.v1.GetNodeRoutesRequest
|
||||
(*DeleteRouteRequest)(nil), // 21: headscale.v1.DeleteRouteRequest
|
||||
(*CreateApiKeyRequest)(nil), // 22: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 23: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 24: headscale.v1.ListApiKeysRequest
|
||||
(*DeleteApiKeyRequest)(nil), // 25: headscale.v1.DeleteApiKeyRequest
|
||||
(*GetPolicyRequest)(nil), // 26: headscale.v1.GetPolicyRequest
|
||||
(*SetPolicyRequest)(nil), // 27: headscale.v1.SetPolicyRequest
|
||||
(*CreateUserResponse)(nil), // 28: headscale.v1.CreateUserResponse
|
||||
(*RenameUserResponse)(nil), // 29: headscale.v1.RenameUserResponse
|
||||
(*DeleteUserResponse)(nil), // 30: headscale.v1.DeleteUserResponse
|
||||
(*ListUsersResponse)(nil), // 31: headscale.v1.ListUsersResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 32: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 33: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 34: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 35: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 36: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 37: headscale.v1.SetTagsResponse
|
||||
(*RegisterNodeResponse)(nil), // 38: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 39: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 40: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 41: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 42: headscale.v1.ListNodesResponse
|
||||
(*MoveNodeResponse)(nil), // 43: headscale.v1.MoveNodeResponse
|
||||
(*BackfillNodeIPsResponse)(nil), // 44: headscale.v1.BackfillNodeIPsResponse
|
||||
(*GetRoutesResponse)(nil), // 45: headscale.v1.GetRoutesResponse
|
||||
(*EnableRouteResponse)(nil), // 46: headscale.v1.EnableRouteResponse
|
||||
(*DisableRouteResponse)(nil), // 47: headscale.v1.DisableRouteResponse
|
||||
(*GetNodeRoutesResponse)(nil), // 48: headscale.v1.GetNodeRoutesResponse
|
||||
(*DeleteRouteResponse)(nil), // 49: headscale.v1.DeleteRouteResponse
|
||||
(*CreateApiKeyResponse)(nil), // 50: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 51: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 52: headscale.v1.ListApiKeysResponse
|
||||
(*DeleteApiKeyResponse)(nil), // 53: headscale.v1.DeleteApiKeyResponse
|
||||
(*GetPolicyResponse)(nil), // 54: headscale.v1.GetPolicyResponse
|
||||
(*SetPolicyResponse)(nil), // 55: headscale.v1.SetPolicyResponse
|
||||
(*CreateUserRequest)(nil), // 0: headscale.v1.CreateUserRequest
|
||||
(*RenameUserRequest)(nil), // 1: headscale.v1.RenameUserRequest
|
||||
(*DeleteUserRequest)(nil), // 2: headscale.v1.DeleteUserRequest
|
||||
(*ListUsersRequest)(nil), // 3: headscale.v1.ListUsersRequest
|
||||
(*CreatePreAuthKeyRequest)(nil), // 4: headscale.v1.CreatePreAuthKeyRequest
|
||||
(*ExpirePreAuthKeyRequest)(nil), // 5: headscale.v1.ExpirePreAuthKeyRequest
|
||||
(*ListPreAuthKeysRequest)(nil), // 6: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateNodeRequest)(nil), // 7: headscale.v1.DebugCreateNodeRequest
|
||||
(*GetNodeRequest)(nil), // 8: headscale.v1.GetNodeRequest
|
||||
(*SetTagsRequest)(nil), // 9: headscale.v1.SetTagsRequest
|
||||
(*SetApprovedRoutesRequest)(nil), // 10: headscale.v1.SetApprovedRoutesRequest
|
||||
(*RegisterNodeRequest)(nil), // 11: headscale.v1.RegisterNodeRequest
|
||||
(*DeleteNodeRequest)(nil), // 12: headscale.v1.DeleteNodeRequest
|
||||
(*ExpireNodeRequest)(nil), // 13: headscale.v1.ExpireNodeRequest
|
||||
(*RenameNodeRequest)(nil), // 14: headscale.v1.RenameNodeRequest
|
||||
(*ListNodesRequest)(nil), // 15: headscale.v1.ListNodesRequest
|
||||
(*MoveNodeRequest)(nil), // 16: headscale.v1.MoveNodeRequest
|
||||
(*BackfillNodeIPsRequest)(nil), // 17: headscale.v1.BackfillNodeIPsRequest
|
||||
(*CreateApiKeyRequest)(nil), // 18: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 19: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 20: headscale.v1.ListApiKeysRequest
|
||||
(*DeleteApiKeyRequest)(nil), // 21: headscale.v1.DeleteApiKeyRequest
|
||||
(*GetPolicyRequest)(nil), // 22: headscale.v1.GetPolicyRequest
|
||||
(*SetPolicyRequest)(nil), // 23: headscale.v1.SetPolicyRequest
|
||||
(*CreateUserResponse)(nil), // 24: headscale.v1.CreateUserResponse
|
||||
(*RenameUserResponse)(nil), // 25: headscale.v1.RenameUserResponse
|
||||
(*DeleteUserResponse)(nil), // 26: headscale.v1.DeleteUserResponse
|
||||
(*ListUsersResponse)(nil), // 27: headscale.v1.ListUsersResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 28: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 29: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 30: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 31: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 32: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 33: headscale.v1.SetTagsResponse
|
||||
(*SetApprovedRoutesResponse)(nil), // 34: headscale.v1.SetApprovedRoutesResponse
|
||||
(*RegisterNodeResponse)(nil), // 35: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 36: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 37: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 38: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 39: headscale.v1.ListNodesResponse
|
||||
(*MoveNodeResponse)(nil), // 40: headscale.v1.MoveNodeResponse
|
||||
(*BackfillNodeIPsResponse)(nil), // 41: headscale.v1.BackfillNodeIPsResponse
|
||||
(*CreateApiKeyResponse)(nil), // 42: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 43: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 44: headscale.v1.ListApiKeysResponse
|
||||
(*DeleteApiKeyResponse)(nil), // 45: headscale.v1.DeleteApiKeyResponse
|
||||
(*GetPolicyResponse)(nil), // 46: headscale.v1.GetPolicyResponse
|
||||
(*SetPolicyResponse)(nil), // 47: headscale.v1.SetPolicyResponse
|
||||
}
|
||||
var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
0, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
|
||||
@@ -319,54 +119,46 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
7, // 7: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
|
||||
8, // 8: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
|
||||
9, // 9: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
|
||||
10, // 10: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
|
||||
11, // 11: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
|
||||
12, // 12: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
|
||||
13, // 13: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
|
||||
14, // 14: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
|
||||
15, // 15: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
|
||||
16, // 16: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
17, // 17: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest
18, // 18: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest
19, // 19: headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest
20, // 20: headscale.v1.HeadscaleService.GetNodeRoutes:input_type -> headscale.v1.GetNodeRoutesRequest
21, // 21: headscale.v1.HeadscaleService.DeleteRoute:input_type -> headscale.v1.DeleteRouteRequest
22, // 22: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
23, // 23: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
24, // 24: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
25, // 25: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
26, // 26: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
27, // 27: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
28, // 28: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
29, // 29: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
30, // 30: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
31, // 31: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
32, // 32: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
33, // 33: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
34, // 34: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
35, // 35: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
36, // 36: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
37, // 37: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
38, // 38: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
39, // 39: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
40, // 40: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
41, // 41: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
42, // 42: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
43, // 43: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
44, // 44: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
45, // 45: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse
46, // 46: headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse
47, // 47: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse
48, // 48: headscale.v1.HeadscaleService.GetNodeRoutes:output_type -> headscale.v1.GetNodeRoutesResponse
49, // 49: headscale.v1.HeadscaleService.DeleteRoute:output_type -> headscale.v1.DeleteRouteResponse
50, // 50: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
51, // 51: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
52, // 52: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
53, // 53: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
54, // 54: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
55, // 55: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
28, // [28:56] is the sub-list for method output_type
0, // [0:28] is the sub-list for method input_type
10, // 10: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
11, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
12, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
13, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
14, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
15, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
16, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
17, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
18, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
19, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
20, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
21, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
22, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
23, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
24, // 24: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
25, // 25: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
26, // 26: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
27, // 27: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
28, // 28: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
29, // 29: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
30, // 30: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
31, // 31: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
32, // 32: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
33, // 33: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
34, // 34: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
35, // 35: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
36, // 36: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
37, // 37: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
38, // 38: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
39, // 39: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
40, // 40: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
41, // 41: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
42, // 42: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
43, // 43: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
44, // 44: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
45, // 45: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
46, // 46: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
47, // 47: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
24, // [24:48] is the sub-list for method output_type
0, // [0:24] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
@@ -380,14 +172,13 @@ func file_headscale_v1_headscale_proto_init() {
file_headscale_v1_user_proto_init()
file_headscale_v1_preauthkey_proto_init()
file_headscale_v1_node_proto_init()
file_headscale_v1_routes_proto_init()
file_headscale_v1_apikey_proto_init()
file_headscale_v1_policy_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_headscale_v1_headscale_proto_rawDesc,
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)),
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
@@ -397,7 +188,6 @@ func file_headscale_v1_headscale_proto_init() {
DependencyIndexes: file_headscale_v1_headscale_proto_depIdxs,
}.Build()
File_headscale_v1_headscale_proto = out.File
file_headscale_v1_headscale_proto_rawDesc = nil
file_headscale_v1_headscale_proto_goTypes = nil
file_headscale_v1_headscale_proto_depIdxs = nil
}

@@ -361,6 +361,48 @@ func local_request_HeadscaleService_SetTags_0(ctx context.Context, marshaler run
return msg, metadata, err
}

func request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq SetApprovedRoutesRequest
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := client.SetApprovedRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}

func local_request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq SetApprovedRoutesRequest
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := server.SetApprovedRoutes(ctx, &protoReq)
return msg, metadata, err
}
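
The two helpers above only translate a gateway request for the new SetApprovedRoutes RPC: they decode the JSON body into a SetApprovedRoutesRequest and fill NodeId from the {node_id} path parameter via runtime.Uint64. A minimal sketch of the HTTP call they serve (registered later in this diff as POST /api/v1/node/{node_id}/approve_routes) is shown below; the listen address, the Bearer API-key header, and the "routes" body field are assumptions and are not part of this diff.

package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Assumed server address and API key; only the POST method and the
	// path template /api/v1/node/{node_id}/approve_routes come from the
	// gateway registration in this diff.
	const server = "http://127.0.0.1:8080"
	const apiKey = "<headscale api key>"

	// The "routes" body field is an assumption about SetApprovedRoutesRequest;
	// the message definition itself is not part of this diff. The node_id
	// path parameter is the one decoded by the handlers above.
	body := strings.NewReader(`{"routes": ["10.0.0.0/24"]}`)

	req, err := http.NewRequest(http.MethodPost, server+"/api/v1/node/1/approve_routes", body)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}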

var filter_HeadscaleService_RegisterNode_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}

func request_HeadscaleService_RegisterNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -623,168 +665,6 @@ func local_request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marsh
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_GetRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq GetRoutesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := client.GetRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_GetRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq GetRoutesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := server.GetRoutes(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_EnableRoute_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq EnableRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := client.EnableRoute(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_EnableRoute_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq EnableRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := server.EnableRoute(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_DisableRoute_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DisableRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := client.DisableRoute(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_DisableRoute_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DisableRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := server.DisableRoute(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_GetNodeRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq GetNodeRoutesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
}
|
||||
protoReq.NodeId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
msg, err := client.GetNodeRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_GetNodeRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq GetNodeRoutesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
}
|
||||
protoReq.NodeId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
msg, err := server.GetNodeRoutes(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_DeleteRoute_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DeleteRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := client.DeleteRoute(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_DeleteRoute_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DeleteRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := server.DeleteRoute(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_CreateApiKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq CreateApiKeyRequest
|
||||
@@ -1135,6 +1015,26 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
}
forward_HeadscaleService_SetTags_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_SetApprovedRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/SetApprovedRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/approve_routes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_HeadscaleService_SetApprovedRoutes_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_SetApprovedRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -1275,106 +1175,6 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetRoutes", runtime.WithHTTPPathPattern("/api/v1/routes"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_GetRoutes_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_GetRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_EnableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/EnableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/enable"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_EnableRoute_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_EnableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_DisableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DisableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/disable"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_DisableRoute_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DisableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetNodeRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetNodeRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/routes"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_GetNodeRoutes_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_GetNodeRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DeleteRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_DeleteRoute_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DeleteRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_CreateApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1705,6 +1505,23 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
}
forward_HeadscaleService_SetTags_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_SetApprovedRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/SetApprovedRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/approve_routes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_HeadscaleService_SetApprovedRoutes_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_SetApprovedRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -1824,91 +1641,6 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetRoutes", runtime.WithHTTPPathPattern("/api/v1/routes"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_GetRoutes_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_GetRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_EnableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/EnableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/enable"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_EnableRoute_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_EnableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_DisableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DisableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/disable"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_DisableRoute_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DisableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetNodeRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetNodeRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/routes"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_GetNodeRoutes_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_GetNodeRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DeleteRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_DeleteRoute_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DeleteRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_CreateApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -2015,63 +1747,55 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_HeadscaleService_CreateUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
|
||||
pattern_HeadscaleService_RenameUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "user", "old_id", "rename", "new_name"}, ""))
|
||||
pattern_HeadscaleService_DeleteUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "user", "id"}, ""))
|
||||
pattern_HeadscaleService_ListUsers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
|
||||
pattern_HeadscaleService_CreatePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
|
||||
pattern_HeadscaleService_ExpirePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "preauthkey", "expire"}, ""))
|
||||
pattern_HeadscaleService_ListPreAuthKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
|
||||
pattern_HeadscaleService_DebugCreateNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "debug", "node"}, ""))
|
||||
pattern_HeadscaleService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
|
||||
pattern_HeadscaleService_SetTags_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "tags"}, ""))
|
||||
pattern_HeadscaleService_RegisterNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "register"}, ""))
|
||||
pattern_HeadscaleService_DeleteNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
|
||||
pattern_HeadscaleService_ExpireNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "expire"}, ""))
|
||||
pattern_HeadscaleService_RenameNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "node", "node_id", "rename", "new_name"}, ""))
|
||||
pattern_HeadscaleService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "node"}, ""))
|
||||
pattern_HeadscaleService_MoveNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "user"}, ""))
|
||||
pattern_HeadscaleService_BackfillNodeIPs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "backfillips"}, ""))
|
||||
pattern_HeadscaleService_GetRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "routes"}, ""))
|
||||
pattern_HeadscaleService_EnableRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "routes", "route_id", "enable"}, ""))
|
||||
pattern_HeadscaleService_DisableRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "routes", "route_id", "disable"}, ""))
|
||||
pattern_HeadscaleService_GetNodeRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "routes"}, ""))
|
||||
pattern_HeadscaleService_DeleteRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "routes", "route_id"}, ""))
|
||||
pattern_HeadscaleService_CreateApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
|
||||
pattern_HeadscaleService_ExpireApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "apikey", "expire"}, ""))
|
||||
pattern_HeadscaleService_ListApiKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
|
||||
pattern_HeadscaleService_DeleteApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "apikey", "prefix"}, ""))
|
||||
pattern_HeadscaleService_GetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
|
||||
pattern_HeadscaleService_SetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
|
||||
pattern_HeadscaleService_CreateUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
|
||||
pattern_HeadscaleService_RenameUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "user", "old_id", "rename", "new_name"}, ""))
|
||||
pattern_HeadscaleService_DeleteUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "user", "id"}, ""))
|
||||
pattern_HeadscaleService_ListUsers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
|
||||
pattern_HeadscaleService_CreatePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
|
||||
pattern_HeadscaleService_ExpirePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "preauthkey", "expire"}, ""))
|
||||
pattern_HeadscaleService_ListPreAuthKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
|
||||
pattern_HeadscaleService_DebugCreateNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "debug", "node"}, ""))
|
||||
pattern_HeadscaleService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
|
||||
pattern_HeadscaleService_SetTags_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "tags"}, ""))
|
||||
pattern_HeadscaleService_SetApprovedRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "approve_routes"}, ""))
|
||||
pattern_HeadscaleService_RegisterNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "register"}, ""))
|
||||
pattern_HeadscaleService_DeleteNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
|
||||
pattern_HeadscaleService_ExpireNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "expire"}, ""))
|
||||
pattern_HeadscaleService_RenameNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "node", "node_id", "rename", "new_name"}, ""))
|
||||
pattern_HeadscaleService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "node"}, ""))
|
||||
pattern_HeadscaleService_MoveNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "user"}, ""))
|
||||
pattern_HeadscaleService_BackfillNodeIPs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "backfillips"}, ""))
|
||||
pattern_HeadscaleService_CreateApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
|
||||
pattern_HeadscaleService_ExpireApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "apikey", "expire"}, ""))
|
||||
pattern_HeadscaleService_ListApiKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
|
||||
pattern_HeadscaleService_DeleteApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "apikey", "prefix"}, ""))
|
||||
pattern_HeadscaleService_GetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
|
||||
pattern_HeadscaleService_SetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
|
||||
)
|
||||

var (
forward_HeadscaleService_CreateUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RenameUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListUsers_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreatePreAuthKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpirePreAuthKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListPreAuthKeys_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DebugCreateNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetTags_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RegisterNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpireNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RenameNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListNodes_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_MoveNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_BackfillNodeIPs_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetRoutes_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_EnableRoute_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DisableRoute_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetNodeRoutes_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteRoute_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreateApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpireApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListApiKeys_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreateUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RenameUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListUsers_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreatePreAuthKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpirePreAuthKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListPreAuthKeys_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DebugCreateNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetTags_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetApprovedRoutes_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RegisterNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpireNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RenameNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListNodes_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_MoveNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_BackfillNodeIPs_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreateApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpireApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListApiKeys_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetPolicy_0 = runtime.ForwardResponseMessage
)

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc-gen-go-grpc v1.5.1
// - protoc (unknown)
// source: headscale/v1/headscale.proto

@@ -15,38 +15,34 @@ import (

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9

const (
|
||||
HeadscaleService_CreateUser_FullMethodName = "/headscale.v1.HeadscaleService/CreateUser"
|
||||
HeadscaleService_RenameUser_FullMethodName = "/headscale.v1.HeadscaleService/RenameUser"
|
||||
HeadscaleService_DeleteUser_FullMethodName = "/headscale.v1.HeadscaleService/DeleteUser"
|
||||
HeadscaleService_ListUsers_FullMethodName = "/headscale.v1.HeadscaleService/ListUsers"
|
||||
HeadscaleService_CreatePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/CreatePreAuthKey"
|
||||
HeadscaleService_ExpirePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpirePreAuthKey"
|
||||
HeadscaleService_ListPreAuthKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListPreAuthKeys"
|
||||
HeadscaleService_DebugCreateNode_FullMethodName = "/headscale.v1.HeadscaleService/DebugCreateNode"
|
||||
HeadscaleService_GetNode_FullMethodName = "/headscale.v1.HeadscaleService/GetNode"
|
||||
HeadscaleService_SetTags_FullMethodName = "/headscale.v1.HeadscaleService/SetTags"
|
||||
HeadscaleService_RegisterNode_FullMethodName = "/headscale.v1.HeadscaleService/RegisterNode"
|
||||
HeadscaleService_DeleteNode_FullMethodName = "/headscale.v1.HeadscaleService/DeleteNode"
|
||||
HeadscaleService_ExpireNode_FullMethodName = "/headscale.v1.HeadscaleService/ExpireNode"
|
||||
HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode"
|
||||
HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes"
|
||||
HeadscaleService_MoveNode_FullMethodName = "/headscale.v1.HeadscaleService/MoveNode"
|
||||
HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs"
|
||||
HeadscaleService_GetRoutes_FullMethodName = "/headscale.v1.HeadscaleService/GetRoutes"
|
||||
HeadscaleService_EnableRoute_FullMethodName = "/headscale.v1.HeadscaleService/EnableRoute"
|
||||
HeadscaleService_DisableRoute_FullMethodName = "/headscale.v1.HeadscaleService/DisableRoute"
|
||||
HeadscaleService_GetNodeRoutes_FullMethodName = "/headscale.v1.HeadscaleService/GetNodeRoutes"
|
||||
HeadscaleService_DeleteRoute_FullMethodName = "/headscale.v1.HeadscaleService/DeleteRoute"
|
||||
HeadscaleService_CreateApiKey_FullMethodName = "/headscale.v1.HeadscaleService/CreateApiKey"
|
||||
HeadscaleService_ExpireApiKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpireApiKey"
|
||||
HeadscaleService_ListApiKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListApiKeys"
|
||||
HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey"
|
||||
HeadscaleService_GetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/GetPolicy"
|
||||
HeadscaleService_SetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/SetPolicy"
|
||||
HeadscaleService_CreateUser_FullMethodName = "/headscale.v1.HeadscaleService/CreateUser"
|
||||
HeadscaleService_RenameUser_FullMethodName = "/headscale.v1.HeadscaleService/RenameUser"
|
||||
HeadscaleService_DeleteUser_FullMethodName = "/headscale.v1.HeadscaleService/DeleteUser"
|
||||
HeadscaleService_ListUsers_FullMethodName = "/headscale.v1.HeadscaleService/ListUsers"
|
||||
HeadscaleService_CreatePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/CreatePreAuthKey"
|
||||
HeadscaleService_ExpirePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpirePreAuthKey"
|
||||
HeadscaleService_ListPreAuthKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListPreAuthKeys"
|
||||
HeadscaleService_DebugCreateNode_FullMethodName = "/headscale.v1.HeadscaleService/DebugCreateNode"
|
||||
HeadscaleService_GetNode_FullMethodName = "/headscale.v1.HeadscaleService/GetNode"
|
||||
HeadscaleService_SetTags_FullMethodName = "/headscale.v1.HeadscaleService/SetTags"
|
||||
HeadscaleService_SetApprovedRoutes_FullMethodName = "/headscale.v1.HeadscaleService/SetApprovedRoutes"
|
||||
HeadscaleService_RegisterNode_FullMethodName = "/headscale.v1.HeadscaleService/RegisterNode"
|
||||
HeadscaleService_DeleteNode_FullMethodName = "/headscale.v1.HeadscaleService/DeleteNode"
|
||||
HeadscaleService_ExpireNode_FullMethodName = "/headscale.v1.HeadscaleService/ExpireNode"
|
||||
HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode"
|
||||
HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes"
|
||||
HeadscaleService_MoveNode_FullMethodName = "/headscale.v1.HeadscaleService/MoveNode"
|
||||
HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs"
|
||||
HeadscaleService_CreateApiKey_FullMethodName = "/headscale.v1.HeadscaleService/CreateApiKey"
|
||||
HeadscaleService_ExpireApiKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpireApiKey"
|
||||
HeadscaleService_ListApiKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListApiKeys"
|
||||
HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey"
|
||||
HeadscaleService_GetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/GetPolicy"
|
||||
HeadscaleService_SetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/SetPolicy"
|
||||
)
|
||||
|
||||
// HeadscaleServiceClient is the client API for HeadscaleService service.
@@ -66,6 +62,7 @@ type HeadscaleServiceClient interface {
DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error)
GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error)
SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error)
SetApprovedRoutes(ctx context.Context, in *SetApprovedRoutesRequest, opts ...grpc.CallOption) (*SetApprovedRoutesResponse, error)
RegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error)
DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error)
ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error)
@@ -73,12 +70,6 @@ type HeadscaleServiceClient interface {
ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error)
MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error)
BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error)
// --- Route start ---
GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error)
EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error)
DisableRoute(ctx context.Context, in *DisableRouteRequest, opts ...grpc.CallOption) (*DisableRouteResponse, error)
GetNodeRoutes(ctx context.Context, in *GetNodeRoutesRequest, opts ...grpc.CallOption) (*GetNodeRoutesResponse, error)
DeleteRoute(ctx context.Context, in *DeleteRouteRequest, opts ...grpc.CallOption) (*DeleteRouteResponse, error)
// --- ApiKeys start ---
CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error)
ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error)
@@ -98,8 +89,9 @@ func NewHeadscaleServiceClient(cc grpc.ClientConnInterface) HeadscaleServiceClie
}
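
With SetApprovedRoutes added to HeadscaleServiceClient, a client obtained from NewHeadscaleServiceClient can invoke the RPC directly over gRPC, bypassing the HTTP gateway. The sketch below is illustrative only: the dial target, the use of insecure credentials, and the generated-package import path are assumptions, and NodeId is the only request field confirmed by this diff (the gateway code above fills it from the {node_id} path parameter).

package main

import (
	"context"
	"log"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // assumed import path for the generated package
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed gRPC address; a real deployment would normally use TLS and
	// authentication rather than insecure credentials.
	conn, err := grpc.NewClient("127.0.0.1:50443", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)

	// Only NodeId is visible in this diff; any field carrying the route
	// list itself would be set on the request as well.
	_, err = client.SetApprovedRoutes(context.Background(), &v1.SetApprovedRoutesRequest{NodeId: 1})
	if err != nil {
		log.Fatal(err)
	}
}
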
func (c *headscaleServiceClient) CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CreateUserResponse)
err := c.cc.Invoke(ctx, HeadscaleService_CreateUser_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, HeadscaleService_CreateUser_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -107,8 +99,9 @@ func (c *headscaleServiceClient) CreateUser(ctx context.Context, in *CreateUserR
}

func (c *headscaleServiceClient) RenameUser(ctx context.Context, in *RenameUserRequest, opts ...grpc.CallOption) (*RenameUserResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(RenameUserResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RenameUser_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RenameUser_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -116,8 +109,9 @@ func (c *headscaleServiceClient) RenameUser(ctx context.Context, in *RenameUserR
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(DeleteUserResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteUser_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteUser_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -125,8 +119,9 @@ func (c *headscaleServiceClient) DeleteUser(ctx context.Context, in *DeleteUserR
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ListUsersResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListUsers_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListUsers_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -134,8 +129,9 @@ func (c *headscaleServiceClient) ListUsers(ctx context.Context, in *ListUsersReq
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CreatePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreatePreAuthKey_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreatePreAuthKey_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -143,8 +139,9 @@ func (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *Creat
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ExpirePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpirePreAuthKey_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpirePreAuthKey_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -152,8 +149,9 @@ func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *Expir
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ListPreAuthKeysResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListPreAuthKeys_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListPreAuthKeys_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -161,8 +159,9 @@ func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPr
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(DebugCreateNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DebugCreateNode_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DebugCreateNode_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -170,8 +169,9 @@ func (c *headscaleServiceClient) DebugCreateNode(ctx context.Context, in *DebugC
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(GetNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetNode_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetNode_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -179,8 +179,19 @@ func (c *headscaleServiceClient) GetNode(ctx context.Context, in *GetNodeRequest
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SetTagsResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_SetTags_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_SetTags_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) SetApprovedRoutes(ctx context.Context, in *SetApprovedRoutesRequest, opts ...grpc.CallOption) (*SetApprovedRoutesResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SetApprovedRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_SetApprovedRoutes_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -188,8 +199,9 @@ func (c *headscaleServiceClient) SetTags(ctx context.Context, in *SetTagsRequest
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) RegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(RegisterNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RegisterNode_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RegisterNode_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -197,8 +209,9 @@ func (c *headscaleServiceClient) RegisterNode(ctx context.Context, in *RegisterN
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(DeleteNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteNode_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteNode_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -206,8 +219,9 @@ func (c *headscaleServiceClient) DeleteNode(ctx context.Context, in *DeleteNodeR
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ExpireNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpireNode_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpireNode_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -215,8 +229,9 @@ func (c *headscaleServiceClient) ExpireNode(ctx context.Context, in *ExpireNodeR
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(RenameNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RenameNode_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RenameNode_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -224,8 +239,9 @@ func (c *headscaleServiceClient) RenameNode(ctx context.Context, in *RenameNodeR
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ListNodesResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListNodes_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListNodes_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -233,8 +249,9 @@ func (c *headscaleServiceClient) ListNodes(ctx context.Context, in *ListNodesReq
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(MoveNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_MoveNode_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_MoveNode_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -242,53 +259,9 @@ func (c *headscaleServiceClient) MoveNode(ctx context.Context, in *MoveNodeReque
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(BackfillNodeIPsResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error) {
|
||||
out := new(GetRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetRoutes_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error) {
|
||||
out := new(EnableRouteResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_EnableRoute_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DisableRoute(ctx context.Context, in *DisableRouteRequest, opts ...grpc.CallOption) (*DisableRouteResponse, error) {
|
||||
out := new(DisableRouteResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DisableRoute_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetNodeRoutes(ctx context.Context, in *GetNodeRoutesRequest, opts ...grpc.CallOption) (*GetNodeRoutesResponse, error) {
|
||||
out := new(GetNodeRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetNodeRoutes_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteRoute(ctx context.Context, in *DeleteRouteRequest, opts ...grpc.CallOption) (*DeleteRouteResponse, error) {
|
||||
out := new(DeleteRouteResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteRoute_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -296,8 +269,9 @@ func (c *headscaleServiceClient) DeleteRoute(ctx context.Context, in *DeleteRout
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CreateApiKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreateApiKey_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreateApiKey_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -305,8 +279,9 @@ func (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApi
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ExpireApiKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpireApiKey_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpireApiKey_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -314,8 +289,9 @@ func (c *headscaleServiceClient) ExpireApiKey(ctx context.Context, in *ExpireApi
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ListApiKeysResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListApiKeys_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListApiKeys_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -323,8 +299,9 @@ func (c *headscaleServiceClient) ListApiKeys(ctx context.Context, in *ListApiKey
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteApiKey(ctx context.Context, in *DeleteApiKeyRequest, opts ...grpc.CallOption) (*DeleteApiKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(DeleteApiKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteApiKey_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteApiKey_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -332,8 +309,9 @@ func (c *headscaleServiceClient) DeleteApiKey(ctx context.Context, in *DeleteApi
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(GetPolicyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetPolicy_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetPolicy_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -341,8 +319,9 @@ func (c *headscaleServiceClient) GetPolicy(ctx context.Context, in *GetPolicyReq
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SetPolicyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_SetPolicy_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_SetPolicy_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -351,7 +330,7 @@ func (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyReq
|
||||
|
||||
// HeadscaleServiceServer is the server API for HeadscaleService service.
|
||||
// All implementations must embed UnimplementedHeadscaleServiceServer
|
||||
// for forward compatibility
|
||||
// for forward compatibility.
|
||||
type HeadscaleServiceServer interface {
|
||||
// --- User start ---
|
||||
CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error)
|
||||
@@ -366,6 +345,7 @@ type HeadscaleServiceServer interface {
|
||||
DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error)
|
||||
GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error)
|
||||
SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error)
|
||||
SetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error)
|
||||
RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error)
|
||||
DeleteNode(context.Context, *DeleteNodeRequest) (*DeleteNodeResponse, error)
|
||||
ExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error)
|
||||
@@ -373,12 +353,6 @@ type HeadscaleServiceServer interface {
|
||||
ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error)
|
||||
MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error)
|
||||
BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error)
|
||||
// --- Route start ---
|
||||
GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error)
|
||||
EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error)
|
||||
DisableRoute(context.Context, *DisableRouteRequest) (*DisableRouteResponse, error)
|
||||
GetNodeRoutes(context.Context, *GetNodeRoutesRequest) (*GetNodeRoutesResponse, error)
|
||||
DeleteRoute(context.Context, *DeleteRouteRequest) (*DeleteRouteResponse, error)
|
||||
// --- ApiKeys start ---
|
||||
CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error)
|
||||
ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error)
|
||||
@@ -390,9 +364,12 @@ type HeadscaleServiceServer interface {
|
||||
mustEmbedUnimplementedHeadscaleServiceServer()
|
||||
}
|
||||
|
||||
// UnimplementedHeadscaleServiceServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedHeadscaleServiceServer struct {
|
||||
}
|
||||
// UnimplementedHeadscaleServiceServer must be embedded to have
|
||||
// forward compatible implementations.
|
||||
//
|
||||
// NOTE: this should be embedded by value instead of pointer to avoid a nil
|
||||
// pointer dereference when methods are called.
|
||||
type UnimplementedHeadscaleServiceServer struct{}
|
||||
|
||||
func (UnimplementedHeadscaleServiceServer) CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateUser not implemented")
|
||||
@@ -424,6 +401,9 @@ func (UnimplementedHeadscaleServiceServer) GetNode(context.Context, *GetNodeRequ
|
||||
func (UnimplementedHeadscaleServiceServer) SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetTags not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) SetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetApprovedRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RegisterNode not implemented")
|
||||
}
|
||||
@@ -445,21 +425,6 @@ func (UnimplementedHeadscaleServiceServer) MoveNode(context.Context, *MoveNodeRe
|
||||
func (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method BackfillNodeIPs not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method EnableRoute not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DisableRoute(context.Context, *DisableRouteRequest) (*DisableRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DisableRoute not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetNodeRoutes(context.Context, *GetNodeRoutesRequest) (*GetNodeRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetNodeRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteRoute(context.Context, *DeleteRouteRequest) (*DeleteRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteRoute not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateApiKey not implemented")
|
||||
}
|
||||
@@ -479,6 +444,7 @@ func (UnimplementedHeadscaleServiceServer) SetPolicy(context.Context, *SetPolicy
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetPolicy not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
|
||||
func (UnimplementedHeadscaleServiceServer) testEmbeddedByValue() {}
|
||||
|
||||
// UnsafeHeadscaleServiceServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to HeadscaleServiceServer will
|
||||
@@ -488,6 +454,13 @@ type UnsafeHeadscaleServiceServer interface {
|
||||
}
|
||||
|
||||
func RegisterHeadscaleServiceServer(s grpc.ServiceRegistrar, srv HeadscaleServiceServer) {
|
||||
// If the following call panics, it indicates UnimplementedHeadscaleServiceServer was
|
||||
// embedded by pointer and is nil. This will cause panics if an
|
||||
// unimplemented method is ever invoked, so we test this at initialization
|
||||
// time to prevent it from happening at runtime later due to I/O.
|
||||
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
|
||||
t.testEmbeddedByValue()
|
||||
}
|
||||
s.RegisterService(&HeadscaleService_ServiceDesc, srv)
|
||||
}
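
The registration helper above now verifies at init time that UnimplementedHeadscaleServiceServer is embedded by value. A minimal sketch of a conforming implementation (the import path and listen address are assumptions, and ListNodes is only stubbed out for illustration):

```go
package main

import (
	"context"
	"net"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/grpc"
)

// exampleServer embeds UnimplementedHeadscaleServiceServer by value, so the
// testEmbeddedByValue check in RegisterHeadscaleServiceServer passes and any
// method it does not override answers codes.Unimplemented.
type exampleServer struct {
	v1.UnimplementedHeadscaleServiceServer
}

// ListNodes overrides one generated method; a real server would return data
// from headscale's state instead of an empty response.
func (s *exampleServer) ListNodes(ctx context.Context, req *v1.ListNodesRequest) (*v1.ListNodesResponse, error) {
	return &v1.ListNodesResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50443") // assumed address
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	v1.RegisterHeadscaleServiceServer(srv, &exampleServer{})
	_ = srv.Serve(lis)
}
```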
@@ -671,6 +644,24 @@ func _HeadscaleService_SetTags_Handler(srv interface{}, ctx context.Context, dec
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_SetApprovedRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SetApprovedRoutesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).SetApprovedRoutes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_SetApprovedRoutes_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).SetApprovedRoutes(ctx, req.(*SetApprovedRoutesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_RegisterNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RegisterNodeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
@@ -797,96 +788,6 @@ func _HeadscaleService_BackfillNodeIPs_Handler(srv interface{}, ctx context.Cont
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetRoutesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetRoutes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_GetRoutes_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetRoutes(ctx, req.(*GetRoutesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_EnableRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(EnableRouteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).EnableRoute(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_EnableRoute_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).EnableRoute(ctx, req.(*EnableRouteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DisableRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DisableRouteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DisableRoute(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_DisableRoute_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DisableRoute(ctx, req.(*DisableRouteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetNodeRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetNodeRoutesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetNodeRoutes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_GetNodeRoutes_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetNodeRoutes(ctx, req.(*GetNodeRoutesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DeleteRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteRouteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DeleteRoute(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_DeleteRoute_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DeleteRoute(ctx, req.(*DeleteRouteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_CreateApiKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateApiKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
@@ -1042,6 +943,10 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "SetTags",
|
||||
Handler: _HeadscaleService_SetTags_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "SetApprovedRoutes",
|
||||
Handler: _HeadscaleService_SetApprovedRoutes_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "RegisterNode",
|
||||
Handler: _HeadscaleService_RegisterNode_Handler,
|
||||
@@ -1070,26 +975,6 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "BackfillNodeIPs",
|
||||
Handler: _HeadscaleService_BackfillNodeIPs_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetRoutes",
|
||||
Handler: _HeadscaleService_GetRoutes_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "EnableRoute",
|
||||
Handler: _HeadscaleService_EnableRoute_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DisableRoute",
|
||||
Handler: _HeadscaleService_DisableRoute_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetNodeRoutes",
|
||||
Handler: _HeadscaleService_GetNodeRoutes_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteRoute",
|
||||
Handler: _HeadscaleService_DeleteRoute_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreateApiKey",
|
||||
Handler: _HeadscaleService_CreateApiKey_Handler,
|
||||
|
||||
File diff suppressed because it is too large
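
In the regenerated client above, every stub now prepends grpc.StaticMethod() to the call options, and SetApprovedRoutes is the new node-level RPC replacing the removed per-route methods. A minimal caller sketch, assuming an insecure local gRPC endpoint, leaving the request fields (which are not part of this diff) unset, and omitting authentication metadata:

```go
package main

import (
	"context"
	"fmt"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // assumed import path
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// 127.0.0.1:50443 is an assumed grpc_listen_addr; adjust for your deployment.
	conn, err := grpc.NewClient("127.0.0.1:50443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)

	// The generated stub injects grpc.StaticMethod() before any options passed here.
	resp, err := client.SetApprovedRoutes(context.Background(), &v1.SetApprovedRoutesRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp)
}
```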
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.1
|
||||
// protoc-gen-go v1.36.6
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/policy.proto
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -22,11 +23,10 @@ const (
|
||||
)
|
||||
|
||||
type SetPolicyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SetPolicyRequest) Reset() {
|
||||
@@ -67,12 +67,11 @@ func (x *SetPolicyRequest) GetPolicy() string {
|
||||
}
|
||||
|
||||
type SetPolicyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
|
||||
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
|
||||
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SetPolicyResponse) Reset() {
|
||||
@@ -120,9 +119,9 @@ func (x *SetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp {
|
||||
}
|
||||
|
||||
type GetPolicyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *GetPolicyRequest) Reset() {
|
||||
@@ -156,12 +155,11 @@ func (*GetPolicyRequest) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type GetPolicyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
|
||||
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
|
||||
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *GetPolicyResponse) Reset() {
|
||||
@@ -210,42 +208,29 @@ func (x *GetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp {
|
||||
|
||||
var File_headscale_v1_policy_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_policy_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2a, 0x0a, 0x10, 0x53, 0x65,
|
||||
0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16,
|
||||
0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
|
||||
0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x66, 0x0a, 0x11, 0x53, 0x65, 0x74, 0x50, 0x6f, 0x6c,
|
||||
0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x6c,
|
||||
0x69, 0x63, 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61,
|
||||
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
|
||||
0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x12,
|
||||
0x0a, 0x10, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x22, 0x66, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63,
|
||||
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
|
||||
0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
|
||||
0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69,
|
||||
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e,
|
||||
0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f,
|
||||
0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
const file_headscale_v1_policy_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x19headscale/v1/policy.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"*\n" +
|
||||
"\x10SetPolicyRequest\x12\x16\n" +
|
||||
"\x06policy\x18\x01 \x01(\tR\x06policy\"f\n" +
|
||||
"\x11SetPolicyResponse\x12\x16\n" +
|
||||
"\x06policy\x18\x01 \x01(\tR\x06policy\x129\n" +
|
||||
"\n" +
|
||||
"updated_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tupdatedAt\"\x12\n" +
|
||||
"\x10GetPolicyRequest\"f\n" +
|
||||
"\x11GetPolicyResponse\x12\x16\n" +
|
||||
"\x06policy\x18\x01 \x01(\tR\x06policy\x129\n" +
|
||||
"\n" +
|
||||
"updated_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tupdatedAtB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
|
||||
var (
|
||||
file_headscale_v1_policy_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_policy_proto_rawDescData = file_headscale_v1_policy_proto_rawDesc
|
||||
file_headscale_v1_policy_proto_rawDescData []byte
|
||||
)
|
||||
|
||||
func file_headscale_v1_policy_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_policy_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_policy_proto_rawDescData)
|
||||
file_headscale_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_policy_proto_rawDesc), len(file_headscale_v1_policy_proto_rawDesc)))
|
||||
})
|
||||
return file_headscale_v1_policy_proto_rawDescData
|
||||
}
|
||||
@@ -277,7 +262,7 @@ func file_headscale_v1_policy_proto_init() {
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_policy_proto_rawDesc,
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_policy_proto_rawDesc), len(file_headscale_v1_policy_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
@@ -288,7 +273,6 @@ func file_headscale_v1_policy_proto_init() {
|
||||
MessageInfos: file_headscale_v1_policy_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_policy_proto = out.File
|
||||
file_headscale_v1_policy_proto_rawDesc = nil
|
||||
file_headscale_v1_policy_proto_goTypes = nil
|
||||
file_headscale_v1_policy_proto_depIdxs = nil
|
||||
}
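
Switching the raw descriptor from a []byte variable to a string constant (converted with unsafe.Slice only when the descriptor is first needed) is internal to the generated file; callers still reach the descriptor through protoreflect. A small sketch, with the import path again an assumption:

```go
package main

import (
	"fmt"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // assumed import path
)

func main() {
	// The message descriptor is still resolved lazily from the raw descriptor,
	// so reflection over the regenerated types behaves as before.
	md := (&v1.SetPolicyRequest{}).ProtoReflect().Descriptor()
	fmt.Println(md.FullName())          // headscale.v1.SetPolicyRequest
	fmt.Println(md.ParentFile().Path()) // headscale/v1/policy.proto
}
```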
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.1
|
||||
// protoc-gen-go v1.36.6
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/preauthkey.proto
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -22,19 +23,18 @@ const (
|
||||
)
|
||||
|
||||
type PreAuthKey struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Reusable bool `protobuf:"varint,4,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,5,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
AclTags []string `protobuf:"bytes,9,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Reusable bool `protobuf:"varint,4,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,5,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
AclTags []string `protobuf:"bytes,9,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) Reset() {
|
||||
@@ -67,18 +67,18 @@ func (*PreAuthKey) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetUser() string {
|
||||
func (x *PreAuthKey) GetUser() *User {
|
||||
if x != nil {
|
||||
return x.User
|
||||
}
|
||||
return ""
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetId() string {
|
||||
func (x *PreAuthKey) GetId() uint64 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return ""
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetKey() string {
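
For callers, GetUser now returns a *User message instead of a string and GetId returns a uint64, so code that matched keys by user name has to go through the nested message. A hedged sketch of that change; the User message is defined in another file, and its GetId accessor is an assumption:

```go
package example

import (
	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // assumed import path
)

// keysForUser filters pre-auth keys by owner using the message-typed User
// field; the generated getters are nil-safe, so a key without a user simply
// never matches.
func keysForUser(keys []*v1.PreAuthKey, userID uint64) []*v1.PreAuthKey {
	var owned []*v1.PreAuthKey
	for _, key := range keys {
		if key.GetUser().GetId() == userID {
			owned = append(owned, key)
		}
	}
	return owned
}
```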
@@ -131,15 +131,14 @@ func (x *PreAuthKey) GetAclTags() []string {
|
||||
}
|
||||
|
||||
type CreatePreAuthKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
AclTags []string `protobuf:"bytes,5,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
AclTags []string `protobuf:"bytes,5,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) Reset() {
|
||||
@@ -172,11 +171,11 @@ func (*CreatePreAuthKeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetUser() string {
|
||||
func (x *CreatePreAuthKeyRequest) GetUser() uint64 {
|
||||
if x != nil {
|
||||
return x.User
|
||||
}
|
||||
return ""
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetReusable() bool {
|
||||
@@ -208,11 +207,10 @@ func (x *CreatePreAuthKeyRequest) GetAclTags() []string {
|
||||
}
|
||||
|
||||
type CreatePreAuthKeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
PreAuthKey *PreAuthKey `protobuf:"bytes,1,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PreAuthKey *PreAuthKey `protobuf:"bytes,1,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyResponse) Reset() {
|
||||
@@ -253,12 +251,11 @@ func (x *CreatePreAuthKeyResponse) GetPreAuthKey() *PreAuthKey {
|
||||
}
|
||||
|
||||
type ExpirePreAuthKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) Reset() {
|
||||
@@ -291,11 +288,11 @@ func (*ExpirePreAuthKeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) GetUser() string {
|
||||
func (x *ExpirePreAuthKeyRequest) GetUser() uint64 {
|
||||
if x != nil {
|
||||
return x.User
|
||||
}
|
||||
return ""
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) GetKey() string {
|
||||
@@ -306,9 +303,9 @@ func (x *ExpirePreAuthKeyRequest) GetKey() string {
|
||||
}
|
||||
|
||||
type ExpirePreAuthKeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyResponse) Reset() {
|
||||
@@ -342,11 +339,10 @@ func (*ExpirePreAuthKeyResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type ListPreAuthKeysRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) Reset() {
|
||||
@@ -379,19 +375,18 @@ func (*ListPreAuthKeysRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) GetUser() string {
|
||||
func (x *ListPreAuthKeysRequest) GetUser() uint64 {
|
||||
if x != nil {
|
||||
return x.User
|
||||
}
|
||||
return ""
|
||||
return 0
|
||||
}
|
||||
|
||||
type ListPreAuthKeysResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
PreAuthKeys []*PreAuthKey `protobuf:"bytes,1,rep,name=pre_auth_keys,json=preAuthKeys,proto3" json:"pre_auth_keys,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PreAuthKeys []*PreAuthKey `protobuf:"bytes,1,rep,name=pre_auth_keys,json=preAuthKeys,proto3" json:"pre_auth_keys,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) Reset() {
|
||||
@@ -433,76 +428,51 @@ func (x *ListPreAuthKeysResponse) GetPreAuthKeys() []*PreAuthKey {
|
||||
|
||||
var File_headscale_v1_preauthkey_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_preauthkey_proto_rawDesc = []byte{
|
||||
0x0a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70,
|
||||
0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
|
||||
0x0c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2,
|
||||
0x02, 0x0a, 0x0a, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65,
|
||||
0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69,
|
||||
0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
|
||||
0x6b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18,
|
||||
0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12,
|
||||
0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01,
|
||||
0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x75, 0x73, 0x65,
|
||||
0x64, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
|
||||
0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||
0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a,
|
||||
0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63,
|
||||
0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x6c, 0x5f,
|
||||
0x74, 0x61, 0x67, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x6c, 0x54,
|
||||
0x61, 0x67, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72,
|
||||
0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75,
|
||||
0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12,
|
||||
0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x3a, 0x0a,
|
||||
0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65,
|
||||
0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x6c,
|
||||
0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x6c,
|
||||
0x54, 0x61, 0x67, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72,
|
||||
0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b, 0x65, 0x79,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79,
|
||||
0x52, 0x0a, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x17,
|
||||
0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b,
|
||||
0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1a, 0x0a,
|
||||
0x18, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x16, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x22, 0x57, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x50,
|
||||
0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b,
|
||||
0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73,
|
||||
0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a,
|
||||
0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
const file_headscale_v1_preauthkey_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x1dheadscale/v1/preauthkey.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17headscale/v1/user.proto\"\xb6\x02\n" +
|
||||
"\n" +
|
||||
"PreAuthKey\x12&\n" +
|
||||
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\x12\x0e\n" +
|
||||
"\x02id\x18\x02 \x01(\x04R\x02id\x12\x10\n" +
|
||||
"\x03key\x18\x03 \x01(\tR\x03key\x12\x1a\n" +
|
||||
"\breusable\x18\x04 \x01(\bR\breusable\x12\x1c\n" +
|
||||
"\tephemeral\x18\x05 \x01(\bR\tephemeral\x12\x12\n" +
|
||||
"\x04used\x18\x06 \x01(\bR\x04used\x12:\n" +
|
||||
"\n" +
|
||||
"expiration\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\n" +
|
||||
"expiration\x129\n" +
|
||||
"\n" +
|
||||
"created_at\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x12\x19\n" +
|
||||
"\bacl_tags\x18\t \x03(\tR\aaclTags\"\xbe\x01\n" +
|
||||
"\x17CreatePreAuthKeyRequest\x12\x12\n" +
|
||||
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x1a\n" +
|
||||
"\breusable\x18\x02 \x01(\bR\breusable\x12\x1c\n" +
|
||||
"\tephemeral\x18\x03 \x01(\bR\tephemeral\x12:\n" +
|
||||
"\n" +
|
||||
"expiration\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
|
||||
"expiration\x12\x19\n" +
|
||||
"\bacl_tags\x18\x05 \x03(\tR\aaclTags\"V\n" +
|
||||
"\x18CreatePreAuthKeyResponse\x12:\n" +
|
||||
"\fpre_auth_key\x18\x01 \x01(\v2\x18.headscale.v1.PreAuthKeyR\n" +
|
||||
"preAuthKey\"?\n" +
|
||||
"\x17ExpirePreAuthKeyRequest\x12\x12\n" +
|
||||
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x10\n" +
|
||||
"\x03key\x18\x02 \x01(\tR\x03key\"\x1a\n" +
|
||||
"\x18ExpirePreAuthKeyResponse\",\n" +
|
||||
"\x16ListPreAuthKeysRequest\x12\x12\n" +
|
||||
"\x04user\x18\x01 \x01(\x04R\x04user\"W\n" +
|
||||
"\x17ListPreAuthKeysResponse\x12<\n" +
|
||||
"\rpre_auth_keys\x18\x01 \x03(\v2\x18.headscale.v1.PreAuthKeyR\vpreAuthKeysB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
|
||||
var (
|
||||
file_headscale_v1_preauthkey_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_preauthkey_proto_rawDescData = file_headscale_v1_preauthkey_proto_rawDesc
|
||||
file_headscale_v1_preauthkey_proto_rawDescData []byte
|
||||
)
|
||||
|
||||
func file_headscale_v1_preauthkey_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_preauthkey_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_preauthkey_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_preauthkey_proto_rawDescData)
|
||||
file_headscale_v1_preauthkey_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)))
|
||||
})
|
||||
return file_headscale_v1_preauthkey_proto_rawDescData
|
||||
}
|
||||
@@ -516,19 +486,21 @@ var file_headscale_v1_preauthkey_proto_goTypes = []any{
	(*ExpirePreAuthKeyResponse)(nil), // 4: headscale.v1.ExpirePreAuthKeyResponse
	(*ListPreAuthKeysRequest)(nil), // 5: headscale.v1.ListPreAuthKeysRequest
	(*ListPreAuthKeysResponse)(nil), // 6: headscale.v1.ListPreAuthKeysResponse
	(*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
	(*User)(nil), // 7: headscale.v1.User
	(*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp
}
var file_headscale_v1_preauthkey_proto_depIdxs = []int32{
	7, // 0: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
	7, // 1: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
	7, // 2: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
	0, // 3: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
	0, // 4: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
	5, // [5:5] is the sub-list for method output_type
	5, // [5:5] is the sub-list for method input_type
	5, // [5:5] is the sub-list for extension type_name
	5, // [5:5] is the sub-list for extension extendee
	0, // [0:5] is the sub-list for field type_name
	7, // 0: headscale.v1.PreAuthKey.user:type_name -> headscale.v1.User
	8, // 1: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
	8, // 2: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
	8, // 3: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
	0, // 4: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
	0, // 5: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
	6, // [6:6] is the sub-list for method output_type
	6, // [6:6] is the sub-list for method input_type
	6, // [6:6] is the sub-list for extension type_name
	6, // [6:6] is the sub-list for extension extendee
	0, // [0:6] is the sub-list for field type_name
}

func init() { file_headscale_v1_preauthkey_proto_init() }
@@ -536,11 +508,12 @@ func file_headscale_v1_preauthkey_proto_init() {
	if File_headscale_v1_preauthkey_proto != nil {
		return
	}
	file_headscale_v1_user_proto_init()
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_headscale_v1_preauthkey_proto_rawDesc,
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)),
			NumEnums: 0,
			NumMessages: 7,
			NumExtensions: 0,
@@ -551,7 +524,6 @@ func file_headscale_v1_preauthkey_proto_init() {
		MessageInfos: file_headscale_v1_preauthkey_proto_msgTypes,
	}.Build()
	File_headscale_v1_preauthkey_proto = out.File
	file_headscale_v1_preauthkey_proto_rawDesc = nil
	file_headscale_v1_preauthkey_proto_goTypes = nil
	file_headscale_v1_preauthkey_proto_depIdxs = nil
}
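Because all of this wiring runs from init, importing the regenerated package is enough to register the descriptor. A hedged sanity-check sketch against the global registry (the import path is taken from the go_package option embedded in the descriptors above and may need adjusting to the repository's actual generated-code layout):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"

	// Blank import triggers file_headscale_v1_preauthkey_proto_init via init().
	// Path taken from the go_package option above; adjust if the generated
	// package actually lives elsewhere in the repo.
	_ "github.com/juanfont/headscale/gen/go/v1"
)

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath("headscale/v1/preauthkey.proto")
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.Messages().Len()) // 7, matching NumMessages above
}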
@@ -1,677 +0,0 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.1
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/routes.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Route struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"`
|
||||
Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
Advertised bool `protobuf:"varint,4,opt,name=advertised,proto3" json:"advertised,omitempty"`
|
||||
Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty"`
|
||||
IsPrimary bool `protobuf:"varint,6,opt,name=is_primary,json=isPrimary,proto3" json:"is_primary,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
|
||||
DeletedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=deleted_at,json=deletedAt,proto3" json:"deleted_at,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Route) Reset() {
|
||||
*x = Route{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *Route) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Route) ProtoMessage() {}
|
||||
|
||||
func (x *Route) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Route.ProtoReflect.Descriptor instead.
|
||||
func (*Route) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Route) GetId() uint64 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Route) GetNode() *Node {
|
||||
if x != nil {
|
||||
return x.Node
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Route) GetPrefix() string {
|
||||
if x != nil {
|
||||
return x.Prefix
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Route) GetAdvertised() bool {
|
||||
if x != nil {
|
||||
return x.Advertised
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *Route) GetEnabled() bool {
|
||||
if x != nil {
|
||||
return x.Enabled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *Route) GetIsPrimary() bool {
|
||||
if x != nil {
|
||||
return x.IsPrimary
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *Route) GetCreatedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.CreatedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Route) GetUpdatedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.UpdatedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Route) GetDeletedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.DeletedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GetRoutesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *GetRoutesRequest) Reset() {
|
||||
*x = GetRoutesRequest{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetRoutesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetRoutesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetRoutesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetRoutesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetRoutesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
type GetRoutesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetRoutesResponse) Reset() {
|
||||
*x = GetRoutesResponse{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetRoutesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetRoutesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetRoutesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[2]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetRoutesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetRoutesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *GetRoutesResponse) GetRoutes() []*Route {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EnableRouteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RouteId uint64 `protobuf:"varint,1,opt,name=route_id,json=routeId,proto3" json:"route_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnableRouteRequest) Reset() {
|
||||
*x = EnableRouteRequest{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *EnableRouteRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnableRouteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *EnableRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[3]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnableRouteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*EnableRouteRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *EnableRouteRequest) GetRouteId() uint64 {
|
||||
if x != nil {
|
||||
return x.RouteId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type EnableRouteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *EnableRouteResponse) Reset() {
|
||||
*x = EnableRouteResponse{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *EnableRouteResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnableRouteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *EnableRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[4]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnableRouteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*EnableRouteResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
type DisableRouteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RouteId uint64 `protobuf:"varint,1,opt,name=route_id,json=routeId,proto3" json:"route_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DisableRouteRequest) Reset() {
|
||||
*x = DisableRouteRequest{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DisableRouteRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DisableRouteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DisableRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[5]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DisableRouteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DisableRouteRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *DisableRouteRequest) GetRouteId() uint64 {
|
||||
if x != nil {
|
||||
return x.RouteId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type DisableRouteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *DisableRouteResponse) Reset() {
|
||||
*x = DisableRouteResponse{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DisableRouteResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DisableRouteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DisableRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[6]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DisableRouteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DisableRouteResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
type GetNodeRoutesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesRequest) Reset() {
|
||||
*x = GetNodeRoutesRequest{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetNodeRoutesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetNodeRoutesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[7]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetNodeRoutesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetNodeRoutesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesRequest) GetNodeId() uint64 {
|
||||
if x != nil {
|
||||
return x.NodeId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type GetNodeRoutesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesResponse) Reset() {
|
||||
*x = GetNodeRoutesResponse{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetNodeRoutesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetNodeRoutesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[8]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetNodeRoutesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetNodeRoutesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesResponse) GetRoutes() []*Route {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type DeleteRouteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RouteId uint64 `protobuf:"varint,1,opt,name=route_id,json=routeId,proto3" json:"route_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteRouteRequest) Reset() {
|
||||
*x = DeleteRouteRequest{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DeleteRouteRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteRouteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[9]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteRouteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteRouteRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{9}
|
||||
}
|
||||
|
||||
func (x *DeleteRouteRequest) GetRouteId() uint64 {
|
||||
if x != nil {
|
||||
return x.RouteId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type DeleteRouteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *DeleteRouteResponse) Reset() {
|
||||
*x = DeleteRouteResponse{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DeleteRouteResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteRouteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[10]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteRouteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteRouteResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{10}
|
||||
}
|
||||
|
||||
var File_headscale_v1_routes_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_routes_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x22, 0xe1, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a,
|
||||
0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a,
|
||||
0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52,
|
||||
0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
|
||||
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1e, 0x0a,
|
||||
0x0a, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
|
||||
0x08, 0x52, 0x0a, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x12, 0x18, 0x0a,
|
||||
0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
|
||||
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x70, 0x72,
|
||||
0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x50,
|
||||
0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41,
|
||||
0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18,
|
||||
0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||
0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a,
|
||||
0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
||||
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x64, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x12, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x40, 0x0a, 0x11, 0x47,
|
||||
0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x12, 0x2b, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
|
||||
0x32, 0x13, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x2f, 0x0a,
|
||||
0x12, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x64, 0x22, 0x15,
|
||||
0x0a, 0x13, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x0a, 0x13, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x64, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x69, 0x73, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x2f, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
|
||||
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64,
|
||||
0x22, 0x44, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x72, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x2f, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74,
|
||||
0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x29,
|
||||
0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61,
|
||||
0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f,
|
||||
0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_headscale_v1_routes_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_routes_proto_rawDescData = file_headscale_v1_routes_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_headscale_v1_routes_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_routes_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_routes_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_routes_proto_rawDescData)
|
||||
})
|
||||
return file_headscale_v1_routes_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_headscale_v1_routes_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
|
||||
var file_headscale_v1_routes_proto_goTypes = []any{
|
||||
(*Route)(nil), // 0: headscale.v1.Route
|
||||
(*GetRoutesRequest)(nil), // 1: headscale.v1.GetRoutesRequest
|
||||
(*GetRoutesResponse)(nil), // 2: headscale.v1.GetRoutesResponse
|
||||
(*EnableRouteRequest)(nil), // 3: headscale.v1.EnableRouteRequest
|
||||
(*EnableRouteResponse)(nil), // 4: headscale.v1.EnableRouteResponse
|
||||
(*DisableRouteRequest)(nil), // 5: headscale.v1.DisableRouteRequest
|
||||
(*DisableRouteResponse)(nil), // 6: headscale.v1.DisableRouteResponse
|
||||
(*GetNodeRoutesRequest)(nil), // 7: headscale.v1.GetNodeRoutesRequest
|
||||
(*GetNodeRoutesResponse)(nil), // 8: headscale.v1.GetNodeRoutesResponse
|
||||
(*DeleteRouteRequest)(nil), // 9: headscale.v1.DeleteRouteRequest
|
||||
(*DeleteRouteResponse)(nil), // 10: headscale.v1.DeleteRouteResponse
|
||||
(*Node)(nil), // 11: headscale.v1.Node
|
||||
(*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp
|
||||
}
|
||||
var file_headscale_v1_routes_proto_depIdxs = []int32{
|
||||
11, // 0: headscale.v1.Route.node:type_name -> headscale.v1.Node
|
||||
12, // 1: headscale.v1.Route.created_at:type_name -> google.protobuf.Timestamp
|
||||
12, // 2: headscale.v1.Route.updated_at:type_name -> google.protobuf.Timestamp
|
||||
12, // 3: headscale.v1.Route.deleted_at:type_name -> google.protobuf.Timestamp
|
||||
0, // 4: headscale.v1.GetRoutesResponse.routes:type_name -> headscale.v1.Route
|
||||
0, // 5: headscale.v1.GetNodeRoutesResponse.routes:type_name -> headscale.v1.Route
|
||||
6, // [6:6] is the sub-list for method output_type
|
||||
6, // [6:6] is the sub-list for method input_type
|
||||
6, // [6:6] is the sub-list for extension type_name
|
||||
6, // [6:6] is the sub-list for extension extendee
|
||||
0, // [0:6] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_headscale_v1_routes_proto_init() }
|
||||
func file_headscale_v1_routes_proto_init() {
|
||||
if File_headscale_v1_routes_proto != nil {
|
||||
return
|
||||
}
|
||||
file_headscale_v1_node_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_routes_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 11,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_headscale_v1_routes_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_routes_proto_depIdxs,
|
||||
MessageInfos: file_headscale_v1_routes_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_routes_proto = out.File
|
||||
file_headscale_v1_routes_proto_rawDesc = nil
|
||||
file_headscale_v1_routes_proto_goTypes = nil
|
||||
file_headscale_v1_routes_proto_depIdxs = nil
|
||||
}
|
||||
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.35.1
// protoc-gen-go v1.36.6
// protoc (unknown)
// source: headscale/v1/user.proto

@@ -12,6 +12,7 @@ import (
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
	reflect "reflect"
	sync "sync"
	unsafe "unsafe"
)
|
||||
|
||||
const (
|
||||
@@ -22,10 +23,7 @@ const (
|
||||
)
|
||||
|
||||
type User struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
@@ -34,6 +32,8 @@ type User struct {
|
||||
ProviderId string `protobuf:"bytes,6,opt,name=provider_id,json=providerId,proto3" json:"provider_id,omitempty"`
|
||||
Provider string `protobuf:"bytes,7,opt,name=provider,proto3" json:"provider,omitempty"`
|
||||
ProfilePicUrl string `protobuf:"bytes,8,opt,name=profile_pic_url,json=profilePicUrl,proto3" json:"profile_pic_url,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *User) Reset() {
|
||||
@@ -123,11 +123,13 @@ func (x *User) GetProfilePicUrl() string {
|
||||
}
|
||||
|
||||
type CreateUserRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
|
||||
PictureUrl string `protobuf:"bytes,4,opt,name=picture_url,json=pictureUrl,proto3" json:"picture_url,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *CreateUserRequest) Reset() {
|
||||
@@ -167,12 +169,32 @@ func (x *CreateUserRequest) GetName() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
type CreateUserResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
func (x *CreateUserRequest) GetDisplayName() string {
|
||||
if x != nil {
|
||||
return x.DisplayName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
func (x *CreateUserRequest) GetEmail() string {
|
||||
if x != nil {
|
||||
return x.Email
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CreateUserRequest) GetPictureUrl() string {
|
||||
if x != nil {
|
||||
return x.PictureUrl
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type CreateUserResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *CreateUserResponse) Reset() {
|
||||
@@ -213,12 +235,11 @@ func (x *CreateUserResponse) GetUser() *User {
|
||||
}
|
||||
|
||||
type RenameUserRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
OldId uint64 `protobuf:"varint,1,opt,name=old_id,json=oldId,proto3" json:"old_id,omitempty"`
|
||||
NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
OldId uint64 `protobuf:"varint,1,opt,name=old_id,json=oldId,proto3" json:"old_id,omitempty"`
|
||||
NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *RenameUserRequest) Reset() {
|
||||
@@ -266,11 +287,10 @@ func (x *RenameUserRequest) GetNewName() string {
|
||||
}
|
||||
|
||||
type RenameUserResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *RenameUserResponse) Reset() {
|
||||
@@ -311,11 +331,10 @@ func (x *RenameUserResponse) GetUser() *User {
|
||||
}
|
||||
|
||||
type DeleteUserRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *DeleteUserRequest) Reset() {
|
||||
@@ -356,9 +375,9 @@ func (x *DeleteUserRequest) GetId() uint64 {
|
||||
}
|
||||
|
||||
type DeleteUserResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *DeleteUserResponse) Reset() {
|
||||
@@ -392,13 +411,12 @@ func (*DeleteUserResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type ListUsersRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ListUsersRequest) Reset() {
|
||||
@@ -453,11 +471,10 @@ func (x *ListUsersRequest) GetEmail() string {
|
||||
}
|
||||
|
||||
type ListUsersResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ListUsersResponse) Reset() {
|
||||
@@ -499,69 +516,51 @@ func (x *ListUsersResponse) GetUsers() []*User {
|
||||
|
||||
var File_headscale_v1_user_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_user_proto_rawDesc = []byte{
|
||||
0x0a, 0x17, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x75,
|
||||
0x73, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
|
||||
0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x02, 0x0a, 0x04, 0x55, 0x73, 0x65,
|
||||
0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69,
|
||||
0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
|
||||
0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
|
||||
0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74,
|
||||
0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x6f,
|
||||
0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
|
||||
0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72,
|
||||
0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72,
|
||||
0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
|
||||
0x65, 0x5f, 0x70, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x0d, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x22, 0x27,
|
||||
0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3c, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74,
|
||||
0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a,
|
||||
0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52,
|
||||
0x04, 0x75, 0x73, 0x65, 0x72, 0x22, 0x45, 0x0a, 0x11, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55,
|
||||
0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6c,
|
||||
0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6f, 0x6c, 0x64, 0x49,
|
||||
0x64, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3c, 0x0a, 0x12,
|
||||
0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x26, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x12, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x55, 0x73, 0x65, 0x72, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x22, 0x23, 0x0a, 0x11, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x22,
|
||||
0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65,
|
||||
0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a,
|
||||
0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d,
|
||||
0x61, 0x69, 0x6c, 0x22, 0x3d, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x75, 0x73, 0x65, 0x72,
|
||||
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, 0x05, 0x75, 0x73, 0x65,
|
||||
0x72, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
const file_headscale_v1_user_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x17headscale/v1/user.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\x83\x02\n" +
|
||||
"\x04User\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x12\n" +
|
||||
"\x04name\x18\x02 \x01(\tR\x04name\x129\n" +
|
||||
"\n" +
|
||||
"created_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x12!\n" +
|
||||
"\fdisplay_name\x18\x04 \x01(\tR\vdisplayName\x12\x14\n" +
|
||||
"\x05email\x18\x05 \x01(\tR\x05email\x12\x1f\n" +
|
||||
"\vprovider_id\x18\x06 \x01(\tR\n" +
|
||||
"providerId\x12\x1a\n" +
|
||||
"\bprovider\x18\a \x01(\tR\bprovider\x12&\n" +
|
||||
"\x0fprofile_pic_url\x18\b \x01(\tR\rprofilePicUrl\"\x81\x01\n" +
|
||||
"\x11CreateUserRequest\x12\x12\n" +
|
||||
"\x04name\x18\x01 \x01(\tR\x04name\x12!\n" +
|
||||
"\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x14\n" +
|
||||
"\x05email\x18\x03 \x01(\tR\x05email\x12\x1f\n" +
|
||||
"\vpicture_url\x18\x04 \x01(\tR\n" +
|
||||
"pictureUrl\"<\n" +
|
||||
"\x12CreateUserResponse\x12&\n" +
|
||||
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\"E\n" +
|
||||
"\x11RenameUserRequest\x12\x15\n" +
|
||||
"\x06old_id\x18\x01 \x01(\x04R\x05oldId\x12\x19\n" +
|
||||
"\bnew_name\x18\x02 \x01(\tR\anewName\"<\n" +
|
||||
"\x12RenameUserResponse\x12&\n" +
|
||||
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\"#\n" +
|
||||
"\x11DeleteUserRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\x04R\x02id\"\x14\n" +
|
||||
"\x12DeleteUserResponse\"L\n" +
|
||||
"\x10ListUsersRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x12\n" +
|
||||
"\x04name\x18\x02 \x01(\tR\x04name\x12\x14\n" +
|
||||
"\x05email\x18\x03 \x01(\tR\x05email\"=\n" +
|
||||
"\x11ListUsersResponse\x12(\n" +
|
||||
"\x05users\x18\x01 \x03(\v2\x12.headscale.v1.UserR\x05usersB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
|
||||
var (
|
||||
file_headscale_v1_user_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_user_proto_rawDescData = file_headscale_v1_user_proto_rawDesc
|
||||
file_headscale_v1_user_proto_rawDescData []byte
|
||||
)
|
||||
|
||||
func file_headscale_v1_user_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_user_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_user_proto_rawDescData)
|
||||
file_headscale_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_user_proto_rawDesc), len(file_headscale_v1_user_proto_rawDesc)))
|
||||
})
|
||||
return file_headscale_v1_user_proto_rawDescData
|
||||
}
|
||||
@@ -600,7 +599,7 @@ func file_headscale_v1_user_proto_init() {
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_user_proto_rawDesc,
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_user_proto_rawDesc), len(file_headscale_v1_user_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
@@ -611,7 +610,6 @@ func file_headscale_v1_user_proto_init() {
|
||||
MessageInfos: file_headscale_v1_user_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_user_proto = out.File
|
||||
file_headscale_v1_user_proto_rawDesc = nil
|
||||
file_headscale_v1_user_proto_goTypes = nil
|
||||
file_headscale_v1_user_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
@@ -320,6 +320,45 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/node/{nodeId}/approve_routes": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_SetApprovedRoutes",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1SetApprovedRoutesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "nodeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/HeadscaleServiceSetApprovedRoutesBody"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/node/{nodeId}/expire": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_ExpireNode",
|
||||
@@ -388,37 +427,6 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/node/{nodeId}/routes": {
|
||||
"get": {
|
||||
"operationId": "HeadscaleService_GetNodeRoutes",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1GetNodeRoutesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "nodeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/node/{nodeId}/tags": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_SetTags",
|
||||
@@ -572,7 +580,8 @@
|
||||
"name": "user",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
@@ -643,122 +652,6 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/routes": {
|
||||
"get": {
|
||||
"summary": "--- Route start ---",
|
||||
"operationId": "HeadscaleService_GetRoutes",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1GetRoutesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/routes/{routeId}": {
|
||||
"delete": {
|
||||
"operationId": "HeadscaleService_DeleteRoute",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1DeleteRouteResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "routeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/routes/{routeId}/disable": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_DisableRoute",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1DisableRouteResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "routeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/routes/{routeId}/enable": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_EnableRoute",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1EnableRouteResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "routeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/user": {
|
||||
"get": {
|
||||
"operationId": "HeadscaleService_ListUsers",
|
||||
@@ -907,7 +800,19 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
}
|
||||
},
|
||||
"HeadscaleServiceSetApprovedRoutesBody": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"routes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -1006,7 +911,8 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
"reusable": {
|
||||
"type": "boolean"
|
||||
@@ -1039,6 +945,15 @@
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"displayName": {
|
||||
"type": "string"
|
||||
},
|
||||
"email": {
|
||||
"type": "string"
|
||||
},
|
||||
"pictureUrl": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -1084,18 +999,9 @@
|
||||
"v1DeleteNodeResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1DeleteRouteResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1DeleteUserResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1DisableRouteResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1EnableRouteResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1ExpireApiKeyRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -1119,7 +1025,8 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
"key": {
|
||||
"type": "string"
|
||||
@@ -1137,18 +1044,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1GetNodeRoutesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"routes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"$ref": "#/definitions/v1Route"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1GetPolicyResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -1161,18 +1056,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1GetRoutesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"routes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"$ref": "#/definitions/v1Route"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1ListApiKeysResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -1298,6 +1181,24 @@
|
||||
},
|
||||
"online": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"approvedRoutes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"availableRoutes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"subnetRoutes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -1305,10 +1206,11 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "string"
|
||||
"$ref": "#/definitions/v1User"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
"key": {
|
||||
"type": "string"
|
||||
@@ -1372,39 +1274,11 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1Route": {
|
||||
"v1SetApprovedRoutesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
"node": {
|
||||
"$ref": "#/definitions/v1Node"
|
||||
},
|
||||
"prefix": {
|
||||
"type": "string"
|
||||
},
|
||||
"advertised": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"isPrimary": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"createdAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"updatedAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"deletedAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "headscale/v1/routes.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"@type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
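The swagger changes above drop the per-route enable/disable/delete endpoints and add a single POST /api/v1/node/{nodeId}/approve_routes call whose body carries a list of prefixes. A hedged usage sketch against that new endpoint (server URL, node ID, API key and routes are placeholders, and the Bearer-token header is assumed to match headscale's existing REST auth):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Placeholder request body matching HeadscaleServiceSetApprovedRoutesBody.
	body := strings.NewReader(`{"routes": ["10.0.0.0/24", "0.0.0.0/0"]}`)
	req, err := http.NewRequest(http.MethodPost,
		"https://headscale.example.com/api/v1/node/1/approve_routes", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <api-key>") // assumed Bearer API-key auth
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 200 with a v1SetApprovedRoutesResponse body
}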
227
go.mod
@@ -1,54 +1,61 @@
module github.com/juanfont/headscale

go 1.23.1
go 1.24.0

toolchain go1.24.2

require (
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7
|
||||
github.com/chasefleming/elem-go v0.29.0
|
||||
github.com/coder/websocket v1.8.12
|
||||
github.com/coreos/go-oidc/v3 v3.11.0
|
||||
github.com/arl/statsviz v0.6.0
|
||||
github.com/cenkalti/backoff/v5 v5.0.2
|
||||
github.com/chasefleming/elem-go v0.30.0
|
||||
github.com/coder/websocket v1.8.13
|
||||
github.com/coreos/go-oidc/v3 v3.14.1
|
||||
github.com/creachadair/command v0.1.22
|
||||
github.com/creachadair/flax v0.0.5
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
||||
github.com/docker/docker v28.3.3+incompatible
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/glebarez/sqlite v1.11.0
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.2
|
||||
github.com/gofrs/uuid/v5 v5.3.0
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.4
|
||||
github.com/gofrs/uuid/v5 v5.3.2
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0
|
||||
github.com/jagottsicher/termcolor v1.0.2
|
||||
github.com/klauspost/compress v1.17.9
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25
|
||||
github.com/ory/dockertest/v3 v3.11.0
|
||||
github.com/ory/dockertest/v3 v3.12.0
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1
|
||||
github.com/pkg/profile v1.7.0
|
||||
github.com/prometheus/client_golang v1.20.2
|
||||
github.com/prometheus/common v0.58.0
|
||||
github.com/pterm/pterm v0.12.79
|
||||
github.com/puzpuzpuz/xsync/v3 v3.4.0
|
||||
github.com/rs/zerolog v1.33.0
|
||||
github.com/samber/lo v1.47.0
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/common v0.65.0
|
||||
github.com/pterm/pterm v0.12.81
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0
|
||||
github.com/rs/zerolog v1.34.0
|
||||
github.com/samber/lo v1.51.0
|
||||
github.com/sasha-s/go-deadlock v0.3.5
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/viper v1.20.0-alpha.6
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a
|
||||
github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/viper v1.20.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
|
||||
golang.org/x/crypto v0.26.0
|
||||
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948
|
||||
golang.org/x/net v0.28.0
|
||||
golang.org/x/oauth2 v0.22.0
|
||||
golang.org/x/sync v0.8.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
|
||||
google.golang.org/grpc v1.66.0
|
||||
google.golang.org/protobuf v1.35.1
|
||||
golang.org/x/crypto v0.40.0
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
|
||||
golang.org/x/net v0.42.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
golang.org/x/sync v0.16.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822
|
||||
google.golang.org/grpc v1.73.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gorm.io/driver/postgres v1.5.9
|
||||
gorm.io/gorm v1.25.11
|
||||
tailscale.com v1.75.0-pre.0.20240926101731-7d1160ddaab7
|
||||
zgo.at/zcache/v2 v2.1.0
|
||||
gorm.io/driver/postgres v1.6.0
|
||||
gorm.io/gorm v1.30.0
|
||||
tailscale.com v1.84.3
|
||||
zgo.at/zcache/v2 v2.2.0
|
||||
zombiezen.com/go/postgrestest v1.0.1
|
||||
)
|
||||
|
||||
@@ -70,10 +77,10 @@ require (
// together, e.g:
// go get modernc.org/libc@v1.55.3 modernc.org/sqlite@v1.33.1
require (
modernc.org/libc v1.55.3 // indirect
modernc.org/mathutil v1.6.0 // indirect
modernc.org/memory v1.8.0 // indirect
modernc.org/sqlite v1.33.1 // indirect
modernc.org/libc v1.62.1 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.10.0 // indirect
modernc.org/sqlite v1.37.0
)

require (
@@ -82,151 +89,161 @@ require (
atomicgo.dev/schedule v0.1.0 // indirect
dario.cat/mergo v1.0.1 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/akutz/memconn v0.1.0 // indirect
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
github.com/aws/aws-sdk-go-v2 v1.24.1 // indirect
github.com/aws/aws-sdk-go-v2/config v1.26.6 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
github.com/aws/aws-sdk-go-v2 v1.36.0 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.5 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 // indirect
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
github.com/aws/smithy-go v1.19.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 // indirect
github.com/aws/smithy-go v1.22.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/console v1.0.4 // indirect
github.com/containerd/console v1.0.5 // indirect
github.com/containerd/continuity v0.4.5 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
github.com/creachadair/mds v0.14.5 // indirect
github.com/creachadair/mds v0.24.3 // indirect
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
github.com/docker/cli v27.3.1+incompatible // indirect
github.com/docker/docker v27.3.1+incompatible // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/cli v28.1.1+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/fgprof v0.9.5 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fxamacker/cbor/v2 v2.6.0 // indirect
github.com/gaissmai/bart v0.11.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gaissmai/bart v0.18.0 // indirect
github.com/glebarez/go-sqlite v1.22.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/go-jose/go-jose/v4 v4.0.2 // indirect
github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
github.com/go-jose/go-jose/v4 v4.1.0 // indirect
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/go-github v17.0.0+incompatible // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
github.com/google/pprof v0.0.0-20240829160300-da1f7e9f2b25 // indirect
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gookit/color v1.5.4 // indirect
github.com/gorilla/csrf v1.7.2 // indirect
github.com/gorilla/csrf v1.7.3 // indirect
github.com/gorilla/securecookie v1.1.2 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
github.com/illarion/gonotify/v2 v2.0.3 // indirect
github.com/illarion/gonotify/v3 v3.0.2 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx/v5 v5.6.0 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jackc/pgx/v5 v5.7.4 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect
github.com/jsimonetti/rtnetlink v1.4.1 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/lithammer/fuzzysearch v1.1.8 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mdlayher/genetlink v1.3.2 // indirect
github.com/mdlayher/netlink v1.7.2 // indirect
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect
github.com/mdlayher/sdnotify v1.0.0 // indirect
github.com/mdlayher/socket v0.5.0 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/miekg/dns v1.1.58 // indirect
github.com/mitchellh/go-ps v1.0.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/user v0.3.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/opencontainers/runc v1.2.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opencontainers/runc v1.3.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus-community/pro-bing v0.4.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/safchain/ethtool v0.3.0 // indirect
github.com/sagikazarmark/locafero v0.6.0 // indirect
github.com/sagikazarmark/locafero v0.9.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/afero v1.14.0 // indirect
github.com/spf13/cast v1.8.0 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 // indirect
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 // indirect
github.com/tailscale/setec v0.0.0-20240314234648-9da8e7407257 // indirect
github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185 // indirect
github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 // indirect
github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc // indirect
github.com/tcnksm/go-httpstat v0.2.0 // indirect
github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e // indirect
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc // indirect
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d // indirect
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
golang.org/x/mod v0.20.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.23.0 // indirect
golang.org/x/text v0.17.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.24.0 // indirect
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect
golang.org/x/mod v0.26.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/term v0.33.0 // indirect
golang.org/x/text v0.27.0 // indirect
golang.org/x/time v0.10.0 // indirect
golang.org/x/tools v0.35.0 // indirect
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
)

tool (
golang.org/x/tools/cmd/stringer
tailscale.com/cmd/viewer
)

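Note on the tool block above: this is the Go 1.24 tool directive, which records developer tools (here golang.org/x/tools/cmd/stringer and tailscale.com/cmd/viewer) directly in go.mod; the move from go 1.23.1 to go 1.24.0 with toolchain go1.24.2 is what makes the directive available. A rough sketch of how such tool entries are typically managed with standard Go tooling follows; the commands and the -type value are illustrative, not taken from this diff:

    go get -tool golang.org/x/tools/cmd/stringer   # add or update a tool dependency in go.mod
    go tool stringer -type=SomeEnum ./...          # run the tool at the version pinned by go.mod (hypothetical type name)
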
594
go.sum
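The go.sum changes below track the go.mod changes above: for each module version, go.sum records an h1: hash of the module contents plus a hash of its go.mod file, and the toolchain rejects downloads that do not match. These entries are machine-maintained; after a dependency bump they are typically regenerated and checked with generic Go commands along these lines (not commands taken from this diff):

    go mod tidy     # rewrite go.mod and go.sum for the current import graph
    go mod verify   # check the module cache against the hashes recorded in go.sum
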
@@ -1,3 +1,5 @@
|
||||
9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f h1:1C7nZuxUMNz7eiQALRfiqNOm04+m3edWlRff/BYHf0Q=
|
||||
9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f/go.mod h1:hHyrZRryGqVdqrknjq5OWDLGCTJ2NeEvtrpR96mjraM=
|
||||
atomicgo.dev/assert v0.0.2 h1:FiKeMiZSgRrZsPo9qn/7vmr7mCsh5SZyXY4YGYiYwrg=
|
||||
atomicgo.dev/assert v0.0.2/go.mod h1:ut4NcI3QDdJtlmAxQULOmA13Gz6e2DWbSAS8RUOmNYQ=
|
||||
atomicgo.dev/cursor v0.2.0 h1:H6XN5alUJ52FZZUkI7AlJbUc1aW38GWZalpYRPpoPOw=
|
||||
@@ -6,7 +8,6 @@ atomicgo.dev/keyboard v0.2.9 h1:tOsIid3nlPLZ3lwgG8KZMp/SFmr7P0ssEN5JUsm78K8=
|
||||
atomicgo.dev/keyboard v0.2.9/go.mod h1:BC4w9g00XkxH/f1HXhW2sXmJFOCWbKn9xrOunSFtExQ=
|
||||
atomicgo.dev/schedule v0.1.0 h1:nTthAbhZS5YZmgYbb2+DH8uQIZcTlIrd4eYr3UQxEjs=
|
||||
atomicgo.dev/schedule v0.1.0/go.mod h1:xeUa3oAkiuHYh8bKiQBRojqAMq3PXXbJujjb0hw8pEU=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
@@ -15,9 +16,8 @@ filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc=
|
||||
filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/MarvinJWendt/testza v0.1.0/go.mod h1:7AxNvlfeHP7Z/hDQ5JtE3OKYT3XFUeLCDE2DQninSqs=
|
||||
@@ -41,57 +41,57 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/arl/statsviz v0.6.0 h1:jbW1QJkEYQkufd//4NDYRSNBpwJNrdzPahF7ZmoGdyE=
|
||||
github.com/arl/statsviz v0.6.0/go.mod h1:0toboo+YGSUXDaS4g1D5TVS4dXs7S7YYT5J/qnW2h8s=
|
||||
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 h1:ugD6qzjYtB7zM5PN/ZIeaAIyefPaD82G8+SJopgvUpw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9/go.mod h1:YD0aYBWCrPENpHolhKw2XDlTIWae2GKXT1T4o6N6hiM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 h1:/90OR2XbSYfXucBMJ4U14wrjlfleq/0SB6dZDPncgmo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9/go.mod h1:dN/Of9/fNZet7UrQQ6kTDo/VSwKPIq94vjlU16bRARc=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 h1:iEAeF6YC3l4FzlJPP9H3Ko1TXpdjdqWffxXjp8SY6uk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9/go.mod h1:kjsXoK23q9Z/tLBrckZLLyvjhZoS+AGrzqzUfEClvMM=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.47.7 h1:o0ASbVwUAIrfp/WcCac+6jioZt4Hd8k/1X8u7GJ/QeM=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.47.7/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 h1:JBod0SnNqcWQ0+uAyzeRFG1zCHotW8DukumYYyNy0zo=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 h1:IOdss+igJDFdic9w3WKwxGCmHqUxydvIhJOm9LJ32Dk=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
|
||||
github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
|
||||
github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w=
|
||||
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
|
||||
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
|
||||
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chasefleming/elem-go v0.29.0 h1:WwrjQcVn6xldhexluvl2Z3sgKi9HTMuzWeEXO4PHsmg=
|
||||
github.com/chasefleming/elem-go v0.29.0/go.mod h1:hz73qILBIKnTgOujnSMtEj20/epI+f6vg71RUilJAA4=
|
||||
github.com/chasefleming/elem-go v0.30.0 h1:BlhV1ekv1RbFiM8XZUQeln1Ikb4D+bu2eDO4agREvok=
|
||||
github.com/chasefleming/elem-go v0.30.0/go.mod h1:hz73qILBIKnTgOujnSMtEj20/epI+f6vg71RUilJAA4=
|
||||
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
|
||||
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
|
||||
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
|
||||
@@ -101,25 +101,35 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
||||
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok=
|
||||
github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo=
|
||||
github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
|
||||
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
|
||||
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
|
||||
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||
github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
|
||||
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
|
||||
github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=
|
||||
github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
|
||||
github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
|
||||
github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
|
||||
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
|
||||
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
|
||||
github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
|
||||
github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
|
||||
github.com/coreos/go-oidc/v3 v3.14.1 h1:9ePWwfdwC4QKRlCXsJGou56adA/owXczOzwKdOumLqk=
|
||||
github.com/coreos/go-oidc/v3 v3.14.1/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creachadair/mds v0.14.5 h1:2amuO4yCbQkaAyDoLO5iCbwbTRQZz4EpRhOejQbf4+8=
|
||||
github.com/creachadair/mds v0.14.5/go.mod h1:4vrFYUzTXMJpMBU+OA292I6IUxKWCCfZkgXg+/kBZMo=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creachadair/command v0.1.22 h1:WmdrURwZdmPD1jm13SjKooaMoqo7mW1qI2BPCShs154=
|
||||
github.com/creachadair/command v0.1.22/go.mod h1:YFc+OMGucqTpxwQg/iJnNg8BMNmRPDK60rYy8ckgKwE=
|
||||
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
|
||||
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
|
||||
github.com/creachadair/mds v0.24.3 h1:X7cM2ymZSyl4IVWnfyXLxRXMJ6awhbcWvtLPhfnTaqI=
|
||||
github.com/creachadair/mds v0.24.3/go.mod h1:0oeHt9QWu8VfnmskOL4zi2CumjEvB29ScmtOmdrhFeU=
|
||||
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
|
||||
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0=
|
||||
@@ -132,12 +142,14 @@ github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 h1:vrC07UZcgPzu/Oj
|
||||
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ=
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q=
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
|
||||
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
|
||||
github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
|
||||
github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
|
||||
github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
|
||||
github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
|
||||
github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
@@ -146,86 +158,82 @@ github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI=
|
||||
github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
|
||||
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
|
||||
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
|
||||
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA=
|
||||
github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc=
|
||||
github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/gaissmai/bart v0.18.0 h1:jQLBT/RduJu0pv/tLwXE+xKPgtWJejbxuXAR+wLJafo=
|
||||
github.com/gaissmai/bart v0.18.0/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY=
|
||||
github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I=
|
||||
github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo=
|
||||
github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
|
||||
github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
|
||||
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
|
||||
github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.2 h1:F/d1hpHbRAvKezziV2CC5KUE82cVe9zTgHSBoOOZ4CY=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.2/go.mod h1:9nHVX6z3FCMCQPA7PThGcA55t22yKQfK/Dnsf5i7hUo=
|
||||
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
|
||||
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||
github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
|
||||
github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
|
||||
github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg=
|
||||
github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.4 h1:KOPEt27qy1cNzHfMZbp9YTmEuzkY4F4wrdsJW9WFk1U=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.4/go.mod h1:y/6gPAH6QGAgP1UfHMiXcqGeJ88/GRQbfCReE1JJD5Y=
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
|
||||
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||
github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY=
|
||||
github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw=
|
||||
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY=
|
||||
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo=
|
||||
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
|
||||
github.com/gofrs/uuid/v5 v5.3.0 h1:m0mUMr+oVYUdxpMLgSYCZiXe7PuVPnI94+OMeVBNedk=
|
||||
github.com/gofrs/uuid/v5 v5.3.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
|
||||
github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0=
|
||||
github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
|
||||
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I=
|
||||
github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI=
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/pprof v0.0.0-20240829160300-da1f7e9f2b25 h1:sEDPKUw6iPjczdu33njxFjO6tYa9bfc0z/QyB/zSsBw=
|
||||
github.com/google/pprof v0.0.0-20240829160300-da1f7e9f2b25/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4=
|
||||
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
@@ -234,28 +242,26 @@ github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQ
|
||||
github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
|
||||
github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0=
|
||||
github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w=
|
||||
github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI=
|
||||
github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk=
|
||||
github.com/gorilla/csrf v1.7.3 h1:BHWt6FTLZAb2HtWT5KDBf6qgpZzvtbp9QWDRKZMXJC0=
|
||||
github.com/gorilla/csrf v1.7.3/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
|
||||
github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 h1:+epNPbD5EqgpEMm5wrl4Hqts3jZt8+kYaqUisuuIGTk=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
|
||||
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
|
||||
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
|
||||
github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo=
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A=
|
||||
github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE=
|
||||
github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk=
|
||||
github.com/illarion/gonotify/v3 v3.0.2/go.mod h1:HWGPdPe817GfvY3w7cx6zkbzNZfi3QjcBm/wgVvEL1U=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914 h1:kD8PseueGeYiid/Mmcv17Q0Qqicc4F46jcX22L/e/Hs=
|
||||
@@ -264,10 +270,10 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY=
|
||||
github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
|
||||
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
|
||||
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg=
|
||||
github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=
|
||||
github.com/jagottsicher/termcolor v1.0.2/go.mod h1:RcH8uFwF/0wbEdQmi83rjmlJ+QOKdMSE9Rc1BEB7zFo=
|
||||
github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g=
|
||||
@@ -281,23 +287,19 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
|
||||
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk=
|
||||
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8=
|
||||
github.com/jsimonetti/rtnetlink v1.4.1 h1:JfD4jthWBqZMEffc5RjgmlzpYttAVw1sdnmiNaPO3hE=
|
||||
github.com/jsimonetti/rtnetlink v1.4.1/go.mod h1:xJjT7t59UIZ62GLZbv6PLLo8VFrostJMPBAheR6OM8w=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
|
||||
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ=
|
||||
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk=
|
||||
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||
@@ -320,8 +322,9 @@ github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8
|
||||
github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
@@ -332,8 +335,8 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw=
|
||||
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
|
||||
github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
|
||||
github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
|
||||
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg=
|
||||
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o=
|
||||
github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c=
|
||||
github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE=
|
||||
github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI=
|
||||
@@ -347,10 +350,16 @@ github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc
|
||||
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
|
||||
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
|
||||
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
|
||||
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
||||
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
|
||||
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
|
||||
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
|
||||
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
||||
@@ -361,25 +370,23 @@ github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 h1:9bCMuD3Tc
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25/go.mod h1:eDjgYHYDJbPLBLsyZ6qRaugP0mX8vePOhZ5id1fdzJw=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/opencontainers/runc v1.2.2 h1:jTg3Vw2A5f0N9PoxFTEwUhvpANGaNPT3689Yfd/zaX0=
|
||||
github.com/opencontainers/runc v1.2.2/go.mod h1:/PXzF0h531HTMsYQnmxXkBD7YaGShm/2zcRB79dksUc=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI=
|
||||
github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/ory/dockertest/v3 v3.11.0 h1:OiHcxKAvSDUwsEVh2BjxQQc/5EHz9n0va9awCtNGuyA=
|
||||
github.com/ory/dockertest/v3 v3.11.0/go.mod h1:VIPxS1gwT9NpPOrfD3rACs8Y9Z7yhzO4SB194iUDnUI=
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
|
||||
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
|
||||
github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw=
|
||||
github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw=
|
||||
github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a h1:S+AGcmAESQ0pXCUNnRH7V+bOUIgkSX5qVt2cNKCrm0Q=
|
||||
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
@@ -391,13 +398,12 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
|
||||
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
|
||||
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
|
||||
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.58.0 h1:N+N8vY4/23r6iYfD3UQZUoJPnUYAo7v6LG5XZxjZTXo=
|
||||
github.com/prometheus/common v0.58.0/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
|
||||
@@ -407,104 +413,99 @@ github.com/pterm/pterm v0.12.31/go.mod h1:32ZAWZVXD7ZfG0s8qqHXePte42kdz8ECtRyEej
|
||||
github.com/pterm/pterm v0.12.33/go.mod h1:x+h2uL+n7CP/rel9+bImHD5lF3nM9vJj80k9ybiiTTE=
|
||||
github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8=
|
||||
github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
|
||||
github.com/pterm/pterm v0.12.79 h1:lH3yrYMhdpeqX9y5Ep1u7DejyHy7NSQg9qrBjF9dFT4=
|
||||
github.com/pterm/pterm v0.12.79/go.mod h1:1v/gzOF1N0FsjbgTHZ1wVycRkKiatFvJSJC4IGaQAAo=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/pterm/pterm v0.12.81 h1:ju+j5I2++FO1jBKMmscgh5h5DPFDFMB7epEjSoKehKA=
|
||||
github.com/pterm/pterm v0.12.81/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
||||
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
|
||||
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
|
||||
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
|
||||
github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
|
||||
github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
|
||||
github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
|
||||
github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
|
||||
github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
|
||||
github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
|
||||
github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI=
|
||||
github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
|
||||
github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU=
|
||||
github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
|
||||
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
|
||||
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.20.0-alpha.6 h1:f65Cr/+2qk4GfHC0xqT/isoupQppwN5+VLRztUGTDbY=
|
||||
github.com/spf13/viper v1.20.0-alpha.6/go.mod h1:CGBZzv0c9fOUASm6rfus4wdeIjR/04NOLq1P4KRhX3k=
|
||||
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
|
||||
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
|
||||
github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk=
|
||||
github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
|
||||
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ=
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4=
|
||||
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4=
|
||||
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg=
|
||||
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw=
|
||||
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
|
||||
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM=
|
||||
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
|
||||
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio=
|
||||
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8=
|
||||
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw=
|
||||
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8=
|
||||
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33 h1:idh63uw+gsG05HwjZsAENCG4KZfyvjK03bpjxa5qRRk=
|
||||
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo=
|
||||
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU=
|
||||
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
|
||||
github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w=
|
||||
github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU=
|
||||
github.com/tailscale/setec v0.0.0-20240314234648-9da8e7407257 h1:6WsbDYsikRNmmbfZoRoyIEA9tfl0aspPAE0t7nBj2B4=
|
||||
github.com/tailscale/setec v0.0.0-20240314234648-9da8e7407257/go.mod h1:hrq01/0LUDZf4mMkcZ7Ovmy33jvCi4RpESpb9kPxV6E=
|
||||
github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185 h1:zT+qB+2Ghulj50d5Wq6h6vQYqD2sPdhy4FF6+FHedVE=
|
||||
github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185/go.mod h1:LoIjI6z/6efr9ebISQ5l2vjQmjc8QJrAYZdy3Ec3sVs=
|
||||
github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1 h1:wmsnxEEuRlgK7Bhdkmm0JGrjjc0JoHZThLLo0WXXbLs=
|
||||
github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1/go.mod h1:XN193fbz9RR/5stlWPMMIZR+TTa1BUkDJm5Azwzxwgw=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
|
||||
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA=
|
||||
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc=
|
||||
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d h1:mnqtPWYyvNiPU9l9tzO2YbHXU/xV664XthZYA26lOiE=
|
||||
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d/go.mod h1:9BzmlFc3OLqLzLTF/5AY+BMs+clxMqyhSGzgXIm8mNI=
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694 h1:95eIP97c88cqAFU/8nURjgI9xxPbD+Ci6mY/a79BI/w=
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694/go.mod h1:veguaG8tVg1H/JG5RfpoUW41I+O8ClPElo/fTYr8mMk=
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97 h1:JJkDnrAhHvOCttk8z9xeZzcDlzzkRA7+Duxj9cwOyxk=
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97/go.mod h1:9jS8HxwsP2fU4ESZ7DZL+fpH/U66EVlVMzdgznH12RM=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
|
||||
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=
|
||||
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc h1:cezaQN9pvKVaw56Ma5qr/G646uKIYP0yQf+OyWN/okc=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 h1:h/41LFTrwMxB9Xvvug0kRdQCU5TlV1+pAMQw0ZtDE3U=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
|
||||
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek=
|
||||
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg=
|
||||
github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=
|
||||
github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk=
|
||||
github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0=
|
||||
github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8=
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2oeKxpIUmtiDV5sn71VgeQgg6vcE7k=
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM=
|
||||
github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok=
|
||||
github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI=
|
||||
github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs=
|
||||
github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI=
|
||||
github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw=
|
||||
github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264=
|
||||
github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg=
|
||||
github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE=
|
||||
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=
|
||||
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA=
|
||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
|
||||
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
@@ -523,14 +524,30 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJu
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
|
||||
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
|
||||
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
|
||||
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8=
|
||||
go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g=
|
||||
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek=
|
||||
go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g=
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
@@ -538,29 +555,20 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
|
||||
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA=
|
||||
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a h1:8qmSSA8Gz/1kTrCe0nqR0R3Gb/NDhykzWw2q2mWZydM=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ=
|
||||
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ=
|
||||
golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
|
||||
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
|
||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
@@ -569,26 +577,21 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
|
||||
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
|
||||
golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -599,24 +602,21 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -624,8 +624,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
|
||||
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
|
||||
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
@@ -633,23 +633,18 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
|
||||
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
|
||||
golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
|
||||
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
|
||||
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
|
||||
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -658,26 +653,15 @@ golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeu
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c=
|
||||
google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
|
||||
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
|
||||
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
|
||||
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
@@ -690,51 +674,47 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8=
|
||||
gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
|
||||
gorm.io/gorm v1.25.11 h1:/Wfyg1B/je1hnDx3sMkX+gAlxrlZpn6X0BXRlwXlvHg=
|
||||
gorm.io/gorm v1.25.11/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
|
||||
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
|
||||
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
|
||||
gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
|
||||
gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8=
|
||||
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I=
|
||||
honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM=
|
||||
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
|
||||
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
|
||||
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
|
||||
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||
modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ=
|
||||
modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
|
||||
modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y=
|
||||
modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s=
|
||||
modernc.org/cc/v4 v4.25.2 h1:T2oH7sZdGvTaie0BRNFbIYsabzCxUQg8nLqCdQ2i0ic=
|
||||
modernc.org/cc/v4 v4.25.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.25.1 h1:TFSzPrAGmDsdnhT9X2UrcPMI3N/mJ9/X9ykKXwLhDsU=
|
||||
modernc.org/ccgo/v4 v4.25.1/go.mod h1:njjuAYiPflywOOrm3B7kCB444ONP5pAVr8PIEoE0uDw=
|
||||
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
|
||||
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
|
||||
modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
|
||||
modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
|
||||
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI=
|
||||
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4=
|
||||
modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U=
|
||||
modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w=
|
||||
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
|
||||
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
|
||||
modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
|
||||
modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
|
||||
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
|
||||
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
||||
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
|
||||
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
|
||||
modernc.org/sqlite v1.33.1 h1:trb6Z3YYoeM9eDL1O8do81kP+0ejv+YzgyFo+Gwy0nM=
|
||||
modernc.org/sqlite v1.33.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k=
|
||||
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
|
||||
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
|
||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||
modernc.org/libc v1.62.1 h1:s0+fv5E3FymN8eJVmnk0llBe6rOxCu/DEU+XygRbS8s=
|
||||
modernc.org/libc v1.62.1/go.mod h1:iXhATfJQLjG3NWy56a6WVU73lWOcdYVxsvwCgoPljuo=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.10.0 h1:fzumd51yQ1DxcOxSO+S6X7+QTuVU+n8/Aj7swYjFfC4=
|
||||
modernc.org/memory v1.10.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.37.0 h1:s1TMe7T3Q3ovQiK2Ouz4Jwh7dw4ZDqbebSDTlSJdfjI=
|
||||
modernc.org/sqlite v1.37.0/go.mod h1:5YiWv+YviqGMuGw4V+PNplcyaJ5v+vQd7TQOgkACoJM=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
|
||||
tailscale.com v1.75.0-pre.0.20240926101731-7d1160ddaab7 h1:nfRWV6ECxwNvvXKtbqSVstjlEi1BWktzv3FuxWpyyx0=
|
||||
tailscale.com v1.75.0-pre.0.20240926101731-7d1160ddaab7/go.mod h1:xKxYf3B3PuezFlRaMT+VhuVu8XTFUTLy+VCzLPMJVmg=
|
||||
zgo.at/zcache/v2 v2.1.0 h1:USo+ubK+R4vtjw4viGzTe/zjXyPw6R7SK/RL3epBBxs=
|
||||
zgo.at/zcache/v2 v2.1.0/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
|
||||
tailscale.com v1.84.3 h1:Ur9LMedSgicwbqpy5xn7t49G8490/s6rqAJOk5Q5AYE=
|
||||
tailscale.com v1.84.3/go.mod h1:6/S63NMAhmncYT/1zIPDJkvCuZwMw+JnUuOfSPNazpo=
|
||||
zgo.at/zcache/v2 v2.2.0 h1:K29/IPjMniZfveYE+IRXfrl11tMzHkIPuyGrfVZ2fGo=
|
||||
zgo.at/zcache/v2 v2.2.0/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
|
||||
zombiezen.com/go/postgrestest v1.0.1 h1:aXoADQAJmZDU3+xilYVut0pHhgc0sF8ZspPW9gFNwP4=
|
||||
zombiezen.com/go/postgrestest v1.0.1/go.mod h1:marlZezr+k2oSJrvXHnZUs1olHqpE9czlz8ZYkVxliQ=
|
||||
|
||||
386
hscontrol/app.go
386
hscontrol/app.go
@@ -5,7 +5,6 @@ import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof" // nolint
|
||||
@@ -20,24 +19,24 @@ import (

"github.com/davecgh/go-spew/spew"
"github.com/gorilla/mux"
grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpcRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/capver"
"github.com/juanfont/headscale/hscontrol/db"
"github.com/juanfont/headscale/hscontrol/derp"
derpServer "github.com/juanfont/headscale/hscontrol/derp/server"
"github.com/juanfont/headscale/hscontrol/dns"
"github.com/juanfont/headscale/hscontrol/mapper"
"github.com/juanfont/headscale/hscontrol/notifier"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/juanfont/headscale/hscontrol/util"
zerolog "github.com/philip-bui/grpc-zerolog"
"github.com/pkg/profile"
"github.com/prometheus/client_golang/prometheus/promhttp"
zl "github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/sasha-s/go-deadlock"
"golang.org/x/crypto/acme"
"golang.org/x/crypto/acme/autocert"
"golang.org/x/sync/errgroup"
@@ -49,13 +48,11 @@ import (
"google.golang.org/grpc/peer"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"gorm.io/gorm"
"tailscale.com/envknob"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
"tailscale.com/types/key"
"tailscale.com/util/dnsname"
zcache "zgo.at/zcache/v2"
)

var (
@@ -68,37 +65,39 @@ var (
)
)

var (
debugDeadlock = envknob.Bool("HEADSCALE_DEBUG_DEADLOCK")
debugDeadlockTimeout = envknob.RegisterDuration("HEADSCALE_DEBUG_DEADLOCK_TIMEOUT")
)

func init() {
deadlock.Opts.Disable = !debugDeadlock
if debugDeadlock {
deadlock.Opts.DeadlockTimeout = debugDeadlockTimeout()
deadlock.Opts.PrintAllCurrentGoroutines = true
}
}

const (
AuthPrefix = "Bearer "
updateInterval = 5 * time.Second
privateKeyFileMode = 0o600
headscaleDirPerm = 0o700

registerCacheExpiration = time.Minute * 15
registerCacheCleanup = time.Minute * 20
)
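As an aside on the init() above: go-deadlock detection is opt-in and stays disabled unless the debug knob is set. Below is a minimal standalone sketch of that pattern; the environment variable name and timeout are illustrative, not headscale's actual knobs.

```go
package main

import (
	"os"
	"time"

	"github.com/sasha-s/go-deadlock"
)

// counter uses deadlock.Mutex, a drop-in replacement for sync.Mutex that can
// report lock-ordering problems and locks held longer than DeadlockTimeout.
type counter struct {
	mu deadlock.Mutex
	n  int
}

func (c *counter) inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}

func main() {
	// Same shape as the init() above: detection stays off unless enabled.
	enabled := os.Getenv("DEBUG_DEADLOCK") == "1" // illustrative knob name
	deadlock.Opts.Disable = !enabled
	if enabled {
		deadlock.Opts.DeadlockTimeout = 10 * time.Second
		deadlock.Opts.PrintAllCurrentGoroutines = true
	}

	c := &counter{}
	c.inc()
}
```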

// Headscale represents the base app of the service.
type Headscale struct {
cfg *types.Config
db *db.HSDatabase
ipAlloc *db.IPAllocator
state *state.State
noisePrivateKey *key.MachinePrivate
ephemeralGC *db.EphemeralGarbageCollector

DERPMap *tailcfg.DERPMap
DERPServer *derpServer.DERPServer

polManOnce sync.Once
polMan policy.PolicyManager
// Things that generate changes
extraRecordMan *dns.ExtraRecordsMan

mapper *mapper.Mapper
nodeNotifier *notifier.Notifier

registrationCache *zcache.Cache[string, types.Node]

authProvider AuthProvider
mapBatcher mapper.Batcher

pollNetMapStreamWG sync.WaitGroup
}
@@ -123,42 +122,36 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
|
||||
}
|
||||
|
||||
registrationCache := zcache.New[string, types.Node](
|
||||
registerCacheExpiration,
|
||||
registerCacheCleanup,
|
||||
)
|
||||
s, err := state.NewState(cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("init state: %w", err)
|
||||
}
|
||||
|
||||
app := Headscale{
|
||||
cfg: cfg,
|
||||
noisePrivateKey: noisePrivateKey,
|
||||
registrationCache: registrationCache,
|
||||
pollNetMapStreamWG: sync.WaitGroup{},
|
||||
nodeNotifier: notifier.NewNotifier(cfg),
|
||||
state: s,
|
||||
}
|
||||
|
||||
app.db, err = db.NewHeadscaleDatabase(
|
||||
cfg.Database,
|
||||
cfg.BaseDomain,
|
||||
registrationCache,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
app.ipAlloc, err = db.NewIPAllocator(app.db, cfg.PrefixV4, cfg.PrefixV6, cfg.IPAllocation)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
app.ephemeralGC = db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
|
||||
if err := app.db.DeleteEphemeralNode(ni); err != nil {
|
||||
log.Err(err).Uint64("node.id", ni.Uint64()).Msgf("failed to delete ephemeral node")
|
||||
// Initialize ephemeral garbage collector
|
||||
ephemeralGC := db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
|
||||
node, err := app.state.GetNodeByID(ni)
|
||||
if err != nil {
|
||||
log.Err(err).Uint64("node.id", ni.Uint64()).Msgf("failed to get ephemeral node for deletion")
|
||||
return
|
||||
}
|
||||
})
|
||||
|
||||
if err = app.loadPolicyManager(); err != nil {
|
||||
return nil, fmt.Errorf("failed to load ACL policy: %w", err)
|
||||
}
|
||||
policyChanged, err := app.state.DeleteNode(node)
|
||||
if err != nil {
|
||||
log.Err(err).Uint64("node.id", ni.Uint64()).Msgf("failed to delete ephemeral node")
|
||||
return
|
||||
}
|
||||
|
||||
app.Change(policyChanged)
|
||||
log.Debug().Uint64("node.id", ni.Uint64()).Msgf("deleted ephemeral node")
|
||||
})
|
||||
app.ephemeralGC = ephemeralGC
|
||||
|
||||
var authProvider AuthProvider
|
||||
authProvider = NewAuthProviderWeb(cfg.ServerURL)
|
||||
@@ -167,12 +160,9 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
defer cancel()
|
||||
oidcProvider, err := NewAuthProviderOIDC(
|
||||
ctx,
|
||||
&app,
|
||||
cfg.ServerURL,
|
||||
&cfg.OIDC,
|
||||
app.db,
|
||||
app.nodeNotifier,
|
||||
app.ipAlloc,
|
||||
app.polMan,
|
||||
)
|
||||
if err != nil {
|
||||
if cfg.OIDC.OnlyStartIfOIDCIsAvailable {
|
||||
@@ -191,10 +181,14 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
|
||||
var magicDNSDomains []dnsname.FQDN
|
||||
if cfg.PrefixV4 != nil {
|
||||
magicDNSDomains = append(magicDNSDomains, util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
|
||||
magicDNSDomains = append(
|
||||
magicDNSDomains,
|
||||
util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
|
||||
}
|
||||
if cfg.PrefixV6 != nil {
|
||||
magicDNSDomains = append(magicDNSDomains, util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...)
|
||||
magicDNSDomains = append(
|
||||
magicDNSDomains,
|
||||
util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...)
|
||||
}
|
||||
|
||||
// we might have routes already from Split DNS
|
||||
@@ -219,6 +213,14 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
)
|
||||
}
|
||||
|
||||
if cfg.DERP.ServerVerifyClients {
|
||||
t := http.DefaultTransport.(*http.Transport) //nolint:forcetypeassert
|
||||
t.RegisterProtocol(
|
||||
derpServer.DerpVerifyScheme,
|
||||
derpServer.NewDERPVerifyTransport(app.handleVerifyRequest),
|
||||
)
|
||||
}
|
||||
|
||||
embeddedDERPServer, err := derpServer.NewDERPServer(
|
||||
cfg.ServerURL,
|
||||
key.NodePrivate(*derpServerKey),
|
||||
@@ -245,11 +247,11 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {

lastExpiryCheck := time.Unix(0, 0)

derpTicker := time.NewTicker(h.cfg.DERP.UpdateFrequency)
defer derpTicker.Stop()
// If we don't want auto update, just stop the ticker
if !h.cfg.DERP.AutoUpdate {
derpTicker.Stop()
derpTickerChan := make(<-chan time.Time)
if h.cfg.DERP.AutoUpdate && h.cfg.DERP.UpdateFrequency != 0 {
derpTicker := time.NewTicker(h.cfg.DERP.UpdateFrequency)
defer derpTicker.Stop()
derpTickerChan = derpTicker.C
}

var extraRecordsUpdate <-chan []tailcfg.DNSRecord
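One detail worth calling out in the hunk above: derpTickerChan starts out as a receive-only channel that is never written to, so receiving from it blocks forever and the corresponding select case is effectively disabled when auto update is off. A small self-contained sketch of that idiom (the flag and durations are made up for illustration):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	autoUpdate := false // stand-in for a config flag

	// A channel that is never sent on blocks forever, so the select case
	// wired to it simply never fires while the feature is disabled.
	tickC := make(<-chan time.Time)
	if autoUpdate {
		t := time.NewTicker(time.Second)
		defer t.Stop()
		tickC = t.C
	}

	timeout := time.After(100 * time.Millisecond)
	select {
	case <-tickC:
		fmt.Println("tick (auto update enabled)")
	case <-timeout:
		fmt.Println("no tick (auto update disabled)")
	}
}
```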
@@ -266,38 +268,29 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
return
|
||||
|
||||
case <-expireTicker.C:
|
||||
var update types.StateUpdate
|
||||
var expiredNodeChanges []change.ChangeSet
|
||||
var changed bool
|
||||
|
||||
if err := h.db.Write(func(tx *gorm.DB) error {
|
||||
lastExpiryCheck, update, changed = db.ExpireExpiredNodes(tx, lastExpiryCheck)
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Error().Err(err).Msg("database error while expiring nodes")
|
||||
continue
|
||||
}
|
||||
lastExpiryCheck, expiredNodeChanges, changed = h.state.ExpireExpiredNodes(lastExpiryCheck)
|
||||
|
||||
if changed {
|
||||
log.Trace().Interface("nodes", update.ChangePatches).Msgf("expiring nodes")
|
||||
log.Trace().Interface("changes", expiredNodeChanges).Msgf("expiring nodes")
|
||||
|
||||
ctx := types.NotifyCtx(context.Background(), "expire-expired", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, update)
|
||||
// Send the changes directly since they're already in the new format
|
||||
for _, nodeChange := range expiredNodeChanges {
|
||||
h.Change(nodeChange)
|
||||
}
|
||||
}
|
||||
|
||||
case <-derpTicker.C:
|
||||
case <-derpTickerChan:
|
||||
log.Info().Msg("Fetching DERPMap updates")
|
||||
h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
|
||||
derpMap := derp.GetDERPMap(h.cfg.DERP)
|
||||
if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
|
||||
region, _ := h.DERPServer.GenerateRegion()
|
||||
h.DERPMap.Regions[region.RegionID] = ®ion
|
||||
derpMap.Regions[region.RegionID] = ®ion
|
||||
}
|
||||
|
||||
ctx := types.NotifyCtx(context.Background(), "derpmap-update", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StateDERPUpdated,
|
||||
DERPMap: h.DERPMap,
|
||||
})
|
||||
h.Change(change.DERPSet)
|
||||
|
||||
case records, ok := <-extraRecordsUpdate:
|
||||
if !ok {
|
||||
@@ -305,21 +298,16 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
}
|
||||
h.cfg.TailcfgDNSConfig.ExtraRecords = records
|
||||
|
||||
ctx := types.NotifyCtx(context.Background(), "dns-extrarecord", "all")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
// TODO(kradalby): We can probably do better than sending a full update here,
|
||||
// but for now this will ensure that all of the nodes get the new records.
|
||||
Type: types.StateFullUpdate,
|
||||
})
|
||||
h.Change(change.ExtraRecordsSet)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
req interface{},
|
||||
req any,
|
||||
info *grpc.UnaryServerInfo,
|
||||
handler grpc.UnaryHandler,
|
||||
) (interface{}, error) {
|
||||
) (any, error) {
|
||||
// Check if the request is coming from the on-server client.
|
||||
// This is not secure, but it is to maintain maintainability
|
||||
// with the "legacy" database-based client
|
||||
@@ -357,7 +345,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
)
|
||||
}
|
||||
|
||||
valid, err := h.db.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
|
||||
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
|
||||
if err != nil {
|
||||
return ctx, status.Error(codes.Internal, "failed to validate token")
|
||||
}
|
||||
@@ -402,7 +390,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
return
|
||||
}
|
||||
|
||||
valid, err := h.db.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
|
||||
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
@@ -458,11 +446,13 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
router := mux.NewRouter()
|
||||
router.Use(prometheusMiddleware)
|
||||
|
||||
router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).Methods(http.MethodPost, http.MethodGet)
|
||||
router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).
|
||||
Methods(http.MethodPost, http.MethodGet)
|
||||
|
||||
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/register/{mkey}", h.authProvider.RegisterHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
if provider, ok := h.authProvider.(*AuthProviderOIDC); ok {
|
||||
router.HandleFunc("/oidc/callback", provider.OIDCCallbackHandler).Methods(http.MethodGet)
|
||||
@@ -483,7 +473,7 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
router.HandleFunc("/derp", h.DERPServer.DERPHandler)
|
||||
router.HandleFunc("/derp/probe", derpServer.DERPProbeHandler)
|
||||
router.HandleFunc("/derp/latency-check", derpServer.DERPProbeHandler)
|
||||
router.HandleFunc("/bootstrap-dns", derpServer.DERPBootstrapDNSHandler(h.DERPMap))
|
||||
router.HandleFunc("/bootstrap-dns", derpServer.DERPBootstrapDNSHandler(h.state.DERPMap()))
|
||||
}
|
||||
|
||||
apiRouter := router.PathPrefix("/api").Subrouter()
|
||||
@@ -495,54 +485,10 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
return router
|
||||
}
|
||||
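For readers unfamiliar with gorilla/mux, here is a small standalone sketch of the HandleFunc(...).Methods(...) registration style used in createRouter above; the paths mirror the ones registered above, but the handlers are placeholders rather than headscale's.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// Path variables such as {registration_id} are available via mux.Vars,
	// and Methods() restricts which HTTP verbs reach the handler.
	r.HandleFunc("/register/{registration_id}", func(w http.ResponseWriter, req *http.Request) {
		vars := mux.Vars(req)
		fmt.Fprintf(w, "registration id: %s\n", vars["registration_id"])
	}).Methods(http.MethodGet)

	r.HandleFunc("/health", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}).Methods(http.MethodGet)

	_ = http.ListenAndServe("127.0.0.1:8080", r)
}
```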
|
||||
// TODO(kradalby): Do a variant of this, and polman which only updates the node that has changed.
|
||||
// Maybe we should attempt a new in memory state and not go via the DB?
|
||||
func usersChangedHook(db *db.HSDatabase, polMan policy.PolicyManager, notif *notifier.Notifier) error {
|
||||
users, err := db.ListUsers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
changed, err := polMan.SetUsers(users)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if changed {
|
||||
ctx := types.NotifyCtx(context.Background(), "acl-users-change", "all")
|
||||
notif.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StateFullUpdate,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO(kradalby): Do a variant of this, and polman which only updates the node that has changed.
|
||||
// Maybe we should attempt a new in memory state and not go via the DB?
|
||||
func nodesChangedHook(db *db.HSDatabase, polMan policy.PolicyManager, notif *notifier.Notifier) error {
|
||||
nodes, err := db.ListNodes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
changed, err := polMan.SetNodes(nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if changed {
|
||||
ctx := types.NotifyCtx(context.Background(), "acl-nodes-change", "all")
|
||||
notif.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StateFullUpdate,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serve launches the HTTP and gRPC server service Headscale and the API.
|
||||
func (h *Headscale) Serve() error {
|
||||
capver.CanOldCodeBeCleanedUp()
|
||||
|
||||
if profilingEnabled {
|
||||
if profilingPath != "" {
|
||||
err := os.MkdirAll(profilingPath, os.ModePerm)
|
||||
@@ -560,10 +506,16 @@ func (h *Headscale) Serve() error {
|
||||
spew.Dump(h.cfg)
|
||||
}
|
||||
|
||||
// Fetch an initial DERP Map before we start serving
|
||||
h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
|
||||
h.mapper = mapper.NewMapper(h.db, h.cfg, h.DERPMap, h.nodeNotifier, h.polMan)
|
||||
log.Info().Str("version", types.Version).Str("commit", types.GitCommitHash).Msg("Starting Headscale")
|
||||
log.Info().
|
||||
Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).
|
||||
Msg("Clients with a lower minimum version will be rejected")
|
||||
|
||||
h.mapBatcher = mapper.NewBatcherAndMapper(h.cfg, h.state)
|
||||
h.mapBatcher.Start()
|
||||
defer h.mapBatcher.Close()
|
||||
|
||||
// TODO(kradalby): fix state part.
|
||||
if h.cfg.DERP.ServerEnabled {
|
||||
// When embedded DERP is enabled we always need a STUN server
|
||||
if h.cfg.DERP.STUNAddr == "" {
|
||||
@@ -576,13 +528,13 @@ func (h *Headscale) Serve() error {
|
||||
}
|
||||
|
||||
if h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
|
||||
h.DERPMap.Regions[region.RegionID] = ®ion
|
||||
h.state.DERPMap().Regions[region.RegionID] = ®ion
|
||||
}
|
||||
|
||||
go h.DERPServer.ServeSTUN()
|
||||
}
|
||||
|
||||
if len(h.DERPMap.Regions) == 0 {
|
||||
if len(h.state.DERPMap().Regions) == 0 {
|
||||
return errEmptyInitialDERPMap
|
||||
}
|
||||
|
||||
@@ -591,7 +543,7 @@ func (h *Headscale) Serve() error {
|
||||
// around between restarts, they will reconnect and the GC will
|
||||
// be cancelled.
|
||||
go h.ephemeralGC.Start()
|
||||
ephmNodes, err := h.db.ListEphemeralNodes()
|
||||
ephmNodes, err := h.state.ListEphemeralNodes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list ephemeral nodes: %w", err)
|
||||
}
|
||||
@@ -714,12 +666,10 @@ func (h *Headscale) Serve() error {
|
||||
log.Info().Msgf("Enabling remote gRPC at %s", h.cfg.GRPCAddr)
|
||||
|
||||
grpcOptions := []grpc.ServerOption{
|
||||
grpc.UnaryInterceptor(
|
||||
grpcMiddleware.ChainUnaryServer(
|
||||
h.grpcAuthenticationInterceptor,
|
||||
// Uncomment to debug grpc communication.
|
||||
// zerolog.NewUnaryServerInterceptor(),
|
||||
),
|
||||
grpc.ChainUnaryInterceptor(
|
||||
h.grpcAuthenticationInterceptor,
|
||||
// Uncomment to debug grpc communication.
|
||||
// zerolog.NewUnaryServerInterceptor(),
|
||||
),
|
||||
}
|
||||
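The hunk above replaces the go-grpc-middleware chaining helper with grpc-go's built-in grpc.ChainUnaryInterceptor option, which runs each interceptor in order for every unary RPC. A minimal sketch of that option with a placeholder logging interceptor (illustrative only, not headscale's interceptor):

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

// loggingInterceptor is a stand-in for an authentication interceptor; each
// chained interceptor sees the request before handing it to the next handler.
func loggingInterceptor(
	ctx context.Context,
	req any,
	info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler,
) (any, error) {
	log.Printf("unary call: %s", info.FullMethod)
	return handler(ctx, req)
}

func main() {
	srv := grpc.NewServer(
		grpc.ChainUnaryInterceptor(
			loggingInterceptor,
			// further interceptors (auth, tracing, ...) would go here
		),
	)

	lis, err := net.Listen("tcp", "127.0.0.1:50051")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("serving gRPC on", lis.Addr())
	_ = srv.Serve(lis)
}
```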
|
||||
@@ -781,26 +731,12 @@ func (h *Headscale) Serve() error {
|
||||
log.Info().
|
||||
Msgf("listening and serving HTTP on: %s", h.cfg.Addr)
|
||||
|
||||
debugMux := http.NewServeMux()
|
||||
debugMux.Handle("/debug/pprof/", http.DefaultServeMux)
|
||||
debugMux.HandleFunc("/debug/notifier", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(h.nodeNotifier.String()))
|
||||
})
|
||||
debugMux.Handle("/metrics", promhttp.Handler())
|
||||
|
||||
debugHTTPServer := &http.Server{
|
||||
Addr: h.cfg.MetricsAddr,
|
||||
Handler: debugMux,
|
||||
ReadTimeout: types.HTTPTimeout,
|
||||
WriteTimeout: 0,
|
||||
}
|
||||
|
||||
debugHTTPListener, err := net.Listen("tcp", h.cfg.MetricsAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
}
|
||||
|
||||
debugHTTPServer := h.debugHTTPServer()
|
||||
errorGroup.Go(func() error { return debugHTTPServer.Serve(debugHTTPListener) })
|
||||
|
||||
log.Info().
|
||||
@@ -836,30 +772,28 @@ func (h *Headscale) Serve() error {
|
||||
case syscall.SIGHUP:
|
||||
log.Info().
|
||||
Str("signal", sig.String()).
|
||||
Msg("Received SIGHUP, reloading ACL and Config")
|
||||
Msg("Received SIGHUP, reloading ACL policy")
|
||||
|
||||
if err := h.loadPolicyManager(); err != nil {
|
||||
log.Error().Err(err).Msg("failed to reload Policy")
|
||||
if h.cfg.Policy.IsEmpty() {
|
||||
continue
|
||||
}
|
||||
|
||||
pol, err := h.policyBytes()
|
||||
changed, err := h.state.ReloadPolicy()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to get policy blob")
|
||||
}
|
||||
|
||||
changed, err := h.polMan.SetPolicy(pol)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to set new policy")
|
||||
log.Error().Err(err).Msgf("reloading policy")
|
||||
continue
|
||||
}
|
||||
|
||||
if changed {
|
||||
log.Info().
|
||||
Msg("ACL policy successfully reloaded, notifying nodes of change")
|
||||
|
||||
ctx := types.NotifyCtx(context.Background(), "acl-sighup", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StateFullUpdate,
|
||||
})
|
||||
err = h.state.AutoApproveNodes()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to approve routes after new policy")
|
||||
}
|
||||
|
||||
h.Change(change.PolicySet)
|
||||
}
|
||||
default:
|
||||
info := func(msg string) { log.Info().Msg(msg) }
|
||||
@@ -885,7 +819,6 @@ func (h *Headscale) Serve() error {
|
||||
}
|
||||
|
||||
info("closing node notifier")
|
||||
h.nodeNotifier.Close()
|
||||
|
||||
info("waiting for netmap stream to close")
|
||||
h.pollNetMapStreamWG.Wait()
|
||||
@@ -916,7 +849,7 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
// Close db connections
|
||||
info("closing database connection")
|
||||
err = h.db.Close()
|
||||
err = h.state.Close()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to close db")
|
||||
}
|
||||
@@ -1016,13 +949,10 @@ func notFoundHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
body, _ := io.ReadAll(req.Body)
|
||||
|
||||
log.Trace().
|
||||
Interface("header", req.Header).
|
||||
Interface("proto", req.Proto).
|
||||
Interface("url", req.URL).
|
||||
Bytes("body", body).
|
||||
Msg("Request did not match")
|
||||
writer.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
@@ -1071,85 +1001,9 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
|
||||
return &machineKey, nil
|
||||
}
|
||||
|
||||
// policyBytes returns the appropriate policy for the
|
||||
// current configuration as a []byte array.
|
||||
func (h *Headscale) policyBytes() ([]byte, error) {
|
||||
switch h.cfg.Policy.Mode {
|
||||
case types.PolicyModeFile:
|
||||
path := h.cfg.Policy.Path
|
||||
|
||||
// It is fine to start headscale without a policy file.
|
||||
if len(path) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
absPath := util.AbsolutePathFromConfigPath(path)
|
||||
policyFile, err := os.Open(absPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer policyFile.Close()
|
||||
|
||||
return io.ReadAll(policyFile)
|
||||
|
||||
case types.PolicyModeDB:
|
||||
p, err := h.db.GetPolicy()
|
||||
if err != nil {
|
||||
if errors.Is(err, types.ErrPolicyNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return []byte(p.Data), err
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unsupported policy mode: %s", h.cfg.Policy.Mode)
|
||||
}
|
||||
|
||||
func (h *Headscale) loadPolicyManager() error {
|
||||
var errOut error
|
||||
h.polManOnce.Do(func() {
|
||||
// Validate and reject configuration that would error when applied
|
||||
// when creating a map response. This requires nodes, so there is still
|
||||
// a scenario where they might be allowed if the server has no nodes
|
||||
// yet, but it should help for the general case and for hot reloading
|
||||
// configurations.
|
||||
// Note that this check is only done for file-based policies in this function
|
||||
// as the database-based policies are checked in the gRPC API where it is not
|
||||
// allowed to be written to the database.
|
||||
nodes, err := h.db.ListNodes()
|
||||
if err != nil {
|
||||
errOut = fmt.Errorf("loading nodes from database to validate policy: %w", err)
|
||||
return
|
||||
}
|
||||
users, err := h.db.ListUsers()
|
||||
if err != nil {
|
||||
errOut = fmt.Errorf("loading users from database to validate policy: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
pol, err := h.policyBytes()
|
||||
if err != nil {
|
||||
errOut = fmt.Errorf("loading policy bytes: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
h.polMan, err = policy.NewPolicyManager(pol, users, nodes)
|
||||
if err != nil {
|
||||
errOut = fmt.Errorf("creating policy manager: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(nodes) > 0 {
|
||||
_, err = h.polMan.SSHPolicy(nodes[0])
|
||||
if err != nil {
|
||||
errOut = fmt.Errorf("verifying SSH rules: %w", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
return errOut
|
||||
// Change is used to send changes to nodes.
// All changes should be enqueued here; empty change sets are automatically
// ignored.
func (h *Headscale) Change(c change.ChangeSet) {
h.mapBatcher.AddWork(c)
}
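Change is now the single entry point for propagating state changes to connected nodes via the batcher. As a rough mental model only (not headscale's actual mapper.Batcher), a batcher of this shape accepts work on a channel and drains it asynchronously:

```go
package main

import "fmt"

// changeSet is a stand-in for headscale's change.ChangeSet; the real type
// describes which nodes, routes, or policy changed.
type changeSet struct{ reason string }

// batcher plays the role of mapper.Batcher above: callers hand it work via
// AddWork and a background goroutine processes it.
type batcher struct{ work chan changeSet }

func newBatcher() *batcher { return &batcher{work: make(chan changeSet, 16)} }

func (b *batcher) AddWork(c changeSet) { b.work <- c }

// Start launches the worker and returns a channel that closes when the
// queue has been fully drained after Close.
func (b *batcher) Start() <-chan struct{} {
	done := make(chan struct{})
	go func() {
		defer close(done)
		for c := range b.work {
			fmt.Println("processing change:", c.reason)
		}
	}()
	return done
}

func (b *batcher) Close() { close(b.work) }

func main() {
	b := newBatcher()
	done := b.Start()
	b.AddWork(changeSet{reason: "policy reloaded"})
	b.AddWork(changeSet{reason: "derp map updated"})
	b.Close()
	<-done // wait for the worker to drain the queue
}
```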
|
||||
@@ -2,16 +2,16 @@ package hscontrol
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/db"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/juanfont/headscale/hscontrol/types/change"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
@@ -20,692 +20,251 @@ import (
|
||||
|
||||
type AuthProvider interface {
|
||||
RegisterHandler(http.ResponseWriter, *http.Request)
|
||||
AuthURL(key.MachinePublic) string
|
||||
AuthURL(types.RegistrationID) string
|
||||
}
|
||||
|
||||
func logAuthFunc(
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (func(string), func(string), func(error, string)) {
|
||||
return func(msg string) {
|
||||
log.Info().
|
||||
Caller().
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Str("node_key", registerRequest.NodeKey.ShortString()).
|
||||
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Str("followup", registerRequest.Followup).
|
||||
Time("expiry", registerRequest.Expiry).
|
||||
Msg(msg)
|
||||
},
|
||||
func(msg string) {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Str("node_key", registerRequest.NodeKey.ShortString()).
|
||||
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Str("followup", registerRequest.Followup).
|
||||
Time("expiry", registerRequest.Expiry).
|
||||
Msg(msg)
|
||||
},
|
||||
func(err error, msg string) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Str("node_key", registerRequest.NodeKey.ShortString()).
|
||||
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Str("followup", registerRequest.Followup).
|
||||
Time("expiry", registerRequest.Expiry).
|
||||
Err(err).
|
||||
Msg(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// handleRegister is the logic for registering a client.
|
||||
func (h *Headscale) handleRegister(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
ctx context.Context,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) {
|
||||
logInfo, logTrace, _ := logAuthFunc(regReq, machineKey)
|
||||
now := time.Now().UTC()
|
||||
logTrace("handleRegister called, looking up machine in DB")
|
||||
node, err := h.db.GetNodeByAnyKey(machineKey, regReq.NodeKey, regReq.OldNodeKey)
|
||||
logTrace("handleRegister database lookup has returned")
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
// If the node has AuthKey set, handle registration via PreAuthKeys
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
node, err := h.state.GetNodeByNodeKey(regReq.NodeKey)
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, fmt.Errorf("looking up node in database: %w", err)
|
||||
}
|
||||
|
||||
if node != nil {
|
||||
// If an existing node is trying to register with an auth key,
|
||||
// we need to validate the auth key even for existing nodes
|
||||
if regReq.Auth != nil && regReq.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(writer, regReq, machineKey)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the node is waiting for interactive login.
|
||||
//
|
||||
// TODO(juan): We could use this field to improve our protocol implementation,
|
||||
// and hold the request until the client closes it, or the interactive
|
||||
// login is completed (i.e., the user registers the node).
|
||||
// This is not implemented yet, as it is not strictly required. The only side-effect
|
||||
// is that the client will hammer headscale with requests until it gets a
|
||||
// successful RegisterResponse.
|
||||
if regReq.Followup != "" {
|
||||
logTrace("register request is a followup")
|
||||
if _, ok := h.registrationCache.Get(machineKey.String()); ok {
|
||||
logTrace("Node is waiting for interactive login")
|
||||
|
||||
select {
|
||||
case <-req.Context().Done():
|
||||
return
|
||||
case <-time.After(registrationHoldoff):
|
||||
h.handleNewNode(writer, regReq, machineKey)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logInfo("Node not found in database, creating new")
|
||||
|
||||
// The node did not have a key to authenticate, which means
|
||||
// that we rely on a method that calls back some how (OpenID or CLI)
|
||||
// We create the node and then keep it around until a callback
|
||||
// happens
|
||||
newNode := types.Node{
|
||||
MachineKey: machineKey,
|
||||
Hostname: regReq.Hostinfo.Hostname,
|
||||
NodeKey: regReq.NodeKey,
|
||||
LastSeen: &now,
|
||||
Expiry: &time.Time{},
|
||||
}
|
||||
|
||||
if !regReq.Expiry.IsZero() {
|
||||
logTrace("Non-zero expiry time requested")
|
||||
newNode.Expiry = ®Req.Expiry
|
||||
}
|
||||
|
||||
h.registrationCache.Set(
|
||||
machineKey.String(),
|
||||
newNode,
|
||||
)
|
||||
|
||||
h.handleNewNode(writer, regReq, machineKey)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The node is already in the DB. This could mean one of the following:
|
||||
// - The node is authenticated and ready to /map
|
||||
// - We are doing a key refresh
|
||||
// - The node is logged out (or expired) and pending to be authorized. TODO(juan): We need to keep alive the connection here
|
||||
if node != nil {
|
||||
// (juan): For a while we had a bug where we were not storing the MachineKey for the nodes using the TS2021,
|
||||
// due to a misunderstanding of the protocol https://github.com/juanfont/headscale/issues/1054
|
||||
// So if we have a not valid MachineKey (but we were able to fetch the node with the NodeKeys), we update it.
|
||||
if err != nil || node.MachineKey.IsZero() {
|
||||
if err := h.db.NodeSetMachineKey(node, machineKey); err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "RegistrationHandler").
|
||||
Str("node", node.Hostname).
|
||||
Err(err).
|
||||
Msg("Error saving machine key to database")
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If the NodeKey stored in headscale is the same as the key presented in a registration
|
||||
// request, then we have a node that is either:
|
||||
// - Trying to log out (sending a expiry in the past)
|
||||
// - A valid, registered node, looking for /map
|
||||
// - Expired node wanting to reauthenticate
|
||||
if node.NodeKey.String() == regReq.NodeKey.String() {
|
||||
// The client sends an Expiry in the past if the client is requesting to expire the key (aka logout)
|
||||
// https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648
|
||||
if !regReq.Expiry.IsZero() &&
|
||||
regReq.Expiry.UTC().Before(now) {
|
||||
h.handleNodeLogOut(writer, *node)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// If the node is not expired and is registered, we have already accepted this node,
// so let it proceed with a valid registration
|
||||
if !node.IsExpired() {
|
||||
h.handleNodeWithValidRegistration(writer, *node)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration
|
||||
if node.NodeKey.String() == regReq.OldNodeKey.String() &&
|
||||
!node.IsExpired() {
|
||||
h.handleNodeKeyRefresh(
|
||||
writer,
|
||||
regReq,
|
||||
*node,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// When logged out and reauthenticating with OIDC, the OldNodeKey is not passed, but the NodeKey has changed
|
||||
if node.NodeKey.String() != regReq.NodeKey.String() &&
|
||||
regReq.OldNodeKey.IsZero() && !node.IsExpired() {
|
||||
h.handleNodeKeyRefresh(
|
||||
writer,
|
||||
regReq,
|
||||
*node,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if regReq.Followup != "" {
|
||||
select {
|
||||
case <-req.Context().Done():
|
||||
return
|
||||
case <-time.After(registrationHoldoff):
|
||||
}
|
||||
}
|
||||
|
||||
// The node has expired or it is logged out
|
||||
h.handleNodeExpiredOrLoggedOut(writer, regReq, *node, machineKey)
|
||||
|
||||
// TODO(juan): RegisterRequest includes an Expiry time, that we could optionally use
|
||||
node.Expiry = &time.Time{}
|
||||
|
||||
// If we are here it means the client needs to be reauthorized,
|
||||
// we need to make sure the NodeKey matches the one in the request
|
||||
// TODO(juan): What happens when using fast user switching between two
|
||||
// headscale-managed tailnets?
|
||||
node.NodeKey = regReq.NodeKey
|
||||
h.registrationCache.Set(
|
||||
machineKey.String(),
|
||||
*node,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// handleAuthKey contains the logic to manage auth key client registration
|
||||
// When using Noise, the machineKey is Zero.
|
||||
func (h *Headscale) handleAuthKey(
|
||||
writer http.ResponseWriter,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) {
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Msgf("Processing auth key for %s", registerRequest.Hostinfo.Hostname)
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
pak, err := h.db.ValidatePreAuthKey(registerRequest.Auth.AuthKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
resp.MachineAuthorized = false
|
||||
|
||||
respBody, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Msg("Authentication key was valid, proceeding to acquire IP addresses")
|
||||
|
||||
nodeKey := registerRequest.NodeKey
|
||||
|
||||
// retrieve node information if it exists
|
||||
// The error is not important, because if it does not
|
||||
// exist, then this is a new node and we will move
|
||||
// on to registration.
|
||||
node, _ := h.db.GetNodeByAnyKey(machineKey, registerRequest.NodeKey, registerRequest.OldNodeKey)
|
||||
if node != nil {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Msg("node was already registered before, refreshing with new auth key")
|
||||
|
||||
node.NodeKey = nodeKey
|
||||
if pak.ID != 0 {
|
||||
node.AuthKeyID = ptr.To(pak.ID)
|
||||
}
|
||||
|
||||
node.Expiry = ®isterRequest.Expiry
|
||||
node.User = pak.User
|
||||
node.UserID = pak.UserID
|
||||
err := h.db.DB.Save(node).Error
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Err(err).
|
||||
Msg("failed to save node after logging in with auth key")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
aclTags := pak.Proto().GetAclTags()
|
||||
if len(aclTags) > 0 {
|
||||
// This conditional preserves the existing behaviour, although SaaS would reset the tags on auth-key login
|
||||
err = h.db.SetTags(node.ID, aclTags)
|
||||
resp, err := h.handleRegisterWithAuthKey(regReq, machineKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Strs("aclTags", aclTags).
|
||||
Err(err).
|
||||
Msg("Failed to set tags after refreshing node")
|
||||
|
||||
return
|
||||
// Preserve HTTPError types so they can be handled properly by the HTTP layer
|
||||
var httpErr HTTPError
|
||||
if errors.As(err, &httpErr) {
|
||||
return nil, httpErr
|
||||
}
|
||||
return nil, fmt.Errorf("handling register with auth key for existing node: %w", err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
ctx := types.NotifyCtx(context.Background(), "handle-authkey", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{Type: types.StatePeerChanged, ChangeNodes: []types.NodeID{node.ID}})
|
||||
} else {
|
||||
now := time.Now().UTC()
|
||||
|
||||
nodeToRegister := types.Node{
|
||||
Hostname: registerRequest.Hostinfo.Hostname,
|
||||
UserID: pak.User.ID,
|
||||
User: pak.User,
|
||||
MachineKey: machineKey,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
Expiry: ®isterRequest.Expiry,
|
||||
NodeKey: nodeKey,
|
||||
LastSeen: &now,
|
||||
ForcedTags: pak.Proto().GetAclTags(),
|
||||
}
|
||||
|
||||
ipv4, ipv6, err := h.ipAlloc.Next()
|
||||
resp, err := h.handleExistingNode(node, regReq, machineKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "RegistrationHandler").
|
||||
Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("failed to allocate IP ")
|
||||
|
||||
return
|
||||
return nil, fmt.Errorf("handling existing node: %w", err)
|
||||
}
|
||||
|
||||
pakID := uint(pak.ID)
|
||||
if pakID != 0 {
|
||||
nodeToRegister.AuthKeyID = ptr.To(pak.ID)
|
||||
}
|
||||
node, err = h.db.RegisterNode(
|
||||
nodeToRegister,
|
||||
ipv4, ipv6,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("could not register node")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = nodesChangedHook(h.db, h.polMan, h.nodeNotifier)
|
||||
if err != nil {
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
err = h.db.Write(func(tx *gorm.DB) error {
|
||||
return db.UsePreAuthKey(tx, pak)
|
||||
})
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to use pre-auth key")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
if regReq.Followup != "" {
|
||||
return h.waitForFollowup(ctx, regReq)
|
||||
}
|
||||
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *pak.User.TailscaleUser()
|
||||
// Provide LoginName when registering with pre-auth key
|
||||
// Otherwise it will need to exec `tailscale up` twice to fetch the *LoginName*
|
||||
resp.Login = *pak.User.TailscaleLogin()
|
||||
|
||||
respBody, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
return
|
||||
}
|
||||
|
||||
log.Info().
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Msg("Successfully authenticated via AuthKey")
|
||||
}
|
||||
|
||||
// handleNewNode returns the authorisation URL to the client based on what type
|
||||
// of registration headscale is configured with.
|
||||
// This url is then showed to the user by the local Tailscale client.
|
||||
func (h *Headscale) handleNewNode(
|
||||
writer http.ResponseWriter,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) {
|
||||
logInfo, logTrace, logErr := logAuthFunc(registerRequest, machineKey)
|
||||
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The node registration is new, redirect the client to the registration URL
|
||||
logTrace("The node seems to be new, sending auth url")
|
||||
|
||||
resp.AuthURL = h.authProvider.AuthURL(machineKey)
|
||||
|
||||
respBody, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
logErr(err, "Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
logErr(err, "Failed to write response")
|
||||
}
|
||||
|
||||
logInfo(fmt.Sprintf("Successfully sent auth url: %s", resp.AuthURL))
|
||||
}
|
||||
|
||||
func (h *Headscale) handleNodeLogOut(
|
||||
writer http.ResponseWriter,
|
||||
node types.Node,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
log.Info().
|
||||
Str("node", node.Hostname).
|
||||
Msg("Client requested logout")
|
||||
|
||||
now := time.Now()
|
||||
err := h.db.NodeSetExpiry(node.ID, now)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to expire node")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx := types.NotifyCtx(context.Background(), "logout-expiry", "na")
|
||||
h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, now), node.ID)
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = false
|
||||
resp.NodeKeyExpired = true
|
||||
resp.User = *node.User.TailscaleUser()
|
||||
respBody, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if node.IsEphemeral() {
|
||||
changedNodes, err := h.db.DeleteNode(&node, h.nodeNotifier.LikelyConnectedMap())
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("node", node.Hostname).
|
||||
Msg("Cannot delete ephemeral node from the database")
|
||||
}
|
||||
|
||||
ctx := types.NotifyCtx(context.Background(), "logout-ephemeral", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerRemoved,
|
||||
Removed: []types.NodeID{node.ID},
|
||||
})
|
||||
if changedNodes != nil {
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: changedNodes,
|
||||
})
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Info().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Msg("Successfully logged out")
|
||||
}
|
||||
|
||||
func (h *Headscale) handleNodeWithValidRegistration(
|
||||
writer http.ResponseWriter,
|
||||
node types.Node,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The node registration is valid, respond with redirect to /map
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Msg("Client is registered and we have the current NodeKey. All clear to /map")
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *node.User.TailscaleUser()
|
||||
resp.Login = *node.User.TailscaleLogin()
|
||||
|
||||
respBody, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
log.Info().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Msg("Node successfully authorized")
|
||||
}
|
||||
|
||||
func (h *Headscale) handleNodeKeyRefresh(
|
||||
writer http.ResponseWriter,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
node types.Node,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
log.Info().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Msg("We have the OldNodeKey in the database. This is a key refresh")
|
||||
|
||||
err := h.db.Write(func(tx *gorm.DB) error {
|
||||
return db.NodeSetNodeKey(tx, &node, registerRequest.NodeKey)
|
||||
})
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to update machine key in the database")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.User = *node.User.TailscaleUser()
|
||||
respBody, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
log.Info().
|
||||
Caller().
|
||||
Str("node_key", registerRequest.NodeKey.ShortString()).
|
||||
Str("old_node_key", registerRequest.OldNodeKey.ShortString()).
|
||||
Str("node", node.Hostname).
|
||||
Msg("Node key successfully refreshed")
|
||||
}
|
||||
|
||||
func (h *Headscale) handleNodeExpiredOrLoggedOut(
|
||||
writer http.ResponseWriter,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
node types.Node,
|
||||
machineKey key.MachinePublic,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
if regReq.Auth != nil && regReq.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(writer, regReq, machineKey)
|
||||
resp, err := h.handleRegisterWithAuthKey(regReq, machineKey)
|
||||
if err != nil {
|
||||
// Preserve HTTPError types so they can be handled properly by the HTTP layer
|
||||
var httpErr HTTPError
|
||||
if errors.As(err, &httpErr) {
|
||||
return nil, httpErr
|
||||
}
|
||||
return nil, fmt.Errorf("handling register with auth key: %w", err)
|
||||
}
|
||||
|
||||
return
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// The client has registered before, but has expired or logged out
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Str("node_key", regReq.NodeKey.ShortString()).
|
||||
Str("node_key_old", regReq.OldNodeKey.ShortString()).
|
||||
Msg("Node registration has expired or logged out. Sending a auth url to register")
|
||||
|
||||
resp.AuthURL = h.authProvider.AuthURL(machineKey)
|
||||
|
||||
respBody, err := json.Marshal(resp)
|
||||
resp, err := h.handleRegisterInteractive(regReq, machineKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
return nil, fmt.Errorf("handling register interactive: %w", err)
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Str("node_key", regReq.NodeKey.ShortString()).
|
||||
Str("node_key_old", regReq.OldNodeKey.ShortString()).
|
||||
Str("node", node.Hostname).
|
||||
Msg("Node logged out. Sent AuthURL for reauthentication")
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) handleExistingNode(
|
||||
node *types.Node,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
|
||||
if node.MachineKey != machineKey {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "node exist with different machine key", nil)
|
||||
}
|
||||
|
||||
expired := node.IsExpired()
|
||||
|
||||
if !expired && !regReq.Expiry.IsZero() {
|
||||
requestExpiry := regReq.Expiry
|
||||
|
||||
// The client is trying to extend their key, this is not allowed.
|
||||
if requestExpiry.After(time.Now()) {
|
||||
return nil, NewHTTPError(http.StatusBadRequest, "extending key is not allowed", nil)
|
||||
}
|
||||
|
||||
// If the request expiry is in the past, we consider it a logout.
|
||||
if requestExpiry.Before(time.Now()) {
|
||||
if node.IsEphemeral() {
|
||||
c, err := h.state.DeleteNode(node)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("deleting ephemeral node: %w", err)
|
||||
}
|
||||
|
||||
h.Change(c)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
_, c, err := h.state.SetNodeExpiry(node.ID, requestExpiry)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting node expiry: %w", err)
|
||||
}
|
||||
|
||||
h.Change(c)
|
||||
}
|
||||
|
||||
return nodeToRegisterResponse(node), nil
|
||||
}
|
||||
|
||||
func nodeToRegisterResponse(node *types.Node) *tailcfg.RegisterResponse {
|
||||
return &tailcfg.RegisterResponse{
|
||||
// TODO(kradalby): Only send for user-owned nodes
|
||||
// and not tagged nodes when tags is working.
|
||||
User: *node.User.TailscaleUser(),
|
||||
Login: *node.User.TailscaleLogin(),
|
||||
NodeKeyExpired: node.IsExpired(),
|
||||
|
||||
// Headscale does not implement the concept of machine authorization
|
||||
// so we always return true here.
|
||||
// Revisit this if #2176 gets implemented.
|
||||
MachineAuthorized: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) waitForFollowup(
|
||||
ctx context.Context,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
fu, err := url.Parse(regReq.Followup)
|
||||
if err != nil {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "invalid followup URL", err)
|
||||
}
|
||||
|
||||
followupReg, err := types.RegistrationIDFromString(strings.ReplaceAll(fu.Path, "/register/", ""))
|
||||
if err != nil {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "invalid registration ID", err)
|
||||
}
|
||||
|
||||
if reg, ok := h.state.GetRegistrationCacheEntry(followupReg); ok {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "registration timed out", err)
|
||||
case node := <-reg.Registered:
|
||||
if node == nil {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "node not found", nil)
|
||||
}
|
||||
return nodeToRegisterResponse(node), nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, NewHTTPError(http.StatusNotFound, "followup registration not found", nil)
|
||||
}
|
||||
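waitForFollowup above extracts a registration ID from the client-supplied followup URL before looking it up in the registration cache. Below is a standalone sketch of just the URL-parsing step, using a hypothetical helper and example hostname; the real code additionally validates the ID format via types.RegistrationIDFromString.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// followupRegistrationID pulls the ID out of a followup URL of the form
// https://headscale.example.com/register/<id> (hypothetical helper).
func followupRegistrationID(followup string) (string, error) {
	u, err := url.Parse(followup)
	if err != nil {
		return "", fmt.Errorf("invalid followup URL: %w", err)
	}
	id := strings.TrimPrefix(u.Path, "/register/")
	if id == "" || id == u.Path {
		return "", fmt.Errorf("no registration ID in path %q", u.Path)
	}
	return id, nil
}

func main() {
	id, err := followupRegistrationID("https://headscale.example.com/register/abcdef123456")
	if err != nil {
		panic(err)
	}
	fmt.Println("registration ID:", id)
}
```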
|
||||
func (h *Headscale) handleRegisterWithAuthKey(
|
||||
regReq tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
node, changed, policyChanged, err := h.state.HandleNodeFromPreAuthKey(
|
||||
regReq,
|
||||
machineKey,
|
||||
)
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "invalid pre auth key", nil)
|
||||
}
|
||||
var perr types.PAKError
|
||||
if errors.As(err, &perr) {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, perr.Error(), nil)
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If node is nil, it means an ephemeral node was deleted during logout
|
||||
if node == nil {
|
||||
h.Change(changed)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// This is a bit of a back and forth, but we have a bit of a chicken and egg
|
||||
// dependency here.
|
||||
// Because the way the policy manager works, we need to have the node
|
||||
// in the database, then add it to the policy manager and then we can
|
||||
// approve the route. This means we get this dance where the node is
|
||||
// first added to the database, then we add it to the policy manager via
|
||||
// nodesChangedHook and then we can auto approve the routes.
|
||||
// As that only approves the struct object, we need to save it again and
|
||||
// ensure we send an update.
|
||||
// This works, but might be another good candidate for doing some sort of
|
||||
// eventbus.
|
||||
// TODO(kradalby): This needs to be ran as part of the batcher maybe?
|
||||
// now since we dont update the node/pol here anymore
|
||||
routeChange := h.state.AutoApproveRoutes(node)
|
||||
if _, _, err := h.state.SaveNode(node); err != nil {
|
||||
return nil, fmt.Errorf("saving auto approved routes to node: %w", err)
|
||||
}
|
||||
|
||||
if routeChange && changed.Empty() {
|
||||
changed = change.NodeAdded(node.ID)
|
||||
}
|
||||
h.Change(changed)
|
||||
|
||||
// If policy changed due to node registration, send a separate policy change
|
||||
if policyChanged {
|
||||
policyChange := change.PolicyChange()
|
||||
h.Change(policyChange)
|
||||
}
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
MachineAuthorized: true,
|
||||
NodeKeyExpired: node.IsExpired(),
|
||||
User: *node.User.TailscaleUser(),
|
||||
Login: *node.User.TailscaleLogin(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) handleRegisterInteractive(
|
||||
regReq tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
registrationId, err := types.NewRegistrationID()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("generating registration ID: %w", err)
|
||||
}
|
||||
|
||||
nodeToRegister := types.RegisterNode{
|
||||
Node: types.Node{
|
||||
Hostname: regReq.Hostinfo.Hostname,
|
||||
MachineKey: machineKey,
|
||||
NodeKey: regReq.NodeKey,
|
||||
Hostinfo: regReq.Hostinfo,
|
||||
LastSeen: ptr.To(time.Now()),
|
||||
},
|
||||
Registered: make(chan *types.Node),
|
||||
}
|
||||
|
||||
if !regReq.Expiry.IsZero() {
|
||||
nodeToRegister.Node.Expiry = ®Req.Expiry
|
||||
}
|
||||
|
||||
h.state.SetRegistrationCacheEntry(
|
||||
registrationId,
|
||||
nodeToRegister,
|
||||
)
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
AuthURL: h.authProvider.AuthURL(registrationId),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
package hscontrol
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
// // NoiseRegistrationHandler handles the actual registration process of a node.
|
||||
func (ns *noiseServer) NoiseRegistrationHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
log.Trace().Caller().Msgf("Noise registration handler for client %s", req.RemoteAddr)
|
||||
if req.Method != http.MethodPost {
|
||||
http.Error(writer, "Wrong method", http.StatusMethodNotAllowed)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Any("headers", req.Header).
|
||||
Caller().
|
||||
Msg("Headers")
|
||||
|
||||
body, _ := io.ReadAll(req.Body)
|
||||
registerRequest := tailcfg.RegisterRequest{}
|
||||
if err := json.Unmarshal(body, ®isterRequest); err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot parse RegisterRequest")
|
||||
http.Error(writer, "Internal error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Reject unsupported versions
|
||||
if registerRequest.Version < MinimumCapVersion {
|
||||
log.Info().
|
||||
Caller().
|
||||
Int("min_version", int(MinimumCapVersion)).
|
||||
Int("client_version", int(registerRequest.Version)).
|
||||
Msg("unsupported client connected")
|
||||
http.Error(writer, "Internal error", http.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ns.nodeKey = registerRequest.NodeKey
|
||||
|
||||
ns.headscale.handleRegister(writer, req, registerRequest, ns.conn.Peer())
|
||||
}
|
||||
107 hscontrol/capver/capver.go Normal file
@@ -0,0 +1,107 @@
|
||||
package capver
|
||||
|
||||
//go:generate go run ../../tools/capver/main.go
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
xmaps "golang.org/x/exp/maps"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
|
||||
const MinSupportedCapabilityVersion tailcfg.CapabilityVersion = 90
|
||||
|
||||
// CanOldCodeBeCleanedUp is intended to be called on startup to see if
// there is old code that can be cleaned up; entries should contain
// a CapVer at which something can be cleaned up, and panic if it can.
// This is only intended to catch things in tests.
//
// All uses of capability version checks should be listed here.
func CanOldCodeBeCleanedUp() {
if MinSupportedCapabilityVersion >= 111 {
panic("LegacyDERP can be cleaned up in tail.go")
}
}
|
||||
func tailscaleVersSorted() []string {
|
||||
vers := xmaps.Keys(tailscaleToCapVer)
|
||||
sort.Strings(vers)
|
||||
return vers
|
||||
}
|
||||
|
||||
func capVersSorted() []tailcfg.CapabilityVersion {
|
||||
capVers := xmaps.Keys(capVerToTailscaleVer)
|
||||
slices.Sort(capVers)
|
||||
return capVers
|
||||
}
|
||||
|
||||
// TailscaleVersion returns the Tailscale version for the given CapabilityVersion.
|
||||
func TailscaleVersion(ver tailcfg.CapabilityVersion) string {
|
||||
return capVerToTailscaleVer[ver]
|
||||
}
|
||||
|
||||
// CapabilityVersion returns the CapabilityVersion for the given Tailscale version.
|
||||
func CapabilityVersion(ver string) tailcfg.CapabilityVersion {
|
||||
if !strings.HasPrefix(ver, "v") {
|
||||
ver = "v" + ver
|
||||
}
|
||||
return tailscaleToCapVer[ver]
|
||||
}
|
||||
|
||||
// TailscaleLatest returns the n latest Tailscale versions.
|
||||
func TailscaleLatest(n int) []string {
|
||||
if n <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tsSorted := tailscaleVersSorted()
|
||||
|
||||
if n > len(tsSorted) {
|
||||
return tsSorted
|
||||
}
|
||||
|
||||
return tsSorted[len(tsSorted)-n:]
|
||||
}
|
||||
|
||||
// TailscaleLatestMajorMinor returns the n latest Tailscale versions (e.g. 1.80).
|
||||
func TailscaleLatestMajorMinor(n int, stripV bool) []string {
|
||||
if n <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
majors := set.Set[string]{}
|
||||
for _, vers := range tailscaleVersSorted() {
|
||||
if stripV {
|
||||
vers = strings.TrimPrefix(vers, "v")
|
||||
}
|
||||
v := strings.Split(vers, ".")
|
||||
majors.Add(v[0] + "." + v[1])
|
||||
}
|
||||
|
||||
majorSl := majors.Slice()
|
||||
sort.Strings(majorSl)
|
||||
|
||||
if n > len(majorSl) {
|
||||
return majorSl
|
||||
}
|
||||
|
||||
return majorSl[len(majorSl)-n:]
|
||||
}
|
||||
|
||||
// CapVerLatest returns the n latest CapabilityVersions.
|
||||
func CapVerLatest(n int) []tailcfg.CapabilityVersion {
|
||||
if n <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
s := capVersSorted()
|
||||
|
||||
if n > len(s) {
|
||||
return s
|
||||
}
|
||||
|
||||
return s[len(s)-n:]
|
||||
}
|
||||
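The package above gives two lookup directions (capability version to release, release to capability version) plus "latest n" helpers. A minimal usage sketch, written as a standalone program that assumes only the exported API shown above; the sample client version is made up:

package main

import (
	"fmt"

	"github.com/juanfont/headscale/hscontrol/capver"
	"tailscale.com/tailcfg"
)

func main() {
	// Hypothetical capability version reported by a connecting client.
	clientVer := tailcfg.CapabilityVersion(95)

	if clientVer < capver.MinSupportedCapabilityVersion {
		fmt.Printf("client too old, needs at least Tailscale %s\n",
			capver.TailscaleVersion(capver.MinSupportedCapabilityVersion))
		return
	}

	// Map the client's capability version back to the oldest release that ships it.
	fmt.Println("client corresponds to Tailscale", capver.TailscaleVersion(clientVer))

	// Inspect the most recent releases and capability versions the table knows about.
	fmt.Println("latest releases:", capver.TailscaleLatest(3))
	fmt.Println("latest capability versions:", capver.CapVerLatest(3))
}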
52  hscontrol/capver/capver_generated.go  Normal file
@@ -0,0 +1,52 @@
package capver

//Generated DO NOT EDIT

import "tailscale.com/tailcfg"

var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{
	"v1.64.0": 90,
	"v1.64.1": 90,
	"v1.64.2": 90,
	"v1.66.0": 95,
	"v1.66.1": 95,
	"v1.66.2": 95,
	"v1.66.3": 95,
	"v1.66.4": 95,
	"v1.68.0": 97,
	"v1.68.1": 97,
	"v1.68.2": 97,
	"v1.70.0": 102,
	"v1.72.0": 104,
	"v1.72.1": 104,
	"v1.74.0": 106,
	"v1.74.1": 106,
	"v1.76.0": 106,
	"v1.76.1": 106,
	"v1.76.6": 106,
	"v1.78.0": 109,
	"v1.78.1": 109,
	"v1.80.0": 113,
	"v1.80.1": 113,
	"v1.80.2": 113,
	"v1.80.3": 113,
	"v1.82.0": 115,
	"v1.82.5": 115,
	"v1.84.0": 116,
	"v1.84.1": 116,
	"v1.84.2": 116,
}


var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{
	90:  "v1.64.0",
	95:  "v1.66.0",
	97:  "v1.68.0",
	102: "v1.70.0",
	104: "v1.72.0",
	106: "v1.74.0",
	109: "v1.78.0",
	113: "v1.80.0",
	115: "v1.82.0",
	116: "v1.84.0",
}
65  hscontrol/capver/capver_test.go  Normal file
@@ -0,0 +1,65 @@
package capver

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"tailscale.com/tailcfg"
)

func TestTailscaleLatestMajorMinor(t *testing.T) {
	tests := []struct {
		n        int
		stripV   bool
		expected []string
	}{
		{3, false, []string{"v1.80", "v1.82", "v1.84"}},
		{2, true, []string{"1.82", "1.84"}},
		// Lazy way to see all supported versions
		{10, true, []string{
			"1.66",
			"1.68",
			"1.70",
			"1.72",
			"1.74",
			"1.76",
			"1.78",
			"1.80",
			"1.82",
			"1.84",
		}},
		{0, false, nil},
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			output := TailscaleLatestMajorMinor(test.n, test.stripV)
			if diff := cmp.Diff(output, test.expected); diff != "" {
				t.Errorf("TailscaleLatestMajorMinor(%d, %v) mismatch (-want +got):\n%s", test.n, test.stripV, diff)
			}
		})
	}
}

func TestCapVerMinimumTailscaleVersion(t *testing.T) {
	tests := []struct {
		input    tailcfg.CapabilityVersion
		expected string
	}{
		{90, "v1.64.0"},
		{95, "v1.66.0"},
		{106, "v1.74.0"},
		{109, "v1.78.0"},
		{9001, ""}, // Test case for a version higher than any in the map
		{60, ""},   // Test case for a version lower than any in the map
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			output := TailscaleVersion(test.input)
			if output != test.expected {
				t.Errorf("CapVerFromTailscaleVersion(%d) = %s; want %s", test.input, output, test.expected)
			}
		})
	}
}
@@ -3,34 +3,50 @@ package db
import (
	"context"
	"database/sql"
	_ "embed"
	"encoding/json"
	"errors"
	"fmt"
	"net/netip"
	"path/filepath"
	"slices"
	"strconv"
	"strings"
	"time"

	"github.com/glebarez/sqlite"
	"github.com/go-gormigrate/gormigrate/v2"
	"github.com/juanfont/headscale/hscontrol/db/sqliteconfig"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	"github.com/tailscale/squibble"
	"gorm.io/driver/postgres"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
	"gorm.io/gorm/schema"
	"tailscale.com/net/tsaddr"
	"tailscale.com/util/set"
	"zgo.at/zcache/v2"
)

//go:embed schema.sql
var dbSchema string

func init() {
	schema.RegisterSerializer("text", TextSerialiser{})
}

var errDatabaseNotSupported = errors.New("database type not supported")

var errForeignKeyConstraintsViolated = errors.New("foreign key constraints violated")

const (
	maxIdleConns       = 100
	maxOpenConns       = 100
	contextTimeoutSecs = 10
)

// KV is a key-value store in a psql table. For future use...
// TODO(kradalby): Is this used for anything?
type KV struct {
@@ -41,7 +57,7 @@ type KV struct {
type HSDatabase struct {
	DB  *gorm.DB
	cfg *types.DatabaseConfig
	regCache *zcache.Cache[string, types.Node]
	regCache *zcache.Cache[types.RegistrationID, types.RegisterNode]

	baseDomain string
}
@@ -51,7 +67,7 @@ type HSDatabase struct {
func NewHeadscaleDatabase(
	cfg types.DatabaseConfig,
	baseDomain string,
	regCache *zcache.Cache[string, types.Node],
	regCache *zcache.Cache[types.RegistrationID, types.RegisterNode],
) (*HSDatabase, error) {
	dbConn, err := openDB(cfg)
	if err != nil {
@@ -103,7 +119,7 @@ func NewHeadscaleDatabase(

	dbConn.Model(&types.Node{}).Where("auth_key_id = ?", 0).Update("auth_key_id", nil)
	// If the Node table has a column for registered,
	// find all occourences of "false" and drop them. Then
	// find all occurrences of "false" and drop them. Then
	// remove the column.
	if tx.Migrator().HasColumn(&types.Node{}, "registered") {
		log.Info().
@@ -469,6 +485,7 @@ func NewHeadscaleDatabase(

	// Drop the old table.
	_ = tx.Migrator().DropTable(&preAuthKeyACLTag{})

	return nil
},
Rollback: func(db *gorm.DB) error { return nil },
@@ -478,9 +495,41 @@ func NewHeadscaleDatabase(
// populate the user with more interesting information.
ID: "202407191627",
Migrate: func(tx *gorm.DB) error {
	// Fix an issue where the automigration in GORM expected a constraint to
	// exist that didn't, and add the one it wanted.
	// Fixes https://github.com/juanfont/headscale/issues/2351
	if cfg.Type == types.DatabasePostgres {
		err := tx.Exec(`
BEGIN;
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_constraint
        WHERE conname = 'uni_users_name'
    ) THEN
        ALTER TABLE users ADD CONSTRAINT uni_users_name UNIQUE (name);
    END IF;
END $$;

DO $$
BEGIN
    IF EXISTS (
        SELECT 1 FROM pg_constraint
        WHERE conname = 'users_name_key'
    ) THEN
        ALTER TABLE users DROP CONSTRAINT users_name_key;
    END IF;
END $$;
COMMIT;
`).Error
		if err != nil {
			return fmt.Errorf("failed to rename constraint: %w", err)
		}
	}

	err := tx.AutoMigrate(&types.User{})
	if err != nil {
		return err
		return fmt.Errorf("automigrating types.User: %w", err)
	}

	return nil
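The migration above guards its ALTER TABLE statements with checks against pg_constraint so it stays idempotent on databases that already carry the expected constraint. A minimal sketch of the same pattern as a self-contained gormigrate step; the migration ID, constraint name, and column are hypothetical and only illustrate the shape:

package db

import (
	"fmt"

	"github.com/go-gormigrate/gormigrate/v2"
	"gorm.io/gorm"
)

// addUniqueEmailConstraint adds a unique constraint only if it is missing, so
// re-running the migration against an already-correct database is a no-op.
var addUniqueEmailConstraint = &gormigrate.Migration{
	ID: "209901010000", // hypothetical ID
	Migrate: func(tx *gorm.DB) error {
		err := tx.Exec(`
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_constraint
        WHERE conname = 'uni_users_email'
    ) THEN
        ALTER TABLE users ADD CONSTRAINT uni_users_email UNIQUE (email);
    END IF;
END $$;
`).Error
		if err != nil {
			return fmt.Errorf("adding unique email constraint: %w", err)
		}

		return nil
	},
	Rollback: func(db *gorm.DB) error { return nil },
}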
@@ -495,7 +544,7 @@ func NewHeadscaleDatabase(
Migrate: func(tx *gorm.DB) error {
	err := tx.AutoMigrate(&types.User{})
	if err != nil {
		return err
		return fmt.Errorf("automigrating types.User: %w", err)
	}

	// Set up indexes and unique constraints outside of GORM, it does not support
@@ -521,6 +570,372 @@ func NewHeadscaleDatabase(
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
{
|
||||
// Add a constraint to routes ensuring they cannot exist without a node.
|
||||
ID: "202501221827",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
// Remove any invalid routes associated with a node that does not exist.
|
||||
if tx.Migrator().HasTable(&types.Route{}) && tx.Migrator().HasTable(&types.Node{}) {
|
||||
err := tx.Exec("delete from routes where node_id not in (select id from nodes)").Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Remove any invalid routes without a node_id.
|
||||
if tx.Migrator().HasTable(&types.Route{}) {
|
||||
err := tx.Exec("delete from routes where node_id is null").Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := tx.AutoMigrate(&types.Route{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("automigrating types.Route: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
// Add back constraint so you cannot delete preauth keys that
|
||||
// are still in use by a node.
|
||||
{
|
||||
ID: "202501311657",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
err := tx.AutoMigrate(&types.PreAuthKey{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("automigrating types.PreAuthKey: %w", err)
|
||||
}
|
||||
err = tx.AutoMigrate(&types.Node{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("automigrating types.Node: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
// Ensure there are no nodes referring to a deleted preauthkey.
|
||||
{
|
||||
ID: "202502070949",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
if tx.Migrator().HasTable(&types.PreAuthKey{}) {
|
||||
err := tx.Exec(`
|
||||
UPDATE nodes
|
||||
SET auth_key_id = NULL
|
||||
WHERE auth_key_id IS NOT NULL
|
||||
AND auth_key_id NOT IN (
|
||||
SELECT id FROM pre_auth_keys
|
||||
);
|
||||
`).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting auth_key to null on nodes with non-existing keys: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
// Migrate all routes from the Route table to the new field ApprovedRoutes
|
||||
// in the Node table. Then drop the Route table.
|
||||
{
|
||||
ID: "202502131714",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
if !tx.Migrator().HasColumn(&types.Node{}, "approved_routes") {
|
||||
err := tx.Migrator().AddColumn(&types.Node{}, "approved_routes")
|
||||
if err != nil {
|
||||
return fmt.Errorf("adding column types.Node: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
nodeRoutes := map[uint64][]netip.Prefix{}
|
||||
|
||||
var routes []types.Route
|
||||
err = tx.Find(&routes).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching routes: %w", err)
|
||||
}
|
||||
|
||||
for _, route := range routes {
|
||||
if route.Enabled {
|
||||
nodeRoutes[route.NodeID] = append(nodeRoutes[route.NodeID], route.Prefix)
|
||||
}
|
||||
}
|
||||
|
||||
for nodeID, routes := range nodeRoutes {
|
||||
tsaddr.SortPrefixes(routes)
|
||||
routes = slices.Compact(routes)
|
||||
|
||||
data, err := json.Marshal(routes)
|
||||
|
||||
err = tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", data).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("saving approved routes to new column: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Drop the old table.
|
||||
_ = tx.Migrator().DropTable(&types.Route{})
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
{
|
||||
ID: "202502171819",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
// This migration originally removed the last_seen column
|
||||
// from the node table, but it was added back in
|
||||
// 202505091439.
|
||||
return nil
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
// Add back last_seen column to node table.
|
||||
{
|
||||
ID: "202505091439",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
// Add back last_seen column to node table if it does not exist.
|
||||
// This is a workaround for the fact that the last_seen column
|
||||
// was removed in the 202502171819 migration, but only for some
|
||||
// beta testers.
|
||||
if !tx.Migrator().HasColumn(&types.Node{}, "last_seen") {
|
||||
_ = tx.Migrator().AddColumn(&types.Node{}, "last_seen")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
// Fix the provider identifier for users that have a double slash in the
|
||||
// provider identifier.
|
||||
{
|
||||
ID: "202505141324",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
users, err := ListUsers(tx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("listing users: %w", err)
|
||||
}
|
||||
|
||||
for _, user := range users {
|
||||
user.ProviderIdentifier.String = types.CleanIdentifier(user.ProviderIdentifier.String)
|
||||
|
||||
err := tx.Save(user).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("saving user: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
// Schema migration to ensure all tables match the expected schema.
|
||||
// This migration recreates all tables to match the exact structure in schema.sql,
|
||||
// preserving all data during the process.
|
||||
// Only SQLite will be migrated for consistency.
|
||||
{
|
||||
ID: "202507021200",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
// Only run on SQLite
|
||||
if cfg.Type != types.DatabaseSqlite {
|
||||
log.Info().Msg("Skipping schema migration on non-SQLite database")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Info().Msg("Starting schema recreation with table renaming")
|
||||
|
||||
// Rename existing tables to _old versions
|
||||
tablesToRename := []string{"users", "pre_auth_keys", "api_keys", "nodes", "policies"}
|
||||
|
||||
// Check if routes table exists and drop it (should have been migrated already)
|
||||
var routesExists bool
|
||||
err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='routes'").Row().Scan(&routesExists)
|
||||
if err == nil && routesExists {
|
||||
log.Info().Msg("Dropping leftover routes table")
|
||||
if err := tx.Exec("DROP TABLE routes").Error; err != nil {
|
||||
return fmt.Errorf("dropping routes table: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Drop all indexes first to avoid conflicts
|
||||
indexesToDrop := []string{
|
||||
"idx_users_deleted_at",
|
||||
"idx_provider_identifier",
|
||||
"idx_name_provider_identifier",
|
||||
"idx_name_no_provider_identifier",
|
||||
"idx_api_keys_prefix",
|
||||
"idx_policies_deleted_at",
|
||||
}
|
||||
|
||||
for _, index := range indexesToDrop {
|
||||
_ = tx.Exec("DROP INDEX IF EXISTS " + index).Error
|
||||
}
|
||||
|
||||
for _, table := range tablesToRename {
|
||||
// Check if table exists before renaming
|
||||
var exists bool
|
||||
err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=?", table).Row().Scan(&exists)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking if table %s exists: %w", table, err)
|
||||
}
|
||||
|
||||
if exists {
|
||||
// Drop old table if it exists from previous failed migration
|
||||
_ = tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error
|
||||
|
||||
// Rename current table to _old
|
||||
if err := tx.Exec("ALTER TABLE " + table + " RENAME TO " + table + "_old").Error; err != nil {
|
||||
return fmt.Errorf("renaming table %s to %s_old: %w", table, table, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create new tables with correct schema
|
||||
tableCreationSQL := []string{
|
||||
`CREATE TABLE users(
|
||||
id integer PRIMARY KEY AUTOINCREMENT,
|
||||
name text,
|
||||
display_name text,
|
||||
email text,
|
||||
provider_identifier text,
|
||||
provider text,
|
||||
profile_pic_url text,
|
||||
created_at datetime,
|
||||
updated_at datetime,
|
||||
deleted_at datetime
|
||||
)`,
|
||||
`CREATE TABLE pre_auth_keys(
|
||||
id integer PRIMARY KEY AUTOINCREMENT,
|
||||
key text,
|
||||
user_id integer,
|
||||
reusable numeric,
|
||||
ephemeral numeric DEFAULT false,
|
||||
used numeric DEFAULT false,
|
||||
tags text,
|
||||
expiration datetime,
|
||||
created_at datetime,
|
||||
CONSTRAINT fk_pre_auth_keys_user FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE SET NULL
|
||||
)`,
|
||||
`CREATE TABLE api_keys(
|
||||
id integer PRIMARY KEY AUTOINCREMENT,
|
||||
prefix text,
|
||||
hash blob,
|
||||
expiration datetime,
|
||||
last_seen datetime,
|
||||
created_at datetime
|
||||
)`,
|
||||
`CREATE TABLE nodes(
|
||||
id integer PRIMARY KEY AUTOINCREMENT,
|
||||
machine_key text,
|
||||
node_key text,
|
||||
disco_key text,
|
||||
endpoints text,
|
||||
host_info text,
|
||||
ipv4 text,
|
||||
ipv6 text,
|
||||
hostname text,
|
||||
given_name varchar(63),
|
||||
user_id integer,
|
||||
register_method text,
|
||||
forced_tags text,
|
||||
auth_key_id integer,
|
||||
last_seen datetime,
|
||||
expiry datetime,
|
||||
approved_routes text,
|
||||
created_at datetime,
|
||||
updated_at datetime,
|
||||
deleted_at datetime,
|
||||
CONSTRAINT fk_nodes_user FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE CASCADE,
|
||||
CONSTRAINT fk_nodes_auth_key FOREIGN KEY(auth_key_id) REFERENCES pre_auth_keys(id)
|
||||
)`,
|
||||
`CREATE TABLE policies(
|
||||
id integer PRIMARY KEY AUTOINCREMENT,
|
||||
data text,
|
||||
created_at datetime,
|
||||
updated_at datetime,
|
||||
deleted_at datetime
|
||||
)`,
|
||||
}
|
||||
|
||||
for _, createSQL := range tableCreationSQL {
|
||||
if err := tx.Exec(createSQL).Error; err != nil {
|
||||
return fmt.Errorf("creating new table: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy data directly using SQL
|
||||
dataCopySQL := []string{
|
||||
`INSERT INTO users (id, name, display_name, email, provider_identifier, provider, profile_pic_url, created_at, updated_at, deleted_at)
|
||||
SELECT id, name, display_name, email, provider_identifier, provider, profile_pic_url, created_at, updated_at, deleted_at
|
||||
FROM users_old`,
|
||||
|
||||
`INSERT INTO pre_auth_keys (id, key, user_id, reusable, ephemeral, used, tags, expiration, created_at)
|
||||
SELECT id, key, user_id, reusable, ephemeral, used, tags, expiration, created_at
|
||||
FROM pre_auth_keys_old`,
|
||||
|
||||
`INSERT INTO api_keys (id, prefix, hash, expiration, last_seen, created_at)
|
||||
SELECT id, prefix, hash, expiration, last_seen, created_at
|
||||
FROM api_keys_old`,
|
||||
|
||||
`INSERT INTO nodes (id, machine_key, node_key, disco_key, endpoints, host_info, ipv4, ipv6, hostname, given_name, user_id, register_method, forced_tags, auth_key_id, last_seen, expiry, approved_routes, created_at, updated_at, deleted_at)
|
||||
SELECT id, machine_key, node_key, disco_key, endpoints, host_info, ipv4, ipv6, hostname, given_name, user_id, register_method, forced_tags, auth_key_id, last_seen, expiry, approved_routes, created_at, updated_at, deleted_at
|
||||
FROM nodes_old`,
|
||||
|
||||
`INSERT INTO policies (id, data, created_at, updated_at, deleted_at)
|
||||
SELECT id, data, created_at, updated_at, deleted_at
|
||||
FROM policies_old`,
|
||||
}
|
||||
|
||||
for _, copySQL := range dataCopySQL {
|
||||
if err := tx.Exec(copySQL).Error; err != nil {
|
||||
return fmt.Errorf("copying data: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create indexes
|
||||
indexes := []string{
|
||||
"CREATE INDEX idx_users_deleted_at ON users(deleted_at)",
|
||||
`CREATE UNIQUE INDEX idx_provider_identifier ON users(
|
||||
provider_identifier
|
||||
) WHERE provider_identifier IS NOT NULL`,
|
||||
`CREATE UNIQUE INDEX idx_name_provider_identifier ON users(
|
||||
name,
|
||||
provider_identifier
|
||||
)`,
|
||||
`CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users(
|
||||
name
|
||||
) WHERE provider_identifier IS NULL`,
|
||||
"CREATE UNIQUE INDEX idx_api_keys_prefix ON api_keys(prefix)",
|
||||
"CREATE INDEX idx_policies_deleted_at ON policies(deleted_at)",
|
||||
}
|
||||
|
||||
for _, indexSQL := range indexes {
|
||||
if err := tx.Exec(indexSQL).Error; err != nil {
|
||||
return fmt.Errorf("creating index: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Drop old tables only after everything succeeds
|
||||
for _, table := range tablesToRename {
|
||||
if err := tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error; err != nil {
|
||||
log.Warn().Str("table", table+"_old").Err(err).Msg("Failed to drop old table, but migration succeeded")
|
||||
}
|
||||
}
|
||||
|
||||
log.Info().Msg("Schema recreation completed successfully")
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
// From this point, the following rules must be followed:
|
||||
// - NEVER use gorm.AutoMigrate, write the exact migration steps needed
|
||||
// - AutoMigrate depends on the struct staying exactly the same, which it won't over time.
|
||||
// - Never write migrations that require foreign keys to be disabled.
|
||||
},
|
||||
)
|
||||
|
||||
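The large SQLite migration above follows a rename, recreate, copy, drop sequence so the final tables match schema.sql exactly while keeping all existing rows. A minimal sketch of that sequence reduced to one hypothetical table; table and column names are illustrative, and it assumes a *gorm.DB transaction like the migrations above:

package db

import (
	"fmt"

	"gorm.io/gorm"
)

// recreateWidgetsTable rebuilds a single table against a known-good schema
// while preserving its rows: park the old table, create the new one, copy the
// data across, and only then drop the parked copy.
func recreateWidgetsTable(tx *gorm.DB) error {
	steps := []string{
		// Park the existing table under a temporary name.
		`ALTER TABLE widgets RENAME TO widgets_old`,
		// Create the table again with the exact schema we want.
		`CREATE TABLE widgets(
			id integer PRIMARY KEY AUTOINCREMENT,
			name text,
			created_at datetime
		)`,
		// Copy the data across column by column.
		`INSERT INTO widgets (id, name, created_at)
		 SELECT id, name, created_at FROM widgets_old`,
		// Drop the parked copy only after everything else has succeeded.
		`DROP TABLE widgets_old`,
	}

	for _, stmt := range steps {
		if err := tx.Exec(stmt).Error; err != nil {
			return fmt.Errorf("recreating widgets table: %w", err)
		}
	}

	return nil
}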
@@ -528,6 +943,30 @@ func NewHeadscaleDatabase(
	log.Fatal().Err(err).Msgf("Migration failed: %v", err)
}

// Validate that the schema ends up in the expected state.
// This is currently only done on sqlite as squibble does not
// support Postgres and we use our sqlite schema as our source of
// truth.
if cfg.Type == types.DatabaseSqlite {
	sqlConn, err := dbConn.DB()
	if err != nil {
		return nil, fmt.Errorf("getting DB from gorm: %w", err)
	}

	// or else it blocks...
	sqlConn.SetMaxIdleConns(maxIdleConns)
	sqlConn.SetMaxOpenConns(maxOpenConns)
	defer sqlConn.SetMaxIdleConns(1)
	defer sqlConn.SetMaxOpenConns(1)

	ctx, cancel := context.WithTimeout(context.Background(), contextTimeoutSecs*time.Second)
	defer cancel()

	if err := squibble.Validate(ctx, sqlConn, dbSchema); err != nil {
		return nil, fmt.Errorf("validating schema: %w", err)
	}
}

db := HSDatabase{
	DB:  dbConn,
	cfg: &cfg,
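The validation step above compares the live SQLite schema against the embedded schema.sql and fails startup when they diverge, which catches drift between the hand-written migrations and the canonical schema. A small sketch of that check in isolation, assuming an already-open *sql.DB; the wrapper function and timeout value are hypothetical:

package db

import (
	"context"
	"database/sql"
	"fmt"
	"time"

	"github.com/tailscale/squibble"
)

// validateSchema fails fast if the live database no longer matches the
// expected schema text (for headscale, the embedded schema.sql).
func validateSchema(sqlConn *sql.DB, schema string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if err := squibble.Validate(ctx, sqlConn, schema); err != nil {
		return fmt.Errorf("validating schema: %w", err)
	}

	return nil
}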
@@ -561,34 +1000,28 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
|
||||
Str("path", cfg.Sqlite.Path).
|
||||
Msg("Opening database")
|
||||
|
||||
// Build SQLite configuration with pragmas set at connection time
|
||||
sqliteConfig := sqliteconfig.Default(cfg.Sqlite.Path)
|
||||
if cfg.Sqlite.WriteAheadLog {
|
||||
sqliteConfig.JournalMode = sqliteconfig.JournalModeWAL
|
||||
sqliteConfig.WALAutocheckpoint = cfg.Sqlite.WALAutoCheckPoint
|
||||
}
|
||||
|
||||
connectionURL, err := sqliteConfig.ToURL()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("building sqlite connection URL: %w", err)
|
||||
}
|
||||
|
||||
db, err := gorm.Open(
|
||||
sqlite.Open(cfg.Sqlite.Path),
|
||||
sqlite.Open(connectionURL),
|
||||
&gorm.Config{
|
||||
PrepareStmt: cfg.Gorm.PrepareStmt,
|
||||
Logger: dbLogger,
|
||||
},
|
||||
)
|
||||
|
||||
if err := db.Exec(`
|
||||
PRAGMA foreign_keys=ON;
|
||||
PRAGMA busy_timeout=10000;
|
||||
PRAGMA auto_vacuum=INCREMENTAL;
|
||||
PRAGMA synchronous=NORMAL;
|
||||
`).Error; err != nil {
|
||||
return nil, fmt.Errorf("enabling foreign keys: %w", err)
|
||||
}
|
||||
|
||||
if cfg.Sqlite.WriteAheadLog {
|
||||
if err := db.Exec(fmt.Sprintf(`
|
||||
PRAGMA journal_mode=WAL;
|
||||
PRAGMA wal_autocheckpoint=%d;
|
||||
`, cfg.Sqlite.WALAutoCheckPoint)).Error; err != nil {
|
||||
return nil, fmt.Errorf("setting WAL mode: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// The pure Go SQLite library does not handle locking in
|
||||
// the same way as the C based one and we cant use the gorm
|
||||
// the same way as the C based one and we can't use the gorm
|
||||
// connection pool as of 2022/02/23.
|
||||
sqlDB, _ := db.DB()
|
||||
sqlDB.SetMaxIdleConns(1)
|
||||
@@ -615,7 +1048,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
|
||||
dbString += " sslmode=disable"
|
||||
}
|
||||
} else {
|
||||
dbString += fmt.Sprintf(" sslmode=%s", cfg.Postgres.Ssl)
|
||||
dbString += " sslmode=" + cfg.Postgres.Ssl
|
||||
}
|
||||
|
||||
if cfg.Postgres.Port != 0 {
|
||||
@@ -623,7 +1056,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
|
||||
}
|
||||
|
||||
if cfg.Postgres.Pass != "" {
|
||||
dbString += fmt.Sprintf(" password=%s", cfg.Postgres.Pass)
|
||||
dbString += " password=" + cfg.Postgres.Pass
|
||||
}
|
||||
|
||||
db, err := gorm.Open(postgres.Open(dbString), &gorm.Config{
|
||||
@@ -651,29 +1084,84 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
|
||||
}
|
||||
|
||||
func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormigrate.Gormigrate) error {
|
||||
// Turn off foreign keys for the duration of the migration if using sqllite to
|
||||
// prevent data loss due to the way the GORM migrator handles certain schema
|
||||
// changes.
|
||||
if cfg.Type == types.DatabaseSqlite {
|
||||
var fkEnabled int
|
||||
if err := dbConn.Raw("PRAGMA foreign_keys").Scan(&fkEnabled).Error; err != nil {
|
||||
// SQLite: Run migrations step-by-step, only disabling foreign keys when necessary
|
||||
|
||||
// List of migration IDs that require foreign keys to be disabled
|
||||
// These are migrations that perform complex schema changes that GORM cannot handle safely with FK enabled
|
||||
// NO NEW MIGRATIONS SHOULD BE ADDED HERE. ALL NEW MIGRATIONS MUST RUN WITH FOREIGN KEYS ENABLED.
|
||||
migrationsRequiringFKDisabled := map[string]bool{
|
||||
"202312101416": true, // Initial migration with complex table/column renames
|
||||
"202402151347": true, // Migration that removes last_successful_update column
|
||||
"2024041121742": true, // Migration that changes IP address storage format
|
||||
"202407191627": true, // User table automigration with FK constraint issues
|
||||
"202408181235": true, // User table automigration with FK constraint issues
|
||||
"202501221827": true, // Route table automigration with FK constraint issues
|
||||
"202501311657": true, // PreAuthKey table automigration with FK constraint issues
|
||||
// Add other migration IDs here as they are identified to need FK disabled
|
||||
}
|
||||
|
||||
// Get the current foreign key status
|
||||
var fkOriginallyEnabled int
|
||||
if err := dbConn.Raw("PRAGMA foreign_keys").Scan(&fkOriginallyEnabled).Error; err != nil {
|
||||
return fmt.Errorf("checking foreign key status: %w", err)
|
||||
}
|
||||
if fkEnabled == 1 {
|
||||
if err := dbConn.Exec("PRAGMA foreign_keys = OFF").Error; err != nil {
|
||||
return fmt.Errorf("disabling foreign keys: %w", err)
|
||||
}
|
||||
defer dbConn.Exec("PRAGMA foreign_keys = ON")
|
||||
|
||||
// Get all migration IDs in order from the actual migration definitions
|
||||
// Only IDs that are in the migrationsRequiringFKDisabled map will be processed with FK disabled
|
||||
// any other new migrations are ran after.
|
||||
migrationIDs := []string{
|
||||
"202312101416",
|
||||
"202312101430",
|
||||
"202402151347",
|
||||
"2024041121742",
|
||||
"202406021630",
|
||||
"202407191627",
|
||||
"202408181235",
|
||||
"202409271400",
|
||||
"202501221827",
|
||||
"202501311657",
|
||||
"202502070949",
|
||||
"202502131714",
|
||||
"202502171819",
|
||||
"202505091439",
|
||||
"202505141324",
|
||||
// As of 2025-07-02, no new IDs should be added here.
|
||||
// They will be ran by the migrations.Migrate() call below.
|
||||
}
|
||||
}
|
||||
|
||||
if err := migrations.Migrate(); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, migrationID := range migrationIDs {
|
||||
log.Trace().Str("migration_id", migrationID).Msg("Running migration")
|
||||
needsFKDisabled := migrationsRequiringFKDisabled[migrationID]
|
||||
|
||||
// Since we disabled foreign keys for the migration, we need to check for
|
||||
// constraint violations manually at the end of the migration.
|
||||
if cfg.Type == types.DatabaseSqlite {
|
||||
if needsFKDisabled {
|
||||
// Disable foreign keys for this migration
|
||||
if err := dbConn.Exec("PRAGMA foreign_keys = OFF").Error; err != nil {
|
||||
return fmt.Errorf("disabling foreign keys for migration %s: %w", migrationID, err)
|
||||
}
|
||||
} else {
|
||||
// Ensure foreign keys are enabled for this migration
|
||||
if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil {
|
||||
return fmt.Errorf("enabling foreign keys for migration %s: %w", migrationID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Run up to this specific migration (will only run the next pending migration)
|
||||
if err := migrations.MigrateTo(migrationID); err != nil {
|
||||
return fmt.Errorf("running migration %s: %w", migrationID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil {
|
||||
return fmt.Errorf("restoring foreign keys: %w", err)
|
||||
}
|
||||
|
||||
// Run the rest of the migrations
|
||||
if err := migrations.Migrate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for constraint violations at the end
|
||||
type constraintViolation struct {
|
||||
Table string
|
||||
RowID int
|
||||
@@ -707,7 +1195,12 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
|
||||
Msg("Foreign key constraint violated")
|
||||
}
|
||||
|
||||
return fmt.Errorf("foreign key constraints violated")
|
||||
return errForeignKeyConstraintsViolated
|
||||
}
|
||||
} else {
|
||||
// PostgreSQL can run all migrations in one block - no foreign key issues
|
||||
if err := migrations.Migrate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -752,6 +1245,7 @@ func Read[T any](db *gorm.DB, fn func(rx *gorm.DB) (T, error)) (T, error) {
|
||||
var no T
|
||||
return no, err
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
@@ -773,5 +1267,6 @@ func Write[T any](db *gorm.DB, fn func(tx *gorm.DB) (T, error)) (T, error) {
|
||||
var no T
|
||||
return no, err
|
||||
}
|
||||
|
||||
return ret, tx.Commit().Error
|
||||
}
|
||||
|
||||
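The generic Read and Write helpers shown in the last two hunks run a callback inside a transaction and return its result, with Write committing on success, so callers never touch the transaction plumbing directly. A hypothetical usage sketch; the node-counting query is illustrative only:

package db

import (
	"github.com/juanfont/headscale/hscontrol/types"
	"gorm.io/gorm"
)

// countNodes shows the Read wrapper in use: the callback gets a transaction,
// returns a value and an error, and Read takes care of the rest.
func countNodes(hsdb *HSDatabase) (int64, error) {
	return Read(hsdb.DB, func(rx *gorm.DB) (int64, error) {
		var n int64
		err := rx.Model(&types.Node{}).Count(&n).Error

		return n, err
	})
}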
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.