Mirror of https://github.com/juanfont/headscale.git (synced 2026-02-14 03:37:41 +01:00)

Compare commits: kradalby/c ... v0.25.0 (1 commit, d5037c25a6)

.coderabbit.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-GB"
early_access: false
reviews:
  profile: "chill"
  request_changes_workflow: false
  high_level_summary: true
  poem: true
  review_status: true
  collapse_walkthrough: false
  auto_review:
    enabled: true
    drafts: true
chat:
  auto_reply: true

@@ -17,8 +17,3 @@ LICENSE
.vscode

*.sock

node_modules/
package-lock.json
package.json
.github/ISSUE_TEMPLATE/bug_report.yaml (38 lines changed)

@@ -6,18 +6,14 @@ body:
- type: checkboxes
attributes:
label: Is this a support request?
description:
This issue tracker is for bugs and feature requests only. If you need
help, please use ask in our Discord community
description: This issue tracker is for bugs and feature requests only. If you need help, please use ask in our Discord community
options:
- label: This is not a support request
required: true
- type: checkboxes
attributes:
label: Is there an existing issue for this?
description:
Please search to see if an issue already exists for the bug you
encountered.
description: Please search to see if an issue already exists for the bug you encountered.
options:
- label: I have searched the existing issues
required: true
@@ -48,16 +44,10 @@ body:
attributes:
label: Environment
description: |
Please provide information about your environment.
If you are using a container, always provide the headscale version and not only the Docker image version.
Please do not put "latest".

If you are experiencing a problem during an upgrade, please provide the versions of the old and new versions of Headscale and Tailscale.

examples:
- **OS**: Ubuntu 24.04
- **Headscale version**: 0.24.3
- **Tailscale version**: 1.80.0
- **OS**: Ubuntu 20.04
- **Headscale version**: 0.22.3
- **Tailscale version**: 1.64.0
value: |
- OS:
- Headscale version:
@@ -75,27 +65,19 @@ body:
required: false
- type: textarea
attributes:
label: Debug information
label: Anything else?
description: |
Links? References? Anything that will give us more context about the issue you are encountering.
If **any** of these are omitted we will likely close your issue, do **not** ignore them.
Links? References? Anything that will give us more context about the issue you are encountering!

- Client netmap dump (see below)
- Policy configuration
- ACL configuration
- Headscale configuration
- Headscale log (with `trace` enabled)

Dump the netmap of tailscale clients:
`tailscale debug netmap > DESCRIPTIVE_NAME.json`

Dump the status of tailscale clients:
`tailscale status --json > DESCRIPTIVE_NAME.json`

Get the logs of a Tailscale client that is not working as expected.
`tailscale daemon-logs`
Please provide information describing the netmap, which client, which headscale version etc.

Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
**Ensure** you use formatting for files you attach.
Do **not** paste in long files.
validations:
required: true
required: false
.github/ISSUE_TEMPLATE/feature_request.yaml (9 lines changed)

@@ -16,15 +16,13 @@ body:
- type: textarea
attributes:
label: Description
description:
A clear and precise description of what new or changed feature you want.
description: A clear and precise description of what new or changed feature you want.
validations:
required: true
- type: checkboxes
attributes:
label: Contribution
description:
Are you willing to contribute to the implementation of this feature?
description: Are you willing to contribute to the implementation of this feature?
options:
- label: I can write the design doc for this feature
required: false
@@ -33,7 +31,6 @@ body:
- type: textarea
attributes:
label: How can it be implemented?
description:
Free text for your ideas on how this feature could be implemented.
description: Free text for your ideas on how this feature could be implemented.
validations:
required: false
34
.github/workflows/build.yml
vendored
34
.github/workflows/build.yml
vendored
@@ -17,12 +17,12 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
permissions: write-all
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
||||
uses: dorny/paths-filter@v3
|
||||
with:
|
||||
filters: |
|
||||
files:
|
||||
@@ -31,15 +31,10 @@ jobs:
|
||||
- '**/*.go'
|
||||
- 'integration_test/'
|
||||
- 'config-example.yaml'
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
- name: Run nix build
|
||||
id: build
|
||||
@@ -57,7 +52,7 @@ jobs:
|
||||
exit $BUILD_STATUS
|
||||
|
||||
- name: Nix gosum diverging
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
|
||||
uses: actions/github-script@v6
|
||||
if: failure() && steps.build.outcome == 'failure'
|
||||
with:
|
||||
github-token: ${{secrets.GITHUB_TOKEN}}
|
||||
@@ -69,7 +64,7 @@ jobs:
|
||||
body: 'Nix build failed with wrong gosum, please update "vendorSha256" (${{ steps.build.outputs.OLD_HASH }}) for the "headscale" package in flake.nix with the new SHA: ${{ steps.build.outputs.NEW_HASH }}'
|
||||
})
|
||||
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
name: headscale-linux
|
||||
@@ -88,20 +83,13 @@ jobs:
|
||||
- "GOARCH=arm64 GOOS=darwin"
|
||||
- "GOARCH=amd64 GOOS=darwin"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
- uses: actions/checkout@v4
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
|
||||
- name: Run go cross compile
|
||||
run:
|
||||
env ${{ matrix.env }} nix develop --command -- go build -o "headscale"
|
||||
./cmd/headscale
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
run: env ${{ matrix.env }} nix develop --command -- go build -o "headscale" ./cmd/headscale
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: "headscale-${{ matrix.env }}"
|
||||
path: "headscale"
|
||||
|
||||
13
.github/workflows/check-tests.yaml
vendored
13
.github/workflows/check-tests.yaml
vendored
@@ -10,12 +10,12 @@ jobs:
|
||||
check-tests:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
||||
uses: dorny/paths-filter@v3
|
||||
with:
|
||||
filters: |
|
||||
files:
|
||||
@@ -24,15 +24,10 @@ jobs:
|
||||
- '**/*.go'
|
||||
- 'integration_test/'
|
||||
- 'config-example.yaml'
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
- name: Generate and check integration tests
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
|
||||
.github/workflows/docs-deploy.yml (6 lines changed)

@@ -21,15 +21,15 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
uses: actions/setup-python@v5
with:
python-version: 3.x
- name: Setup cache
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
uses: actions/cache@v4
with:
key: ${{ github.ref }}
path: .cache
.github/workflows/docs-test.yml (6 lines changed)

@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
- name: Install python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
uses: actions/setup-python@v5
with:
python-version: 3.x
- name: Setup cache
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
uses: actions/cache@v4
with:
key: ${{ github.ref }}
path: .cache
@@ -38,14 +38,12 @@ func findTests() []string {
    return tests
}

func updateYAML(tests []string, jobName string, testPath string) {
func updateYAML(tests []string) {
    testsForYq := fmt.Sprintf("[%s]", strings.Join(tests, ", "))

    yqCommand := fmt.Sprintf(
        "yq eval '.jobs.%s.strategy.matrix.test = %s' %s -i",
        jobName,
        "yq eval '.jobs.integration-test.strategy.matrix.test = %s' ./test-integration.yaml -i",
        testsForYq,
        testPath,
    )
    cmd := exec.Command("bash", "-c", yqCommand)

@@ -60,7 +58,7 @@ func updateYAML(tests []string, jobName string, testPath string) {
        log.Fatalf("failed to run yq command: %s", err)
    }

    fmt.Printf("YAML file (%s) job %s updated successfully\n", testPath, jobName)
    fmt.Println("YAML file updated successfully")
}

func main() {
@@ -71,21 +69,5 @@ func main() {
        quotedTests[i] = fmt.Sprintf("\"%s\"", test)
    }

    // Define selected tests for PostgreSQL
    postgresTestNames := []string{
        "TestACLAllowUserDst",
        "TestPingAllByIP",
        "TestEphemeral2006DeletedTooQuickly",
        "TestPingAllByIPManyUpDown",
        "TestSubnetRouterMultiNetwork",
    }

    quotedPostgresTests := make([]string, len(postgresTestNames))
    for i, test := range postgresTestNames {
        quotedPostgresTests[i] = fmt.Sprintf("\"%s\"", test)
    }

    // Update both SQLite and PostgreSQL job matrices
    updateYAML(quotedTests, "sqlite", "./test-integration.yaml")
    updateYAML(quotedPostgresTests, "postgres", "./test-integration.yaml")
    updateYAML(quotedTests)
}
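For orientation, the generator above rewrites the integration workflow's test matrix by shelling out to `yq`. A rough, illustrative sketch of the YAML shape it targets follows; the job names (`sqlite`, `postgres`) and test names are taken from the surrounding diff, but the excerpt itself is hypothetical rather than copied from the repository.

```yaml
# Hypothetical excerpt of test-integration.yaml after the generator has run.
# The newer generator writes .jobs.<job>.strategy.matrix.test once per job,
# while the older one wrote a single .jobs.integration-test.strategy.matrix.test list.
jobs:
  sqlite:
    strategy:
      matrix:
        test:
          - TestPingAllByIP
          - TestACLAllowUserDst
  postgres:
    strategy:
      matrix:
        test:
          - TestPingAllByIP
          - TestSubnetRouterMultiNetwork
```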
.github/workflows/gh-actions-updater.yaml (4 lines changed)

@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}

- name: Run GitHub Actions Version Updater
uses: saadmk11/github-actions-version-updater@64be81ba69383f81f2be476703ea6570c4c8686e # v0.8.1
uses: saadmk11/github-actions-version-updater@v0.8.1
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}
95
.github/workflows/integration-test-template.yml
vendored
95
.github/workflows/integration-test-template.yml
vendored
@@ -1,95 +0,0 @@
|
||||
name: Integration Test Template
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
test:
|
||||
required: true
|
||||
type: string
|
||||
postgres_flag:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
database_name:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
# Github does not allow us to access secrets in pull requests,
|
||||
# so this env var is used to check if we have the secret or not.
|
||||
# If we have the secrets, meaning we are running on push in a fork,
|
||||
# there might be secrets available for more debugging.
|
||||
# If TS_OAUTH_CLIENT_ID and TS_OAUTH_SECRET is set, then the job
|
||||
# will join a debug tailscale network, set up SSH and a tmux session.
|
||||
# The SSH will be configured to use the SSH key of the Github user
|
||||
# that triggered the build.
|
||||
HAS_TAILSCALE_SECRET: ${{ secrets.TS_OAUTH_CLIENT_ID }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
||||
with:
|
||||
filters: |
|
||||
files:
|
||||
- '*.nix'
|
||||
- 'go.*'
|
||||
- '**/*.go'
|
||||
- 'integration_test/'
|
||||
- 'config-example.yaml'
|
||||
- name: Tailscale
|
||||
if: ${{ env.HAS_TAILSCALE_SECRET }}
|
||||
uses: tailscale/github-action@6986d2c82a91fbac2949fe01f5bab95cf21b5102 # v3.2.2
|
||||
with:
|
||||
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
|
||||
oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}
|
||||
tags: tag:gh
|
||||
- name: Setup SSH server for Actor
|
||||
if: ${{ env.HAS_TAILSCALE_SECRET }}
|
||||
uses: alexellis/setup-sshd-actor@master
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
- name: Run Integration Test
|
||||
uses: Wandalen/wretry.action@e68c23e6309f2871ca8ae4763e7629b9c258e1ea # v3.8.0
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
# Our integration tests are started like a thundering herd, often
|
||||
# hitting limits of the various external repositories we depend on
|
||||
# like docker hub. This will retry jobs every 5 min, 10 times,
|
||||
# hopefully letting us avoid manual intervention and restarting jobs.
|
||||
# One could of course argue that we should invest in trying to avoid
|
||||
# this, but currently it seems like a larger investment to be cleverer
|
||||
# about this.
|
||||
# Some of the jobs might still require manual restart as they are really
|
||||
# slow and this will cause them to eventually be killed by Github actions.
|
||||
attempt_delay: 300000 # 5 min
|
||||
attempt_limit: 2
|
||||
command: |
|
||||
nix develop --command -- hi run "^${{ inputs.test }}$" \
|
||||
--timeout=120m \
|
||||
${{ inputs.postgres_flag }}
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: always() && steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
name: ${{ inputs.database_name }}-${{ inputs.test }}-logs
|
||||
path: "control_logs/*/*.log"
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: always() && steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
name: ${{ inputs.database_name }}-${{ inputs.test }}-archives
|
||||
path: "control_logs/*/*.tar"
|
||||
- name: Setup a blocking tmux session
|
||||
if: ${{ env.HAS_TAILSCALE_SECRET }}
|
||||
uses: alexellis/block-with-tmux-action@master
|
||||
44
.github/workflows/lint.yml
vendored
44
.github/workflows/lint.yml
vendored
@@ -10,12 +10,12 @@ jobs:
|
||||
golangci-lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
||||
uses: dorny/paths-filter@v3
|
||||
with:
|
||||
filters: |
|
||||
files:
|
||||
@@ -24,31 +24,24 @@ jobs:
|
||||
- '**/*.go'
|
||||
- 'integration_test/'
|
||||
- 'config-example.yaml'
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
- name: golangci-lint
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
run: nix develop --command -- golangci-lint run
|
||||
--new-from-rev=${{github.event.pull_request.base.sha}}
|
||||
--format=colored-line-number
|
||||
run: nix develop --command -- golangci-lint run --new-from-rev=${{github.event.pull_request.base.sha}} --out-format=colored-line-number
|
||||
|
||||
prettier-lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
||||
uses: dorny/paths-filter@v3
|
||||
with:
|
||||
filters: |
|
||||
files:
|
||||
@@ -62,32 +55,21 @@ jobs:
|
||||
- '**/*.css'
|
||||
- '**/*.scss'
|
||||
- '**/*.html'
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
- name: Prettify code
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
run: nix develop --command -- prettier --no-error-on-unmatched-pattern
|
||||
--ignore-unknown --check **/*.{ts,js,md,yaml,yml,sass,css,scss,html}
|
||||
run: nix develop --command -- prettier --no-error-on-unmatched-pattern --ignore-unknown --check **/*.{ts,js,md,yaml,yml,sass,css,scss,html}
|
||||
|
||||
proto-lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
- uses: actions/checkout@v4
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
|
||||
- name: Buf lint
|
||||
run: nix develop --command -- buf lint proto
|
||||
|
||||
15
.github/workflows/release.yml
vendored
15
.github/workflows/release.yml
vendored
@@ -13,30 +13,25 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
|
||||
- name: Run goreleaser
|
||||
run: nix develop --command -- goreleaser release --clean
|
||||
|
||||
.github/workflows/stale.yml (10 lines changed)

@@ -12,17 +12,13 @@ jobs:
issues: write
pull-requests: write
steps:
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
- uses: actions/stale@v9
with:
days-before-issue-stale: 90
days-before-issue-close: 7
stale-issue-label: "stale"
stale-issue-message:
"This issue is stale because it has been open for 90 days with no
activity."
close-issue-message:
"This issue was closed because it has been inactive for 14 days
since being marked as stale."
stale-issue-message: "This issue is stale because it has been open for 90 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
days-before-pr-stale: -1
days-before-pr-close: -1
exempt-issue-labels: "no-stale-bot"
115
.github/workflows/test-integration.yaml
vendored
115
.github/workflows/test-integration.yaml
vendored
@@ -1,4 +1,4 @@
|
||||
name: integration
|
||||
name: Integration Tests
|
||||
# To debug locally on a branch, and when needing secrets
|
||||
# change this to include `push` so the build is ran on
|
||||
# the main repository.
|
||||
@@ -7,7 +7,8 @@ concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
jobs:
|
||||
sqlite:
|
||||
integration-test:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -21,11 +22,8 @@ jobs:
|
||||
- TestACLNamedHostsCanReach
|
||||
- TestACLDevice1CanAccessDevice2
|
||||
- TestPolicyUpdateWhileRunningWithCLIInDatabase
|
||||
- TestACLAutogroupMember
|
||||
- TestACLAutogroupTagged
|
||||
- TestAuthKeyLogoutAndReloginSameUser
|
||||
- TestAuthKeyLogoutAndReloginNewUser
|
||||
- TestAuthKeyLogoutAndReloginSameUserExpiredKey
|
||||
- TestOIDCAuthenticationPingAll
|
||||
- TestOIDCExpireNodesBasedOnTokenExpiry
|
||||
- TestOIDC024UserCreation
|
||||
@@ -50,6 +48,7 @@ jobs:
|
||||
- TestDERPVerifyEndpoint
|
||||
- TestResolveMagicDNS
|
||||
- TestResolveMagicDNSExtraRecordsPath
|
||||
- TestValidateResolvConf
|
||||
- TestDERPServerScenario
|
||||
- TestDERPServerWebsocketScenario
|
||||
- TestPingAllByIP
|
||||
@@ -66,36 +65,92 @@ jobs:
|
||||
- Test2118DeletingOnlineNodePanics
|
||||
- TestEnablingRoutes
|
||||
- TestHASubnetRouterFailover
|
||||
- TestEnableDisableAutoApprovedRoute
|
||||
- TestAutoApprovedSubRoute2068
|
||||
- TestSubnetRouteACL
|
||||
- TestEnablingExitRoutes
|
||||
- TestSubnetRouterMultiNetwork
|
||||
- TestSubnetRouterMultiNetworkExitNode
|
||||
- TestAutoApproveMultiNetwork
|
||||
- TestSubnetRouteACLFiltering
|
||||
- TestHeadscale
|
||||
- TestCreateTailscale
|
||||
- TestTailscaleNodesJoiningHeadcale
|
||||
- TestSSHOneUserToAll
|
||||
- TestSSHMultipleUsersAllToAll
|
||||
- TestSSHNoSSHConfigured
|
||||
- TestSSHIsBlockedInACL
|
||||
- TestSSHUserOnlyIsolation
|
||||
uses: ./.github/workflows/integration-test-template.yml
|
||||
with:
|
||||
test: ${{ matrix.test }}
|
||||
postgres_flag: "--postgres=0"
|
||||
database_name: "sqlite"
|
||||
postgres:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
test:
|
||||
- TestACLAllowUserDst
|
||||
- TestPingAllByIP
|
||||
- TestEphemeral2006DeletedTooQuickly
|
||||
- TestPingAllByIPManyUpDown
|
||||
- TestSubnetRouterMultiNetwork
|
||||
uses: ./.github/workflows/integration-test-template.yml
|
||||
with:
|
||||
test: ${{ matrix.test }}
|
||||
postgres_flag: "--postgres=1"
|
||||
database_name: "postgres"
|
||||
database: [postgres, sqlite]
|
||||
env:
|
||||
# Github does not allow us to access secrets in pull requests,
|
||||
# so this env var is used to check if we have the secret or not.
|
||||
# If we have the secrets, meaning we are running on push in a fork,
|
||||
# there might be secrets available for more debugging.
|
||||
# If TS_OAUTH_CLIENT_ID and TS_OAUTH_SECRET is set, then the job
|
||||
# will join a debug tailscale network, set up SSH and a tmux session.
|
||||
# The SSH will be configured to use the SSH key of the Github user
|
||||
# that triggered the build.
|
||||
HAS_TAILSCALE_SECRET: ${{ secrets.TS_OAUTH_CLIENT_ID }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: dorny/paths-filter@v3
|
||||
with:
|
||||
filters: |
|
||||
files:
|
||||
- '*.nix'
|
||||
- 'go.*'
|
||||
- '**/*.go'
|
||||
- 'integration_test/'
|
||||
- 'config-example.yaml'
|
||||
- name: Tailscale
|
||||
if: ${{ env.HAS_TAILSCALE_SECRET }}
|
||||
uses: tailscale/github-action@v2
|
||||
with:
|
||||
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
|
||||
oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}
|
||||
tags: tag:gh
|
||||
- name: Setup SSH server for Actor
|
||||
if: ${{ env.HAS_TAILSCALE_SECRET }}
|
||||
uses: alexellis/setup-sshd-actor@master
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: satackey/action-docker-layer-caching@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
continue-on-error: true
|
||||
- name: Run Integration Test
|
||||
uses: Wandalen/wretry.action@master
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
env:
|
||||
USE_POSTGRES: ${{ matrix.database == 'postgres' && '1' || '0' }}
|
||||
with:
|
||||
attempt_limit: 5
|
||||
command: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
--env HEADSCALE_INTEGRATION_POSTGRES=${{env.USE_POSTGRES}} \
|
||||
golang:1 \
|
||||
go run gotest.tools/gotestsum@latest -- ./... \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^${{ matrix.test }}$"
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: always() && steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
name: ${{ matrix.test }}-${{matrix.database}}-logs
|
||||
path: "control_logs/*.log"
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: always() && steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
name: ${{ matrix.test }}-${{matrix.database}}-pprof
|
||||
path: "control_logs/*.pprof.tar"
|
||||
- name: Setup a blocking tmux session
|
||||
if: ${{ env.HAS_TAILSCALE_SECRET }}
|
||||
uses: alexellis/block-with-tmux-action@master
|
||||
|
||||
13
.github/workflows/test.yml
vendored
13
.github/workflows/test.yml
vendored
@@ -11,13 +11,13 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
||||
uses: dorny/paths-filter@v3
|
||||
with:
|
||||
filters: |
|
||||
files:
|
||||
@@ -27,15 +27,10 @@ jobs:
|
||||
- 'integration_test/'
|
||||
- 'config-example.yaml'
|
||||
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
- name: Run tests
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
|
||||
.github/workflows/update-flake.yml (6 lines changed)

@@ -10,10 +10,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
- name: Install Nix
uses: DeterminateSystems/nix-installer-action@21a544727d0c62386e78b4befe52d19ad12692e3 # v17
uses: DeterminateSystems/nix-installer-action@main
- name: Update flake.lock
uses: DeterminateSystems/update-flake-lock@428c2b58a4b7414dabd372acb6a03dba1084d3ab # v25
uses: DeterminateSystems/update-flake-lock@main
with:
pr-title: "Update flake.lock"
.gitignore (8 lines changed)

@@ -20,9 +20,9 @@ vendor/

dist/
/headscale
config.json
config.yaml
config*.yaml
!config-example.yaml
derp.yaml
*.hujson
*.key
@@ -46,9 +46,3 @@ integration_test/etc/config.dump.yaml
/site

__debug_bin

node_modules/
package-lock.json
package.json
132
.golangci.yaml
132
.golangci.yaml
@@ -1,80 +1,70 @@
|
||||
---
|
||||
version: "2"
|
||||
run:
|
||||
timeout: 10m
|
||||
build-tags:
|
||||
- ts2019
|
||||
|
||||
issues:
|
||||
skip-dirs:
|
||||
- gen
|
||||
linters:
|
||||
default: all
|
||||
enable-all: true
|
||||
disable:
|
||||
- cyclop
|
||||
- depguard
|
||||
- dupl
|
||||
- exhaustruct
|
||||
- funlen
|
||||
|
||||
- revive
|
||||
- lll
|
||||
- gofmt
|
||||
- gochecknoglobals
|
||||
- gochecknoinits
|
||||
- gocognit
|
||||
- godox
|
||||
- interfacebloat
|
||||
- ireturn
|
||||
- lll
|
||||
- maintidx
|
||||
- makezero
|
||||
- musttag
|
||||
- nestif
|
||||
- nolintlint
|
||||
- paralleltest
|
||||
- revive
|
||||
- funlen
|
||||
- tagliatelle
|
||||
- testpackage
|
||||
- varnamelen
|
||||
- wrapcheck
|
||||
- wsl
|
||||
settings:
|
||||
gocritic:
|
||||
disabled-checks:
|
||||
- appendAssign
|
||||
- ifElseChain
|
||||
nlreturn:
|
||||
block-size: 4
|
||||
varnamelen:
|
||||
ignore-names:
|
||||
- err
|
||||
- db
|
||||
- id
|
||||
- ip
|
||||
- ok
|
||||
- c
|
||||
- tt
|
||||
- tx
|
||||
- rx
|
||||
- sb
|
||||
- wg
|
||||
- pr
|
||||
- p
|
||||
- p2
|
||||
ignore-type-assert-ok: true
|
||||
ignore-map-index-ok: true
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
- gen
|
||||
- godox
|
||||
- ireturn
|
||||
- execinquery
|
||||
- exhaustruct
|
||||
- nolintlint
|
||||
- musttag # causes issues with imported libs
|
||||
- depguard
|
||||
- exportloopref
|
||||
|
||||
formatters:
|
||||
enable:
|
||||
- gci
|
||||
- gofmt
|
||||
- gofumpt
|
||||
- goimports
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
- gen
|
||||
# We should strive to enable these:
|
||||
- wrapcheck
|
||||
- dupl
|
||||
- makezero
|
||||
- maintidx
|
||||
|
||||
# Limits the methods of an interface to 10. We have more in integration tests
|
||||
- interfacebloat
|
||||
|
||||
# We might want to enable this, but it might be a lot of work
|
||||
- cyclop
|
||||
- nestif
|
||||
- wsl # might be incompatible with gofumpt
|
||||
- testpackage
|
||||
- paralleltest
|
||||
|
||||
linters-settings:
|
||||
varnamelen:
|
||||
ignore-type-assert-ok: true
|
||||
ignore-map-index-ok: true
|
||||
ignore-names:
|
||||
- err
|
||||
- db
|
||||
- id
|
||||
- ip
|
||||
- ok
|
||||
- c
|
||||
- tt
|
||||
- tx
|
||||
- rx
|
||||
|
||||
gocritic:
|
||||
disabled-checks:
|
||||
- appendAssign
|
||||
# TODO(kradalby): Remove this
|
||||
- ifElseChain
|
||||
|
||||
nlreturn:
|
||||
block-size: 4
|
||||
|
||||
@@ -2,12 +2,11 @@
|
||||
version: 2
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy -compat=1.24
|
||||
- go mod tidy -compat=1.22
|
||||
- go mod vendor
|
||||
|
||||
release:
|
||||
prerelease: auto
|
||||
draft: true
|
||||
|
||||
builds:
|
||||
- id: headscale
|
||||
@@ -28,17 +27,14 @@ builds:
|
||||
flags:
|
||||
- -mod=readonly
|
||||
ldflags:
|
||||
- -s -w
|
||||
- -X github.com/juanfont/headscale/hscontrol/types.Version={{ .Version }}
|
||||
- -X github.com/juanfont/headscale/hscontrol/types.GitCommitHash={{ .Commit }}
|
||||
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
|
||||
tags:
|
||||
- ts2019
|
||||
|
||||
archives:
|
||||
- id: golang-cross
|
||||
name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}'
|
||||
formats:
|
||||
- binary
|
||||
format: binary
|
||||
|
||||
source:
|
||||
enabled: true
|
||||
@@ -57,22 +53,15 @@ nfpms:
|
||||
# List file contents: dpkg -c dist/headscale...deb
|
||||
# Package metadata: dpkg --info dist/headscale....deb
|
||||
#
|
||||
- ids:
|
||||
- builds:
|
||||
- headscale
|
||||
package_name: headscale
|
||||
priority: optional
|
||||
vendor: headscale
|
||||
maintainer: Kristoffer Dalby <kristoffer@dalby.cc>
|
||||
homepage: https://github.com/juanfont/headscale
|
||||
description: |-
|
||||
Open source implementation of the Tailscale control server.
|
||||
Headscale aims to implement a self-hosted, open source alternative to the
|
||||
Tailscale control server. Headscale's goal is to provide self-hosters and
|
||||
hobbyists with an open-source server they can use for their projects and
|
||||
labs. It implements a narrow scope, a single Tailscale network (tailnet),
|
||||
suitable for a personal use, or a small open-source organisation.
|
||||
license: BSD
|
||||
bindir: /usr/bin
|
||||
section: net
|
||||
formats:
|
||||
- deb
|
||||
contents:
|
||||
@@ -81,21 +70,15 @@ nfpms:
|
||||
type: config|noreplace
|
||||
file_info:
|
||||
mode: 0644
|
||||
- src: ./packaging/systemd/headscale.service
|
||||
- src: ./docs/packaging/headscale.systemd.service
|
||||
dst: /usr/lib/systemd/system/headscale.service
|
||||
- dst: /var/lib/headscale
|
||||
type: dir
|
||||
- src: LICENSE
|
||||
dst: /usr/share/doc/headscale/copyright
|
||||
- dst: /var/run/headscale
|
||||
type: dir
|
||||
scripts:
|
||||
postinstall: ./packaging/deb/postinst
|
||||
postremove: ./packaging/deb/postrm
|
||||
preremove: ./packaging/deb/prerm
|
||||
deb:
|
||||
lintian_overrides:
|
||||
- no-changelog # Our CHANGELOG.md uses a different formatting
|
||||
- no-manual-page
|
||||
- statically-linked-binary
|
||||
postinstall: ./docs/packaging/postinstall.sh
|
||||
postremove: ./docs/packaging/postremove.sh
|
||||
|
||||
kos:
|
||||
- id: ghcr
|
||||
|
||||
@@ -1,5 +1,4 @@
.github/workflows/test-integration-v2*
docs/about/features.md
docs/ref/configuration.md
docs/ref/oidc.md
docs/ref/remote-cli.md
CHANGELOG.md (276 lines changed)

@@ -2,277 +2,6 @@

## Next

### Database integrity improvements

This release includes a significant database migration that addresses longstanding
issues with the database schema and data integrity that has accumulated over the
years. The migration introduces a `schema.sql` file as the source of truth for
the expected database schema to ensure new migrations that will cause divergence
does not occur again.

These issues arose from a combination of factors discovered over time: SQLite
foreign keys not being enforced for many early versions, all migrations being
run in one large function until version 0.23.0, and inconsistent use of GORM's
AutoMigrate feature. Moving forward, all new migrations will be explicit SQL
operations rather than relying on GORM AutoMigrate, and foreign keys will be
enforced throughout the migration process.

We are only improving SQLite databases with this change - PostgreSQL databases
are not affected.

Please read the [PR description](https://github.com/juanfont/headscale/pull/2617)
for more technical details about the issues and solutions.

**SQLite Database Backup Example:**

```bash
# Stop headscale
systemctl stop headscale

# Backup sqlite database
cp /var/lib/headscale/db.sqlite /var/lib/headscale/db.sqlite.backup

# Backup sqlite WAL/SHM files (if they exist)
cp /var/lib/headscale/db.sqlite-wal /var/lib/headscale/db.sqlite-wal.backup
cp /var/lib/headscale/db.sqlite-shm /var/lib/headscale/db.sqlite-shm.backup

# Start headscale (migration will run automatically)
systemctl start headscale
```

### BREAKING

- **CLI: Remove deprecated flags**
  - `--identifier` flag removed - use `--node` or `--user` instead
  - `--namespace` flag removed - use `--user` instead

  **Command changes:**

```bash
# Before
headscale nodes expire --identifier 123
headscale nodes rename --identifier 123 new-name
headscale nodes delete --identifier 123
headscale nodes move --identifier 123 --user 456
headscale nodes list-routes --identifier 123

# After
headscale nodes expire --node 123
headscale nodes rename --node 123 new-name
headscale nodes delete --node 123
headscale nodes move --node 123 --user 456
headscale nodes list-routes --node 123

# Before
headscale users destroy --identifier 123
headscale users rename --identifier 123 --new-name john
headscale users list --identifier 123

# After
headscale users destroy --user 123
headscale users rename --user 123 --new-name john
headscale users list --user 123

# Before
headscale nodes register --namespace myuser nodekey
headscale nodes list --namespace myuser
headscale preauthkeys create --namespace myuser

# After
headscale nodes register --user myuser nodekey
headscale nodes list --user myuser
headscale preauthkeys create --user myuser
```

- Policy: Zero or empty destination port is no longer allowed
  [#2606](https://github.com/juanfont/headscale/pull/2606)

### Changes

- **Database schema migration improvements for SQLite**
  [#2617](https://github.com/juanfont/headscale/pull/2617)
  - **IMPORTANT: Backup your SQLite database before upgrading**
  - Introduces safer table renaming migration strategy
  - Addresses longstanding database integrity issues

- Remove policy v1 code [#2600](https://github.com/juanfont/headscale/pull/2600)
- Refactor Debian/Ubuntu packaging and drop support for Ubuntu 20.04.
  [#2614](https://github.com/juanfont/headscale/pull/2614)
- Support client verify for DERP
  [#2046](https://github.com/juanfont/headscale/pull/2046)
- Remove redundant check regarding `noise` config
  [#2658](https://github.com/juanfont/headscale/pull/2658)
- Refactor OpenID Connect documentation
  [#2625](https://github.com/juanfont/headscale/pull/2625)
- Don't crash if config file is missing
  [#2656](https://github.com/juanfont/headscale/pull/2656)

## 0.26.1 (2025-06-06)

### Changes

- Ensure nodes are matching both node key and machine key when connecting.
  [#2642](https://github.com/juanfont/headscale/pull/2642)

## 0.26.0 (2025-05-14)

### BREAKING

#### Routes

Route internals have been rewritten, removing the dedicated route table in the
database. This was done to simplify the codebase, which had grown unnecessarily
complex after the routes were split into separate tables. The overhead of having
to go via the database and keeping the state in sync made the code very hard to
reason about and prone to errors. The majority of the route state is only
relevant when headscale is running, and is now only kept in memory. As part of
this, the CLI and API has been simplified to reflect the changes;

```console
$ headscale nodes list-routes
ID | Hostname           | Approved        | Available       | Serving (Primary)
1  | ts-head-ruqsg8     |                 | 0.0.0.0/0, ::/0 |
2  | ts-unstable-fq7ob4 |                 | 0.0.0.0/0, ::/0 |

$ headscale nodes approve-routes --identifier 1 --routes 0.0.0.0/0,::/0
Node updated

$ headscale nodes list-routes
ID | Hostname           | Approved        | Available       | Serving (Primary)
1  | ts-head-ruqsg8     | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0
2  | ts-unstable-fq7ob4 |                 | 0.0.0.0/0, ::/0 |
```

Note that if an exit route is approved (0.0.0.0/0 or ::/0), both IPv4 and IPv6
will be approved.

- Route API and CLI has been removed
  [#2422](https://github.com/juanfont/headscale/pull/2422)
- Routes are now managed via the Node API
  [#2422](https://github.com/juanfont/headscale/pull/2422)
- Only routes accessible to the node will be sent to the node
  [#2561](https://github.com/juanfont/headscale/pull/2561)

#### Policy v2

This release introduces a new policy implementation. The new policy is a
complete rewrite, and it introduces some significant quality and consistency
improvements. In principle, there are not really any new features, but some long
standing bugs should have been resolved, or be easier to fix in the future. The
new policy code passes all of our tests.

**Changes**

- The policy is validated and "resolved" when loading, providing errors for
  invalid rules and conditions.
  - Previously this was done as a mix between load and runtime (when it was
    applied to a node).
  - This means that when you convert the first time, what was previously a
    policy that loaded, but failed at runtime, will now fail at load time.
- Error messages should be more descriptive and informative.
  - There is still work to be here, but it is already improved with "typing"
    (e.g. only Users can be put in Groups)
- All users must contain an `@` character.
  - If your user naturally contains and `@`, like an email, this will just work.
  - If its based on usernames, or other identifiers not containing an `@`, an
    `@` should be appended at the end. For example, if your user is `john`, it
    must be written as `john@` in the policy.

<details>

<summary>Migration notes when the policy is stored in the database.</summary>

This section **only** applies if the policy is stored in the database and
Headscale 0.26 doesn't start due to a policy error
(`failed to load ACL policy`).

- Start Headscale 0.26 with the environment variable `HEADSCALE_POLICY_V1=1`
  set. You can check that Headscale picked up the environment variable by
  observing this message during startup: `Using policy manager version: 1`
- Dump the policy to a file: `headscale policy get > policy.json`
- Edit `policy.json` and migrate to policy V2. Use the command
  `headscale policy check --file policy.json` to check for policy errors.
- Load the modified policy: `headscale policy set --file policy.json`
- Restart Headscale **without** the environment variable `HEADSCALE_POLICY_V1`.
  Headscale should now print the message `Using policy manager version: 2` and
  startup successfully.

</details>

**SSH**

The SSH policy has been reworked to be more consistent with the rest of the
policy. In addition, several inconsistencies between our implementation and
Tailscale's upstream has been closed and this might be a breaking change for
some users. Please refer to the
[upstream documentation](https://tailscale.com/kb/1337/acl-syntax#tailscale-ssh)
for more information on which types are allowed in `src`, `dst` and `users`.

There is one large inconsistency left, we allow `*` as a destination as we
currently do not support `autogroup:self`, `autogroup:member` and
`autogroup:tagged`. The support for `*` will be removed when we have support for
the autogroups.

**Current state**

The new policy is passing all tests, both integration and unit tests. This does
not mean it is perfect, but it is a good start. Corner cases that is currently
working in v1 and not tested might be broken in v2 (and vice versa).

**We do need help testing this code**

#### Other breaking changes

- Disallow `server_url` and `base_domain` to be equal
  [#2544](https://github.com/juanfont/headscale/pull/2544)
- Return full user in API for pre auth keys instead of string
  [#2542](https://github.com/juanfont/headscale/pull/2542)
- Pre auth key API/CLI now uses ID over username
  [#2542](https://github.com/juanfont/headscale/pull/2542)
- A non-empty list of global nameservers needs to be specified via
  `dns.nameservers.global` if the configuration option `dns.override_local_dns`
  is enabled or is not specified in the configuration file. This aligns with
  behaviour of tailscale.com.
  [#2438](https://github.com/juanfont/headscale/pull/2438)
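To make the first and last items in the list above concrete, a minimal, hypothetical configuration fragment is sketched below. The values are examples only and are not taken from this diff; only the option names (`server_url`, `base_domain`, `dns.override_local_dns`, `dns.nameservers.global`) come from the entries above.

```yaml
# Illustrative headscale config fragment (assumed example values)
server_url: https://headscale.example.com   # must not be equal to the base_domain
dns:
  base_domain: tailnet.example.net          # distinct from the server_url host
  override_local_dns: true
  nameservers:
    global:                                 # must be non-empty when override_local_dns is enabled
      - 1.1.1.1
      - 9.9.9.9
```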

### Changes

- Use Go 1.24 [#2427](https://github.com/juanfont/headscale/pull/2427)
- Add `headscale policy check` command to check policy
  [#2553](https://github.com/juanfont/headscale/pull/2553)
- `oidc.map_legacy_users` and `oidc.strip_email_domain` has been removed
  [#2411](https://github.com/juanfont/headscale/pull/2411)
- Add more information to `/debug` endpoint
  [#2420](https://github.com/juanfont/headscale/pull/2420)
  - It is now possible to inspect running goroutines and take profiles
  - View of config, policy, filter, ssh policy per node, connected nodes and
    DERPmap
- OIDC: Fetch UserInfo to get EmailVerified if necessary
  [#2493](https://github.com/juanfont/headscale/pull/2493)
  - If a OIDC provider doesn't include the `email_verified` claim in its ID
    tokens, Headscale will attempt to get it from the UserInfo endpoint.
- OIDC: Try to populate name, email and username from UserInfo
  [#2545](https://github.com/juanfont/headscale/pull/2545)
- Improve performance by only querying relevant nodes from the database for node
  updates [#2509](https://github.com/juanfont/headscale/pull/2509)
- node FQDNs in the netmap will now contain a dot (".") at the end. This aligns
  with behaviour of tailscale.com
  [#2503](https://github.com/juanfont/headscale/pull/2503)
- Restore support for "Override local DNS"
  [#2438](https://github.com/juanfont/headscale/pull/2438)
- Add documentation for routes
  [#2496](https://github.com/juanfont/headscale/pull/2496)
- Add support for `autogroup:member`, `autogroup:tagged`
  [#2572](https://github.com/juanfont/headscale/pull/2572)

## 0.25.1 (2025-02-25)

### Changes

- Fix issue where registration errors are sent correctly
  [#2435](https://github.com/juanfont/headscale/pull/2435)
- Fix issue where routes passed on registration were not saved
  [#2444](https://github.com/juanfont/headscale/pull/2444)
- Fix issue where registration page was displayed twice
  [#2445](https://github.com/juanfont/headscale/pull/2445)

## 0.25.0 (2025-02-11)

@@ -288,7 +17,7 @@ working in v1 and not tested might be broken in v2 (and vice versa).

- A logged out node logging in with the same user will replace the existing
  node.
- Remove support for Tailscale clients older than 1.62 (Capability version 87)
  [#2405](https://github.com/juanfont/headscale/pull/2405)
  [#2405](https://github.com/juanfont/headscale/pull/2405)

### Changes

@@ -308,13 +37,10 @@ working in v1 and not tested might be broken in v2 (and vice versa).
  [#2396](https://github.com/juanfont/headscale/pull/2396)
- Rehaul HTTP errors, return better status code and errors to users
  [#2398](https://github.com/juanfont/headscale/pull/2398)
- Print headscale version and commit on server startup
  [#2415](https://github.com/juanfont/headscale/pull/2415)

## 0.24.3 (2025-02-07)

### Changes

- Fix migration error caused by nodes having invalid auth keys
  [#2412](https://github.com/juanfont/headscale/pull/2412)
- Pre auth keys belonging to a user are no longer deleted with the user
395
CLAUDE.md
395
CLAUDE.md
@@ -1,395 +0,0 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Overview
|
||||
|
||||
Headscale is an open-source implementation of the Tailscale control server written in Go. It provides self-hosted coordination for Tailscale networks (tailnets), managing node registration, IP allocation, policy enforcement, and DERP routing.
|
||||
|
||||
## Development Commands
|
||||
|
||||
### Quick Setup
|
||||
```bash
|
||||
# Recommended: Use Nix for dependency management
|
||||
nix develop
|
||||
|
||||
# Full development workflow
|
||||
make dev # runs fmt + lint + test + build
|
||||
```
|
||||
|
||||
### Essential Commands
|
||||
```bash
|
||||
# Build headscale binary
|
||||
make build
|
||||
|
||||
# Run tests
|
||||
make test
|
||||
go test ./... # All unit tests
|
||||
go test -race ./... # With race detection
|
||||
|
||||
# Run specific integration test
|
||||
go run ./cmd/hi run "TestName" --postgres
|
||||
|
||||
# Code formatting and linting
|
||||
make fmt # Format all code (Go, docs, proto)
|
||||
make lint # Lint all code (Go, proto)
|
||||
make fmt-go # Format Go code only
|
||||
make lint-go # Lint Go code only
|
||||
|
||||
# Protocol buffer generation (after modifying proto/)
|
||||
make generate
|
||||
|
||||
# Clean build artifacts
|
||||
make clean
|
||||
```
|
||||
|
||||
### Integration Testing
|
||||
```bash
|
||||
# Use the hi (Headscale Integration) test runner
|
||||
go run ./cmd/hi doctor # Check system requirements
|
||||
go run ./cmd/hi run "TestPattern" # Run specific test
|
||||
go run ./cmd/hi run "TestPattern" --postgres # With PostgreSQL backend
|
||||
|
||||
# Test artifacts are saved to control_logs/ with logs and debug data
|
||||
```
|
||||
|
||||
## Project Structure & Architecture
|
||||
|
||||
### Top-Level Organization
|
||||
|
||||
```
|
||||
headscale/
|
||||
├── cmd/ # Command-line applications
|
||||
│ ├── headscale/ # Main headscale server binary
|
||||
│ └── hi/ # Headscale Integration test runner
|
||||
├── hscontrol/ # Core control plane logic
|
||||
├── integration/ # End-to-end Docker-based tests
|
||||
├── proto/ # Protocol buffer definitions
|
||||
├── gen/ # Generated code (protobuf)
|
||||
├── docs/ # Documentation
|
||||
└── packaging/ # Distribution packaging
|
||||
```
|
||||
|
||||
### Core Packages (`hscontrol/`)
|
||||
|
||||
**Main Server (`hscontrol/`)**
|
||||
- `app.go`: Application setup, dependency injection, server lifecycle
|
||||
- `handlers.go`: HTTP/gRPC API endpoints for management operations
|
||||
- `grpcv1.go`: gRPC service implementation for headscale API
|
||||
- `poll.go`: **Critical** - Handles Tailscale MapRequest/MapResponse protocol
|
||||
- `noise.go`: Noise protocol implementation for secure client communication
|
||||
- `auth.go`: Authentication flows (web, OIDC, command-line)
|
||||
- `oidc.go`: OpenID Connect integration for user authentication
|
||||
|
||||
**State Management (`hscontrol/state/`)**
|
||||
- `state.go`: Central coordinator for all subsystems (database, policy, IP allocation, DERP)
|
||||
- `node_store.go`: **Performance-critical** - In-memory cache with copy-on-write semantics
|
||||
- Thread-safe operations with deadlock detection
|
||||
- Coordinates between database persistence and real-time operations
|
||||
|
||||
**Database Layer (`hscontrol/db/`)**
|
||||
- `db.go`: Database abstraction, GORM setup, migration management
|
||||
- `node.go`: Node lifecycle, registration, expiration, IP assignment
|
||||
- `users.go`: User management, namespace isolation
|
||||
- `api_key.go`: API authentication tokens
|
||||
- `preauth_keys.go`: Pre-authentication keys for automated node registration
|
||||
- `ip.go`: IP address allocation and management
|
||||
- `policy.go`: Policy storage and retrieval
|
||||
- Schema migrations in `schema.sql` with extensive test data coverage
|
||||
|
||||
**Policy Engine (`hscontrol/policy/`)**
|
||||
- `policy.go`: Core ACL evaluation logic, HuJSON parsing
|
||||
- `v2/`: Next-generation policy system with improved filtering
|
||||
- `matcher/`: ACL rule matching and evaluation engine
|
||||
- Determines peer visibility, route approval, and network access rules
|
||||
- Supports both file-based and database-stored policies
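Stripped to its essentials, rule matching is a source/destination check against allow rules; the sketch below uses stand-in types, not the actual `policy`/`matcher` API:

```go
package main

import (
	"fmt"
	"net/netip"
)

// rule is a simplified allow rule: any listed source prefix may reach any
// listed destination prefix.
type rule struct {
	Srcs []netip.Prefix
	Dsts []netip.Prefix
}

// allowed reports whether traffic from src to dst matches any rule.
func allowed(rules []rule, src, dst netip.Addr) bool {
	for _, r := range rules {
		srcOK := false
		for _, p := range r.Srcs {
			if p.Contains(src) {
				srcOK = true
				break
			}
		}
		if !srcOK {
			continue
		}
		for _, p := range r.Dsts {
			if p.Contains(dst) {
				return true
			}
		}
	}
	return false
}

func main() {
	rules := []rule{{
		Srcs: []netip.Prefix{netip.MustParsePrefix("100.64.0.0/10")},
		Dsts: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")},
	}}
	fmt.Println(allowed(rules, netip.MustParseAddr("100.64.0.1"), netip.MustParseAddr("10.0.0.7"))) // true
}
```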
|
||||
|
||||
**Network Management (`hscontrol/`)**
|
||||
- `derp/`: DERP (Designated Encrypted Relay for Packets) server implementation
|
||||
- NAT traversal when direct connections fail
|
||||
- Fallback relay for firewall-restricted environments
|
||||
- `mapper/`: Converts internal Headscale state to Tailscale's wire protocol format
|
||||
- `tail.go`: Tailscale-specific data structure generation
|
||||
- `routes/`: Subnet route management and primary route selection
|
||||
- `dns/`: DNS record management and MagicDNS implementation
|
||||
|
||||
**Utilities & Support (`hscontrol/`)**
|
||||
- `types/`: Core data structures, configuration, validation
|
||||
- `util/`: Helper functions for networking, DNS, key management
|
||||
- `templates/`: Client configuration templates (Apple, Windows, etc.)
|
||||
- `notifier/`: Event notification system for real-time updates
|
||||
- `metrics.go`: Prometheus metrics collection
|
||||
- `capver/`: Tailscale capability version management
|
||||
|
||||
### Key Subsystem Interactions
|
||||
|
||||
**Node Registration Flow**
|
||||
1. **Client Connection**: `noise.go` handles secure protocol handshake
|
||||
2. **Authentication**: `auth.go` validates credentials (web/OIDC/preauth)
|
||||
3. **State Creation**: `state.go` coordinates IP allocation via `db/ip.go`
|
||||
4. **Storage**: `db/node.go` persists node, `NodeStore` caches in memory
|
||||
5. **Network Setup**: `mapper/` generates initial Tailscale network map
|
||||
|
||||
**Ongoing Operations**
|
||||
1. **Poll Requests**: `poll.go` receives periodic client updates
|
||||
2. **State Updates**: `NodeStore` maintains real-time node information
|
||||
3. **Policy Application**: `policy/` evaluates ACL rules for peer relationships
|
||||
4. **Map Distribution**: `mapper/` sends network topology to all affected clients
|
||||
|
||||
**Route Management**
|
||||
1. **Advertisement**: Clients announce routes via `poll.go` Hostinfo updates
|
||||
2. **Storage**: `db/` persists routes, `NodeStore` caches for performance
|
||||
3. **Approval**: `policy/` auto-approves routes based on ACL rules
|
||||
4. **Distribution**: `routes/` selects primary routes, `mapper/` distributes to peers
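The primary-selection step can be pictured as choosing one online advertiser per prefix (hypothetical types; the real `routes/` package tracks more state than this):

```go
package routes

import "net/netip"

type advertiser struct {
	NodeID uint64
	Online bool
}

// pickPrimaries chooses the first online advertiser per prefix; prefixes
// whose advertisers are all offline get no primary until one returns.
func pickPrimaries(adverts map[netip.Prefix][]advertiser) map[netip.Prefix]uint64 {
	primaries := make(map[netip.Prefix]uint64, len(adverts))
	for prefix, nodes := range adverts {
		for _, n := range nodes {
			if n.Online {
				primaries[prefix] = n.NodeID
				break // remaining advertisers stay available as failover candidates
			}
		}
	}
	return primaries
}
```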
|
||||
|
||||
### Command-Line Tools (`cmd/`)
|
||||
|
||||
**Main Server (`cmd/headscale/`)**
|
||||
- `headscale.go`: CLI parsing, configuration loading, server startup
|
||||
- Supports daemon mode, CLI operations (user/node management), database operations
|
||||
|
||||
**Integration Test Runner (`cmd/hi/`)**
|
||||
- `main.go`: Test execution framework with Docker orchestration
|
||||
- `run.go`: Individual test execution with artifact collection
|
||||
- `doctor.go`: System requirements validation
|
||||
- `docker.go`: Container lifecycle management
|
||||
- Essential for validating changes against real Tailscale clients
|
||||
|
||||
### Generated & External Code
|
||||
|
||||
**Protocol Buffers (`proto/` → `gen/`)**
|
||||
- Defines gRPC API for headscale management operations
|
||||
- Client libraries can generate from these definitions
|
||||
- Run `make generate` after modifying `.proto` files
|
||||
|
||||
**Integration Testing (`integration/`)**
|
||||
- `scenario.go`: Docker test environment setup
|
||||
- `tailscale.go`: Tailscale client container management
|
||||
- Individual test files for specific functionality areas
|
||||
- Real end-to-end validation with network isolation
|
||||
|
||||
### Critical Performance Paths
|
||||
|
||||
**High-Frequency Operations**
|
||||
1. **MapRequest Processing** (`poll.go`): Every 15-60 seconds per client
|
||||
2. **NodeStore Reads** (`node_store.go`): Every operation requiring node data
|
||||
3. **Policy Evaluation** (`policy/`): On every peer relationship calculation
|
||||
4. **Route Lookups** (`routes/`): During network map generation
|
||||
|
||||
**Database Write Patterns**
|
||||
- **Frequent**: Node heartbeats, endpoint updates, route changes
|
||||
- **Moderate**: User operations, policy updates, API key management
|
||||
- **Rare**: Schema migrations, bulk operations
|
||||
|
||||
### Configuration & Deployment
|
||||
|
||||
**Configuration (`hscontrol/types/config.go`)**
|
||||
- Database connection settings (SQLite/PostgreSQL)
|
||||
- Network configuration (IP ranges, DNS settings)
|
||||
- Policy mode (file vs database)
|
||||
- DERP relay configuration
|
||||
- OIDC provider settings
|
||||
|
||||
**Key Dependencies**
|
||||
- **GORM**: Database ORM with migration support
|
||||
- **Tailscale Libraries**: Core networking and protocol code
|
||||
- **Zerolog**: Structured logging throughout the application
|
||||
- **Buf**: Protocol buffer toolchain for code generation
|
||||
|
||||
### Development Workflow Integration
|
||||
|
||||
The architecture supports incremental development:
|
||||
- **Unit Tests**: Focus on individual packages (`*_test.go` files)
|
||||
- **Integration Tests**: Validate cross-component interactions
|
||||
- **Database Tests**: Extensive migration and data integrity validation
|
||||
- **Policy Tests**: ACL rule evaluation and edge cases
|
||||
- **Performance Tests**: NodeStore and high-frequency operation validation
|
||||
|
||||
## Integration Test System
|
||||
|
||||
### Overview
|
||||
Integration tests use Docker containers running real Tailscale clients against a Headscale server. Tests validate end-to-end functionality including routing, ACLs, node lifecycle, and network coordination.
|
||||
|
||||
### Running Integration Tests
|
||||
|
||||
**System Requirements**
|
||||
```bash
|
||||
# Check if your system is ready
|
||||
go run ./cmd/hi doctor
|
||||
```
|
||||
This verifies Docker, Go, required images, and disk space.
|
||||
|
||||
**Test Execution Patterns**
|
||||
```bash
|
||||
# Run a single test (recommended for development)
|
||||
go run ./cmd/hi run "TestSubnetRouterMultiNetwork"
|
||||
|
||||
# Run with PostgreSQL backend (for database-heavy tests)
|
||||
go run ./cmd/hi run "TestExpireNode" --postgres
|
||||
|
||||
# Run multiple tests with pattern matching
|
||||
go run ./cmd/hi run "TestSubnet*"
|
||||
|
||||
# Run all integration tests (CI/full validation)
|
||||
go test ./integration -timeout 30m
|
||||
```
|
||||
|
||||
**Test Categories & Timing**
|
||||
- **Fast tests** (< 2 min): Basic functionality, CLI operations
|
||||
- **Medium tests** (2-5 min): Route management, ACL validation
|
||||
- **Slow tests** (5+ min): Node expiration, HA failover
|
||||
- **Long-running tests** (10+ min): `TestNodeOnlineStatus` (12 min duration)
|
||||
|
||||
### Test Infrastructure
|
||||
|
||||
**Docker Setup**
|
||||
- Headscale server container with configurable database backend
|
||||
- Multiple Tailscale client containers with different versions
|
||||
- Isolated networks per test scenario
|
||||
- Automatic cleanup after test completion
|
||||
|
||||
**Test Artifacts**
|
||||
All test runs save artifacts to `control_logs/TIMESTAMP-ID/`:
|
||||
```
|
||||
control_logs/20250713-213106-iajsux/
|
||||
├── hs-testname-abc123.stderr.log # Headscale server logs
|
||||
├── hs-testname-abc123.stdout.log
|
||||
├── hs-testname-abc123.db # Database snapshot
|
||||
├── hs-testname-abc123_metrics.txt # Prometheus metrics
|
||||
├── hs-testname-abc123-mapresponses/ # Protocol debug data
|
||||
├── ts-client-xyz789.stderr.log # Tailscale client logs
|
||||
├── ts-client-xyz789.stdout.log
|
||||
└── ts-client-xyz789_status.json # Client status dump
|
||||
```
|
||||
|
||||
### Test Development Guidelines
|
||||
|
||||
**Timing Considerations**
|
||||
Integration tests involve real network operations and Docker container lifecycle:
|
||||
|
||||
```go
|
||||
// ❌ Wrong: Immediate assertions after async operations
|
||||
client.Execute([]string{"tailscale", "set", "--advertise-routes=10.0.0.0/24"})
|
||||
nodes, _ := headscale.ListNodes()
|
||||
require.Len(t, nodes[0].GetAvailableRoutes(), 1) // May fail due to timing
|
||||
|
||||
// ✅ Correct: Wait for async operations to complete
|
||||
client.Execute([]string{"tailscale", "set", "--advertise-routes=10.0.0.0/24"})
|
||||
require.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, nodes[0].GetAvailableRoutes(), 1)
|
||||
}, 10*time.Second, 100*time.Millisecond, "route should be advertised")
|
||||
```
|
||||
|
||||
**Common Test Patterns**
|
||||
- **Route Advertisement**: Use `EventuallyWithT` for route propagation
|
||||
- **Node State Changes**: Wait for NodeStore synchronization
|
||||
- **ACL Policy Changes**: Allow time for policy recalculation
|
||||
- **Network Connectivity**: Use ping tests with retries
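For the connectivity case, the retry shape looks like the sketch below (`client.Ping` and `peerIP` are placeholders for whatever the test scenario exposes):

```go
// Wait for connectivity instead of asserting it immediately.
require.EventuallyWithT(t, func(c *assert.CollectT) {
	assert.NoError(c, client.Ping(peerIP), "peer should become reachable") // hypothetical helper
}, 30*time.Second, 500*time.Millisecond, "connectivity between client and peer")
```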
|
||||
|
||||
**Test Data Management**
|
||||
```go
|
||||
// Node identification: Don't assume array ordering
|
||||
expectedRoutes := map[string]string{"1": "10.33.0.0/16"}
|
||||
for _, node := range nodes {
|
||||
nodeIDStr := fmt.Sprintf("%d", node.GetId())
|
||||
if route, shouldHaveRoute := expectedRoutes[nodeIDStr]; shouldHaveRoute {
|
||||
// Test the node that should have the route
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Troubleshooting Integration Tests
|
||||
|
||||
**Common Failure Patterns**
|
||||
1. **Timing Issues**: Test assertions run before async operations complete
|
||||
- **Solution**: Use `EventuallyWithT` with appropriate timeouts
|
||||
- **Timeout Guidelines**: 3-5s for route operations, 10s for complex scenarios
|
||||
|
||||
2. **Infrastructure Problems**: Disk space, Docker issues, network conflicts
|
||||
- **Check**: `go run ./cmd/hi doctor` for system health
|
||||
- **Clean**: Remove old test containers and networks
|
||||
|
||||
3. **NodeStore Synchronization**: Tests expecting immediate data availability
|
||||
- **Key Points**: Route advertisements must propagate through poll requests
|
||||
- **Fix**: Wait for NodeStore updates after Hostinfo changes
|
||||
|
||||
4. **Database Backend Differences**: SQLite vs PostgreSQL behavior differences
|
||||
- **Use**: `--postgres` flag for database-intensive tests
|
||||
- **Note**: Some timing characteristics differ between backends
|
||||
|
||||
**Debugging Failed Tests**
|
||||
1. **Check test artifacts** in `control_logs/` for detailed logs
|
||||
2. **Examine MapResponse JSON** files for protocol-level debugging
|
||||
3. **Review Headscale stderr logs** for server-side error messages
|
||||
4. **Check Tailscale client status** for network-level issues
|
||||
|
||||
**Resource Management**
|
||||
- Tests require significant disk space (each run ~100MB of logs)
|
||||
- Docker containers are cleaned up automatically on success
|
||||
- Failed tests may leave containers running - clean manually if needed
|
||||
- Use `docker system prune` periodically to reclaim space
|
||||
|
||||
### Best Practices for Test Modifications
|
||||
|
||||
1. **Always test locally** before committing integration test changes
|
||||
2. **Use appropriate timeouts** - too short causes flaky tests, too long slows CI
|
||||
3. **Clean up properly** - ensure tests don't leave persistent state
|
||||
4. **Handle both success and failure paths** in test scenarios
|
||||
5. **Document timing requirements** for complex test scenarios
|
||||
|
||||
## NodeStore Implementation Details
|
||||
|
||||
**Key Insight from Recent Work**: The NodeStore is a critical performance optimization that caches node data in memory while ensuring consistency with the database. When working with route advertisements or node state changes:
|
||||
|
||||
1. **Timing Considerations**: Route advertisements need time to propagate from clients to server. Use `require.EventuallyWithT()` patterns in tests instead of immediate assertions.
|
||||
|
||||
2. **Synchronization Points**: NodeStore updates happen at specific points like `poll.go:420` after Hostinfo changes. Ensure these are maintained when modifying the polling logic.
|
||||
|
||||
3. **Peer Visibility**: The NodeStore's `peersFunc` determines which nodes are visible to each other. Policy-based filtering is separate from monitoring visibility - expired nodes should remain visible for debugging but marked as expired.
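A stripped-down sketch of that visibility rule, using stand-in types rather than the real NodeStore signatures:

```go
package nodestore

type peer struct {
	ID      uint64
	Expired bool
}

// peersFor returns every node other than self. Expired peers are kept in the
// list (still visible for debugging, marked via Expired); policy-based
// filtering is applied separately.
func peersFor(self uint64, all []peer) []peer {
	var out []peer
	for _, p := range all {
		if p.ID == self {
			continue
		}
		out = append(out, p)
	}
	return out
}
```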
|
||||
|
||||
## Testing Guidelines
|
||||
|
||||
### Integration Test Patterns
|
||||
```go
|
||||
// Use EventuallyWithT for async operations
|
||||
require.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
// Check expected state
|
||||
}, 10*time.Second, 100*time.Millisecond, "description")
|
||||
|
||||
// Node route checking by actual node properties, not array position
|
||||
var routeNode *v1.Node
|
||||
for _, node := range nodes {
|
||||
if nodeIDStr := fmt.Sprintf("%d", node.GetId()); expectedRoutes[nodeIDStr] != "" {
|
||||
routeNode = node
|
||||
break
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Running Problematic Tests
|
||||
- Some tests require significant time (e.g., `TestNodeOnlineStatus` runs for 12 minutes)
|
||||
- Infrastructure issues like disk space can cause test failures unrelated to code changes
|
||||
- Use `--postgres` flag when testing database-heavy scenarios
|
||||
|
||||
## Important Notes
|
||||
|
||||
- **Dependencies**: Use `nix develop` for consistent toolchain (Go, buf, protobuf tools, linting)
|
||||
- **Protocol Buffers**: Changes to `proto/` require `make generate` and should be committed separately
|
||||
- **Code Style**: Enforced via golangci-lint with golines (width 88) and gofumpt formatting
|
||||
- **Database**: Supports both SQLite (development) and PostgreSQL (production/testing)
|
||||
- **Integration Tests**: Require Docker and can consume significant disk space
|
||||
- **Performance**: NodeStore optimizations are critical for scale - be careful with changes to state management
|
||||
|
||||
## Debugging Integration Tests
|
||||
|
||||
Test artifacts are preserved in `control_logs/TIMESTAMP-ID/` including:
|
||||
- Headscale server logs (stderr/stdout)
|
||||
- Tailscale client logs and status
|
||||
- Database dumps and network captures
|
||||
- MapResponse JSON files for protocol debugging
|
||||
|
||||
When tests fail, check these artifacts first before assuming code issues.
|
||||
File diff suppressed because it is too large
@@ -1,201 +0,0 @@
|
||||
# CLI Standardization Summary
|
||||
|
||||
## Changes Made
|
||||
|
||||
### 1. Command Naming Standardization
|
||||
- **Fixed**: `backfillips` → `backfill-ips` (with backward compat alias)
|
||||
- **Fixed**: `dumpConfig` → `dump-config` (with backward compat alias)
|
||||
- **Result**: All commands now use kebab-case consistently
|
||||
|
||||
### 2. Flag Standardization
|
||||
|
||||
#### Node Commands
|
||||
- **Added**: `--node` flag as primary way to specify nodes
|
||||
- **Deprecated**: `--identifier` flag (hidden, marked deprecated)
|
||||
- **Backward Compatible**: Both flags work, `--identifier` shows deprecation warning
|
||||
- **Smart Lookup Ready**: `--node` accepts strings for future name/hostname/IP lookup
|
||||
|
||||
#### User Commands
|
||||
- **Updated**: User identification flow prepared for `--user` flag
|
||||
- **Maintained**: Existing `--name` and `--identifier` flags for backward compatibility
|
||||
|
||||
### 3. Description Consistency
|
||||
- **Fixed**: "Api" → "API" throughout
|
||||
- **Fixed**: Capitalization consistency in short descriptions
|
||||
- **Fixed**: Removed unnecessary periods from short descriptions
|
||||
- **Standardized**: "Handle/Manage the X of Headscale" pattern
|
||||
|
||||
### 4. Type Consistency
|
||||
- **Standardized**: Node IDs use `uint64` consistently
|
||||
- **Maintained**: Backward compatibility with existing flag types
|
||||
|
||||
## Current Status
|
||||
|
||||
### ✅ Completed
|
||||
- Command naming (kebab-case)
|
||||
- Flag deprecation and aliasing
|
||||
- Description standardization
|
||||
- Backward compatibility preservation
|
||||
- Helper functions for flag processing
|
||||
- **SMART LOOKUP IMPLEMENTATION**:
|
||||
- Enhanced `ListNodesRequest` proto with ID, name, hostname, IP filters
|
||||
- Implemented smart filtering in `ListNodes` gRPC method
|
||||
- Added CLI smart lookup functions for nodes and users
|
||||
- Single match validation with helpful error messages
|
||||
- Automatic detection: ID (numeric) vs IP vs name/hostname/email
|
||||
|
||||
### ✅ Smart Lookup Features
|
||||
- **Node Lookup**: By ID, hostname, or IP address
|
||||
- **User Lookup**: By ID, username, or email address
|
||||
- **Single Match Enforcement**: Errors if 0 or >1 matches found
|
||||
- **Helpful Error Messages**: Shows all matches when ambiguous
|
||||
- **Full Backward Compatibility**: All existing flags still work
|
||||
- **Enhanced List Commands**: Both `nodes list` and `users list` support all filter types
|
||||
|
||||
## Breaking Changes
|
||||
|
||||
**None.** All changes maintain full backward compatibility through flag aliases and deprecation warnings.
|
||||
|
||||
## Implementation Details
|
||||
|
||||
### Smart Lookup Algorithm
|
||||
|
||||
1. **Input Detection**:
|
||||
```go
|
||||
if numeric && > 0 -> treat as ID
|
||||
else if contains "@" -> treat as email (users only)
|
||||
else if valid IP address -> treat as IP (nodes only)
|
||||
else -> treat as name/hostname
|
||||
```
|
||||
|
||||
2. **gRPC Filtering**:
|
||||
- Uses enhanced `ListNodes`/`ListUsers` with specific filters
|
||||
- Server-side filtering for optimal performance
|
||||
- Single transaction per lookup
|
||||
|
||||
3. **Match Validation**:
|
||||
- Exactly 1 match: Return ID
|
||||
- 0 matches: Error with "not found" message
|
||||
- >1 matches: Error listing all matches for disambiguation
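The detection step can be written with only the standard library; the helper below is illustrative, not the actual CLI code:

```go
package main

import (
	"fmt"
	"net/netip"
	"strconv"
	"strings"
)

// classify mirrors the ordering of the heuristic above.
func classify(s string) string {
	if id, err := strconv.ParseUint(s, 10, 64); err == nil && id > 0 {
		return "id"
	}
	if strings.Contains(s, "@") {
		return "email" // users only
	}
	if _, err := netip.ParseAddr(s); err == nil {
		return "ip" // nodes only
	}
	return "name" // fall back to name/hostname lookup
}

func main() {
	for _, s := range []string{"123", "alice@company.com", "100.64.0.1", "my-laptop"} {
		fmt.Printf("%-20s -> %s\n", s, classify(s))
	}
}
```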
|
||||
|
||||
### Enhanced Proto Definitions
|
||||
|
||||
```protobuf
|
||||
message ListNodesRequest {
|
||||
string user = 1; // existing
|
||||
uint64 id = 2; // new: filter by ID
|
||||
string name = 3; // new: filter by hostname
|
||||
string hostname = 4; // new: alias for name
|
||||
repeated string ip_addresses = 5; // new: filter by IPs
|
||||
}
|
||||
```
|
||||
|
||||
### Future Enhancements
|
||||
|
||||
- **Fuzzy Matching**: Partial name matching with confirmation
|
||||
- **Recently Used**: Cache recently accessed nodes/users
|
||||
- **Tab Completion**: Shell completion for names/hostnames
|
||||
- **Bulk Operations**: Multi-select with pattern matching
|
||||
|
||||
## Migration Path for Users
|
||||
|
||||
### Now Available (Current Release)
|
||||
```bash
|
||||
# Old way (still works, shows deprecation warning)
|
||||
headscale nodes expire --identifier 123
|
||||
|
||||
# New way with smart lookup:
|
||||
headscale nodes expire --node 123 # by ID
|
||||
headscale nodes expire --node "my-laptop" # by hostname
|
||||
headscale nodes expire --node "100.64.0.1" # by Tailscale IP
|
||||
headscale nodes expire --node "192.168.1.100" # by real IP
|
||||
|
||||
# User operations:
|
||||
headscale users destroy --user 123 # by ID
|
||||
headscale users destroy --user "alice" # by username
|
||||
headscale users destroy --user "alice@company.com" # by email
|
||||
|
||||
# Enhanced list commands with filtering:
|
||||
headscale nodes list --node "laptop" # filter nodes by name
|
||||
headscale nodes list --ip "100.64.0.1" # filter nodes by IP
|
||||
headscale nodes list --user "alice" # filter nodes by user
|
||||
headscale users list --user "alice" # smart lookup user
|
||||
headscale users list --email "@company.com" # filter by email domain
|
||||
headscale users list --name "alice" # filter by exact name
|
||||
|
||||
# Error handling examples:
|
||||
headscale nodes expire --node "laptop"
|
||||
# Error: multiple nodes found matching 'laptop': ID=1 name=laptop-alice, ID=2 name=laptop-bob
|
||||
|
||||
headscale nodes expire --node "nonexistent"
|
||||
# Error: no node found matching 'nonexistent'
|
||||
```
|
||||
|
||||
## Command Structure Overview
|
||||
|
||||
```
|
||||
headscale [global-flags] <command> [command-flags] <subcommand> [subcommand-flags] [args]
|
||||
|
||||
Global Flags:
|
||||
--config, -c config file path
|
||||
--output, -o output format (json, yaml, json-line)
|
||||
--force disable prompts
|
||||
|
||||
Commands:
|
||||
├── serve
|
||||
├── version
|
||||
├── config-test
|
||||
├── dump-config (alias: dumpConfig)
|
||||
├── mockoidc
|
||||
├── generate/
|
||||
│ └── private-key
|
||||
├── nodes/
|
||||
│ ├── list (--user, --tags, --columns)
|
||||
│ ├── register (--user, --key)
|
||||
│ ├── list-routes (--node)
|
||||
│ ├── expire (--node)
|
||||
│ ├── rename (--node) <new-name>
|
||||
│ ├── delete (--node)
|
||||
│ ├── move (--node, --user)
|
||||
│ ├── tag (--node, --tags)
|
||||
│ ├── approve-routes (--node, --routes)
|
||||
│ └── backfill-ips (alias: backfillips)
|
||||
├── users/
|
||||
│ ├── create <name> (--display-name, --email, --picture-url)
|
||||
│ ├── list (--user, --name, --email, --columns)
|
||||
│ ├── destroy (--user|--name|--identifier)
|
||||
│ └── rename (--user|--name|--identifier, --new-name)
|
||||
├── apikeys/
|
||||
│ ├── list
|
||||
│ ├── create (--expiration)
|
||||
│ ├── expire (--prefix)
|
||||
│ └── delete (--prefix)
|
||||
├── preauthkeys/
|
||||
│ ├── list (--user)
|
||||
│ ├── create (--user, --reusable, --ephemeral, --expiration, --tags)
|
||||
│ └── expire (--user) <key>
|
||||
├── policy/
|
||||
│ ├── get
|
||||
│ ├── set (--file)
|
||||
│ └── check (--file)
|
||||
└── debug/
|
||||
└── create-node (--name, --user, --key, --route)
|
||||
```
|
||||
|
||||
## Deprecated Flags
|
||||
|
||||
All deprecated flags continue to work but show warnings:
|
||||
|
||||
- `--identifier` → use `--node` (for node commands) or `--user` (for user commands)
|
||||
- `--namespace` → use `--user` (already implemented)
|
||||
- `dumpConfig` → use `dump-config`
|
||||
- `backfillips` → use `backfill-ips`
|
||||
|
||||
## Error Handling
|
||||
|
||||
Improved error messages provide clear guidance:
|
||||
```
|
||||
Error: node specifier must be a numeric ID (smart lookup by name/hostname/IP not yet implemented)
|
||||
Error: --node flag is required
|
||||
Error: --user flag is required
|
||||
```
|
||||
@@ -2,7 +2,7 @@
|
||||
# and are in no way endorsed by Headscale's maintainers as an
|
||||
# official nor supported release or distribution.
|
||||
|
||||
FROM docker.io/golang:1.24-bookworm
|
||||
FROM docker.io/golang:1.23-bookworm
|
||||
ARG VERSION=dev
|
||||
ENV GOPATH /go
|
||||
WORKDIR /go/src/headscale
|
||||
@@ -18,7 +18,7 @@ RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN CGO_ENABLED=0 GOOS=linux go install -a ./cmd/headscale && test -e /go/bin/headscale
|
||||
RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale && test -e /go/bin/headscale
|
||||
|
||||
# Need to reset the entrypoint or everything will run as a busybox script
|
||||
ENTRYPOINT []
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
# This Dockerfile is more or less lifted from tailscale/tailscale
|
||||
# to ensure a similar build process when testing the HEAD of tailscale.
|
||||
|
||||
FROM golang:1.24-alpine AS build-env
|
||||
FROM golang:1.23-alpine AS build-env
|
||||
|
||||
WORKDIR /go/src
|
||||
|
||||
|
||||
158
Makefile
@@ -1,130 +1,64 @@
|
||||
# Headscale Makefile
|
||||
# Modern Makefile following best practices
|
||||
# Calculate version
|
||||
version ?= $(shell git describe --always --tags --dirty)
|
||||
|
||||
# Version calculation
|
||||
VERSION ?= $(shell git describe --always --tags --dirty)
|
||||
rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
|
||||
|
||||
# Build configuration
|
||||
# Determine if OS supports pie
|
||||
GOOS ?= $(shell uname | tr '[:upper:]' '[:lower:]')
|
||||
ifeq ($(filter $(GOOS), openbsd netbsd solaris plan9), )
|
||||
PIE_FLAGS = -buildmode=pie
|
||||
ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
|
||||
pieflags = -buildmode=pie
|
||||
else
|
||||
endif
|
||||
|
||||
# Tool availability check with nix warning
|
||||
define check_tool
|
||||
@command -v $(1) >/dev/null 2>&1 || { \
|
||||
echo "Warning: $(1) not found. Run 'nix develop' to ensure all dependencies are available."; \
|
||||
exit 1; \
|
||||
}
|
||||
endef
|
||||
|
||||
# Source file collections using shell find for better performance
|
||||
GO_SOURCES := $(shell find . -name '*.go' -not -path './gen/*' -not -path './vendor/*')
|
||||
PROTO_SOURCES := $(shell find . -name '*.proto' -not -path './gen/*' -not -path './vendor/*')
|
||||
DOC_SOURCES := $(shell find . \( -name '*.md' -o -name '*.yaml' -o -name '*.yml' -o -name '*.ts' -o -name '*.js' -o -name '*.html' -o -name '*.css' -o -name '*.scss' -o -name '*.sass' \) -not -path './gen/*' -not -path './vendor/*' -not -path './node_modules/*')
|
||||
|
||||
# Default target
|
||||
.PHONY: all
|
||||
all: lint test build
|
||||
|
||||
# Dependency checking
|
||||
.PHONY: check-deps
|
||||
check-deps:
|
||||
$(call check_tool,go)
|
||||
$(call check_tool,golangci-lint)
|
||||
$(call check_tool,gofumpt)
|
||||
$(call check_tool,prettier)
|
||||
$(call check_tool,clang-format)
|
||||
$(call check_tool,buf)
|
||||
|
||||
# Build targets
|
||||
.PHONY: build
|
||||
build: check-deps $(GO_SOURCES) go.mod go.sum
|
||||
@echo "Building headscale..."
|
||||
go build $(PIE_FLAGS) -ldflags "-X main.version=$(VERSION)" -o headscale ./cmd/headscale
|
||||
|
||||
# Test targets
|
||||
.PHONY: test
|
||||
test: check-deps $(GO_SOURCES) go.mod go.sum
|
||||
@echo "Running Go tests..."
|
||||
go test -race ./...
|
||||
# GO_SOURCES = $(wildcard *.go)
|
||||
# PROTO_SOURCES = $(wildcard **/*.proto)
|
||||
GO_SOURCES = $(call rwildcard,,*.go)
|
||||
PROTO_SOURCES = $(call rwildcard,,*.proto)
|
||||
|
||||
|
||||
# Formatting targets
|
||||
.PHONY: fmt
|
||||
build:
|
||||
nix build
|
||||
|
||||
dev: lint test build
|
||||
|
||||
test:
|
||||
gotestsum -- -short -race -coverprofile=coverage.out ./...
|
||||
|
||||
test_integration:
|
||||
docker run \
|
||||
-t --rm \
|
||||
-v ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
-v $$PWD:$$PWD -w $$PWD/integration \
|
||||
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||
-v $$PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go run gotest.tools/gotestsum@latest -- -race -failfast ./... -timeout 120m -parallel 8
|
||||
|
||||
lint:
|
||||
golangci-lint run --fix --timeout 10m
|
||||
|
||||
fmt: fmt-go fmt-prettier fmt-proto
|
||||
|
||||
.PHONY: fmt-go
|
||||
fmt-go: check-deps $(GO_SOURCES)
|
||||
@echo "Formatting Go code..."
|
||||
fmt-prettier:
|
||||
prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
prettier --write --print-width 80 --prose-wrap always CHANGELOG.md
|
||||
|
||||
fmt-go:
|
||||
# TODO(kradalby): Reeval if we want to use 88 in the future.
|
||||
# golines --max-len=88 --base-formatter=gofumpt -w $(GO_SOURCES)
|
||||
gofumpt -l -w .
|
||||
golangci-lint run --fix
|
||||
|
||||
.PHONY: fmt-prettier
|
||||
fmt-prettier: check-deps $(DOC_SOURCES)
|
||||
@echo "Formatting documentation and config files..."
|
||||
prettier --write '**/*.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
prettier --write --print-width 80 --prose-wrap always CHANGELOG.md
|
||||
|
||||
.PHONY: fmt-proto
|
||||
fmt-proto: check-deps $(PROTO_SOURCES)
|
||||
@echo "Formatting Protocol Buffer files..."
|
||||
fmt-proto:
|
||||
clang-format -i $(PROTO_SOURCES)
|
||||
|
||||
# Linting targets
|
||||
.PHONY: lint
|
||||
lint: lint-go lint-proto
|
||||
proto-lint:
|
||||
cd proto/ && go run github.com/bufbuild/buf/cmd/buf lint
|
||||
|
||||
.PHONY: lint-go
|
||||
lint-go: check-deps $(GO_SOURCES) go.mod go.sum
|
||||
@echo "Linting Go code..."
|
||||
golangci-lint run --timeout 10m
|
||||
compress: build
|
||||
upx --brute headscale
|
||||
|
||||
.PHONY: lint-proto
|
||||
lint-proto: check-deps $(PROTO_SOURCES)
|
||||
@echo "Linting Protocol Buffer files..."
|
||||
cd proto/ && buf lint
|
||||
|
||||
# Code generation
|
||||
.PHONY: generate
|
||||
generate: check-deps $(PROTO_SOURCES)
|
||||
@echo "Generating code from Protocol Buffers..."
|
||||
generate:
|
||||
rm -rf gen
|
||||
buf generate proto
|
||||
|
||||
# Clean targets
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm -rf headscale gen
|
||||
|
||||
# Development workflow
|
||||
.PHONY: dev
|
||||
dev: fmt lint test build
|
||||
|
||||
# Help target
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo "Headscale Development Makefile"
|
||||
@echo ""
|
||||
@echo "Main targets:"
|
||||
@echo " all - Run lint, test, and build (default)"
|
||||
@echo " build - Build headscale binary"
|
||||
@echo " test - Run Go tests"
|
||||
@echo " fmt - Format all code (Go, docs, proto)"
|
||||
@echo " lint - Lint all code (Go, proto)"
|
||||
@echo " generate - Generate code from Protocol Buffers"
|
||||
@echo " dev - Full development workflow (fmt + lint + test + build)"
|
||||
@echo " clean - Clean build artifacts"
|
||||
@echo ""
|
||||
@echo "Specific targets:"
|
||||
@echo " fmt-go - Format Go code only"
|
||||
@echo " fmt-prettier - Format documentation only"
|
||||
@echo " fmt-proto - Format Protocol Buffer files only"
|
||||
@echo " lint-go - Lint Go code only"
|
||||
@echo " lint-proto - Lint Protocol Buffer files only"
|
||||
@echo ""
|
||||
@echo "Dependencies:"
|
||||
@echo " check-deps - Verify required tools are available"
|
||||
@echo ""
|
||||
@echo "Note: If not running in a nix shell, ensure dependencies are available:"
|
||||
@echo " nix develop"
|
||||
25
README.md
@@ -7,12 +7,8 @@ An open source, self-hosted implementation of the Tailscale control server.
|
||||
Join our [Discord server](https://discord.gg/c84AZQhmpx) for a chat.
|
||||
|
||||
**Note:** Always select the same GitHub tag as the released version you use
|
||||
to ensure you have the correct example configuration. The `main` branch might
|
||||
contain unreleased changes. The documentation is available for stable and
|
||||
development versions:
|
||||
|
||||
- [Documentation for the stable version](https://headscale.net/stable/)
|
||||
- [Documentation for the development version](https://headscale.net/development/)
|
||||
to ensure you have the correct example configuration and documentation.
|
||||
The `main` branch might contain unreleased changes.
|
||||
|
||||
## What is Tailscale
|
||||
|
||||
@@ -139,28 +135,15 @@ make test
|
||||
To build the program:
|
||||
|
||||
```shell
|
||||
make build
|
||||
nix build
|
||||
```
|
||||
|
||||
### Development workflow
|
||||
or
|
||||
|
||||
We recommend using Nix for dependency management to ensure you have all required tools. If you prefer to manage dependencies yourself, you can use Make directly:
|
||||
|
||||
**With Nix (recommended):**
|
||||
```shell
|
||||
nix develop
|
||||
make test
|
||||
make build
|
||||
```
|
||||
|
||||
**With your own dependencies:**
|
||||
```shell
|
||||
make test
|
||||
make build
|
||||
```
|
||||
|
||||
The Makefile will warn you if any required tools are missing and suggest running `nix develop`. Run `make help` to see all available targets.
|
||||
|
||||
## Contributors
|
||||
|
||||
<a href="https://github.com/juanfont/headscale/graphs/contributors">
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
@@ -15,6 +14,11 @@ import (
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
// 90 days.
|
||||
DefaultAPIKeyExpiry = "90d"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(apiKeysCmd)
|
||||
apiKeysCmd.AddCommand(listAPIKeys)
|
||||
@@ -39,80 +43,75 @@ func init() {
|
||||
|
||||
var apiKeysCmd = &cobra.Command{
|
||||
Use: "apikeys",
|
||||
Short: "Handle the API keys in Headscale",
|
||||
Short: "Handle the Api keys in Headscale",
|
||||
Aliases: []string{"apikey", "api"},
|
||||
}
|
||||
|
||||
var listAPIKeys = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List the API keys for Headscale",
|
||||
Short: "List the Api keys for headscale",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
err := WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.ListApiKeysRequest{}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.ListApiKeys(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting the list of keys: %s", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
request := &v1.ListApiKeysRequest{}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetApiKeys(), "", output)
|
||||
return nil
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{
|
||||
{"ID", "Prefix", "Expiration", "Created"},
|
||||
}
|
||||
for _, key := range response.GetApiKeys() {
|
||||
expiration := "-"
|
||||
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = ColourTime(key.GetExpiration().AsTime())
|
||||
}
|
||||
|
||||
tableData = append(tableData, []string{
|
||||
strconv.FormatUint(key.GetId(), util.Base10),
|
||||
key.GetPrefix(),
|
||||
expiration,
|
||||
key.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
|
||||
})
|
||||
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
response, err := client.ListApiKeys(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting the list of keys: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetApiKeys(), "", output)
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{
|
||||
{"ID", "Prefix", "Expiration", "Created"},
|
||||
}
|
||||
for _, key := range response.GetApiKeys() {
|
||||
expiration := "-"
|
||||
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = ColourTime(key.GetExpiration().AsTime())
|
||||
}
|
||||
|
||||
tableData = append(tableData, []string{
|
||||
strconv.FormatUint(key.GetId(), util.Base10),
|
||||
key.GetPrefix(),
|
||||
expiration,
|
||||
key.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
|
||||
})
|
||||
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var createAPIKeyCmd = &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "Create a new API key",
|
||||
Short: "Creates a new Api key",
|
||||
Long: `
|
||||
Creates a new Api key, the Api key is only visible on creation
|
||||
and cannot be retrieved again.
|
||||
If you loose a key, create a new one and revoke (expire) the old one.`,
|
||||
Aliases: []string{"c", "new"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
request := &v1.CreateApiKeyRequest{}
|
||||
|
||||
@@ -125,101 +124,99 @@ If you loose a key, create a new one and revoke (expire) the old one.`,
|
||||
fmt.Sprintf("Could not parse duration: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
expiration := time.Now().UTC().Add(time.Duration(duration))
|
||||
|
||||
request.Expiration = timestamppb.New(expiration)
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
response, err := client.CreateApiKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
SuccessOutput(response.GetApiKey(), response.GetApiKey(), output)
|
||||
return nil
|
||||
})
|
||||
response, err := client.CreateApiKey(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetApiKey(), response.GetApiKey(), output)
|
||||
},
|
||||
}
|
||||
|
||||
var expireAPIKeyCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Short: "Expire an API key",
|
||||
Short: "Expire an ApiKey",
|
||||
Aliases: []string{"revoke", "exp", "e"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
prefix, err := cmd.Flags().GetString("prefix")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting prefix from CLI flag: %s", err), output)
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.ExpireApiKeyRequest{
|
||||
Prefix: prefix,
|
||||
}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.ExpireApiKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot expire Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
request := &v1.ExpireApiKeyRequest{
|
||||
Prefix: prefix,
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key expired", output)
|
||||
return nil
|
||||
})
|
||||
response, err := client.ExpireApiKey(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot expire Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key expired", output)
|
||||
},
|
||||
}
|
||||
|
||||
var deleteAPIKeyCmd = &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete an API key",
|
||||
Short: "Delete an ApiKey",
|
||||
Aliases: []string{"remove", "del"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
prefix, err := cmd.Flags().GetString("prefix")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting prefix from CLI flag: %s", err), output)
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.DeleteApiKeyRequest{
|
||||
Prefix: prefix,
|
||||
}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.DeleteApiKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot delete Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
request := &v1.DeleteApiKeyRequest{
|
||||
Prefix: prefix,
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key deleted", output)
|
||||
return nil
|
||||
})
|
||||
response, err := client.DeleteApiKey(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot delete Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key deleted", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
)
|
||||
|
||||
// WithClient handles gRPC client setup and cleanup, calls fn with client and context
|
||||
func WithClient(fn func(context.Context, v1.HeadscaleServiceClient) error) error {
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
return fn(ctx, client)
|
||||
}
|
||||
@@ -11,8 +11,8 @@ func init() {
|
||||
|
||||
var configTestCmd = &cobra.Command{
|
||||
Use: "configtest",
|
||||
Short: "Test the configuration",
|
||||
Long: "Run a test of the configuration and exit",
|
||||
Short: "Test the configuration.",
|
||||
Long: "Run a test of the configuration and exit.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
_, err := newHeadscaleServerWithConfig()
|
||||
if err != nil {
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConfigTestCommand(t *testing.T) {
|
||||
// Test that the configtest command exists and is properly configured
|
||||
assert.NotNil(t, configTestCmd)
|
||||
assert.Equal(t, "configtest", configTestCmd.Use)
|
||||
assert.Equal(t, "Test the configuration.", configTestCmd.Short)
|
||||
assert.Equal(t, "Run a test of the configuration and exit.", configTestCmd.Long)
|
||||
assert.NotNil(t, configTestCmd.Run)
|
||||
}
|
||||
|
||||
func TestConfigTestCommandInRootCommand(t *testing.T) {
|
||||
// Test that configtest is available as a subcommand of root
|
||||
cmd, _, err := rootCmd.Find([]string{"configtest"})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "configtest", cmd.Name())
|
||||
assert.Equal(t, configTestCmd, cmd)
|
||||
}
|
||||
|
||||
func TestConfigTestCommandHelp(t *testing.T) {
|
||||
// Test that the command has proper help text
|
||||
assert.NotEmpty(t, configTestCmd.Short)
|
||||
assert.NotEmpty(t, configTestCmd.Long)
|
||||
assert.Contains(t, configTestCmd.Short, "configuration")
|
||||
assert.Contains(t, configTestCmd.Long, "test")
|
||||
assert.Contains(t, configTestCmd.Long, "configuration")
|
||||
}
|
||||
|
||||
// Note: We can't easily test the actual execution of configtest because:
|
||||
// 1. It depends on configuration files being present
|
||||
// 2. It calls log.Fatal() which would exit the test process
|
||||
// 3. It tries to initialize a full Headscale server
|
||||
//
|
||||
// In a real refactor, we would:
|
||||
// 1. Extract the configuration validation logic to a testable function
|
||||
// 2. Return errors instead of calling log.Fatal()
|
||||
// 3. Accept configuration as a parameter instead of loading from global state
|
||||
//
|
||||
// For now, we test the command structure and that it's properly wired up.
|
||||
@@ -1,7 +1,6 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
@@ -15,6 +14,11 @@ const (
|
||||
errPreAuthKeyMalformed = Error("key is malformed. expected 64 hex characters with `nodekey` prefix")
|
||||
)
|
||||
|
||||
// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
|
||||
type Error string
|
||||
|
||||
func (e Error) Error() string { return string(e) }
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(debugCmd)
|
||||
|
||||
@@ -25,6 +29,11 @@ func init() {
|
||||
}
|
||||
createNodeCmd.Flags().StringP("user", "u", "", "User")
|
||||
|
||||
createNodeCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
createNodeNamespaceFlag := createNodeCmd.Flags().Lookup("namespace")
|
||||
createNodeNamespaceFlag.Deprecated = deprecateNamespaceMessage
|
||||
createNodeNamespaceFlag.Hidden = true
|
||||
|
||||
err = createNodeCmd.MarkFlagRequired("user")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
@@ -50,14 +59,17 @@ var createNodeCmd = &cobra.Command{
|
||||
Use: "create-node",
|
||||
Short: "Create a node that can be registered with `nodes register <>` command",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
name, err := cmd.Flags().GetString("name")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
@@ -65,7 +77,6 @@ var createNodeCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error getting node from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
registrationID, err := cmd.Flags().GetString("key")
|
||||
@@ -75,7 +86,6 @@ var createNodeCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error getting key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = types.RegistrationIDFromString(registrationID)
|
||||
@@ -85,7 +95,6 @@ var createNodeCmd = &cobra.Command{
|
||||
fmt.Sprintf("Failed to parse machine key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
routes, err := cmd.Flags().GetStringSlice("route")
|
||||
@@ -95,32 +104,24 @@ var createNodeCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error getting routes from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.DebugCreateNodeRequest{
|
||||
Key: registrationID,
|
||||
Name: name,
|
||||
User: user,
|
||||
Routes: routes,
|
||||
}
|
||||
request := &v1.DebugCreateNodeRequest{
|
||||
Key: registrationID,
|
||||
Name: name,
|
||||
User: user,
|
||||
Routes: routes,
|
||||
}
|
||||
|
||||
response, err := client.DebugCreateNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot create node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetNode(), "Node created", output)
|
||||
return nil
|
||||
})
|
||||
response, err := client.DebugCreateNode(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetNode(), "Node created", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,144 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDebugCommand(t *testing.T) {
|
||||
// Test that the debug command exists and is properly configured
|
||||
assert.NotNil(t, debugCmd)
|
||||
assert.Equal(t, "debug", debugCmd.Use)
|
||||
assert.Equal(t, "debug and testing commands", debugCmd.Short)
|
||||
assert.Equal(t, "debug contains extra commands used for debugging and testing headscale", debugCmd.Long)
|
||||
}
|
||||
|
||||
func TestDebugCommandInRootCommand(t *testing.T) {
|
||||
// Test that debug is available as a subcommand of root
|
||||
cmd, _, err := rootCmd.Find([]string{"debug"})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "debug", cmd.Name())
|
||||
assert.Equal(t, debugCmd, cmd)
|
||||
}
|
||||
|
||||
func TestCreateNodeCommand(t *testing.T) {
|
||||
// Test that the create-node command exists and is properly configured
|
||||
assert.NotNil(t, createNodeCmd)
|
||||
assert.Equal(t, "create-node", createNodeCmd.Use)
|
||||
assert.Equal(t, "Create a node that can be registered with `nodes register <>` command", createNodeCmd.Short)
|
||||
assert.NotNil(t, createNodeCmd.Run)
|
||||
}
|
||||
|
||||
func TestCreateNodeCommandInDebugCommand(t *testing.T) {
|
||||
// Test that create-node is available as a subcommand of debug
|
||||
cmd, _, err := rootCmd.Find([]string{"debug", "create-node"})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "create-node", cmd.Name())
|
||||
assert.Equal(t, createNodeCmd, cmd)
|
||||
}
|
||||
|
||||
func TestCreateNodeCommandFlags(t *testing.T) {
|
||||
// Test that create-node has the required flags
|
||||
|
||||
// Test name flag
|
||||
nameFlag := createNodeCmd.Flags().Lookup("name")
|
||||
assert.NotNil(t, nameFlag)
|
||||
assert.Equal(t, "", nameFlag.Shorthand) // No shorthand for name
|
||||
assert.Equal(t, "", nameFlag.DefValue)
|
||||
|
||||
// Test user flag
|
||||
userFlag := createNodeCmd.Flags().Lookup("user")
|
||||
assert.NotNil(t, userFlag)
|
||||
assert.Equal(t, "u", userFlag.Shorthand)
|
||||
|
||||
// Test key flag
|
||||
keyFlag := createNodeCmd.Flags().Lookup("key")
|
||||
assert.NotNil(t, keyFlag)
|
||||
assert.Equal(t, "k", keyFlag.Shorthand)
|
||||
|
||||
// Test route flag
|
||||
routeFlag := createNodeCmd.Flags().Lookup("route")
|
||||
assert.NotNil(t, routeFlag)
|
||||
assert.Equal(t, "r", routeFlag.Shorthand)
|
||||
|
||||
}
|
||||
|
||||
func TestCreateNodeCommandRequiredFlags(t *testing.T) {
|
||||
// Test that required flags are marked as required
|
||||
// We can't easily test the actual requirement enforcement without executing the command
|
||||
// But we can test that the flags exist and have the expected properties
|
||||
|
||||
// These flags should be required based on the init() function
|
||||
requiredFlags := []string{"name", "user", "key"}
|
||||
|
||||
for _, flagName := range requiredFlags {
|
||||
flag := createNodeCmd.Flags().Lookup(flagName)
|
||||
assert.NotNil(t, flag, "Required flag %s should exist", flagName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorType(t *testing.T) {
|
||||
// Test the Error type implementation
|
||||
err := errPreAuthKeyMalformed
|
||||
assert.Equal(t, "key is malformed. expected 64 hex characters with `nodekey` prefix", err.Error())
|
||||
assert.Equal(t, "key is malformed. expected 64 hex characters with `nodekey` prefix", string(err))
|
||||
|
||||
// Test that it implements the error interface
|
||||
var genericErr error = err
|
||||
assert.Equal(t, "key is malformed. expected 64 hex characters with `nodekey` prefix", genericErr.Error())
|
||||
}
|
||||
|
||||
func TestErrorConstants(t *testing.T) {
|
||||
// Test that error constants are defined properly
|
||||
assert.Equal(t, Error("key is malformed. expected 64 hex characters with `nodekey` prefix"), errPreAuthKeyMalformed)
|
||||
}
|
||||
|
||||
func TestDebugCommandStructure(t *testing.T) {
|
||||
// Test that debug has create-node as a subcommand
|
||||
found := false
|
||||
for _, subcmd := range debugCmd.Commands() {
|
||||
if subcmd.Name() == "create-node" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "create-node should be a subcommand of debug")
|
||||
}
|
||||
|
||||
func TestCreateNodeCommandHelp(t *testing.T) {
|
||||
// Test that the command has proper help text
|
||||
assert.NotEmpty(t, createNodeCmd.Short)
|
||||
assert.Contains(t, createNodeCmd.Short, "Create a node")
|
||||
assert.Contains(t, createNodeCmd.Short, "nodes register")
|
||||
}
|
||||
|
||||
func TestCreateNodeCommandFlagDescriptions(t *testing.T) {
|
||||
// Test that flags have appropriate usage descriptions
|
||||
nameFlag := createNodeCmd.Flags().Lookup("name")
|
||||
assert.Equal(t, "Name", nameFlag.Usage)
|
||||
|
||||
userFlag := createNodeCmd.Flags().Lookup("user")
|
||||
assert.Equal(t, "User", userFlag.Usage)
|
||||
|
||||
keyFlag := createNodeCmd.Flags().Lookup("key")
|
||||
assert.Equal(t, "Key", keyFlag.Usage)
|
||||
|
||||
routeFlag := createNodeCmd.Flags().Lookup("route")
|
||||
assert.Contains(t, routeFlag.Usage, "routes to advertise")
|
||||
|
||||
}
|
||||
|
||||
// Note: We can't easily test the actual execution of create-node because:
|
||||
// 1. It depends on gRPC client configuration
|
||||
// 2. It calls SuccessOutput/ErrorOutput which exit the process
|
||||
// 3. It requires valid registration keys and user setup
|
||||
//
|
||||
// In a real refactor, we would:
|
||||
// 1. Extract the business logic to testable functions
|
||||
// 2. Use dependency injection for the gRPC client
|
||||
// 3. Return errors instead of calling ErrorOutput/SuccessOutput
|
||||
// 4. Add validation functions that can be tested independently
|
||||
//
|
||||
// For now, we test the command structure and flag configuration.
|
||||
@@ -12,10 +12,9 @@ func init() {
|
||||
}
|
||||
|
||||
var dumpConfigCmd = &cobra.Command{
|
||||
Use: "dump-config",
|
||||
Short: "Dump current config to /etc/headscale/config.dump.yaml, integration test only",
|
||||
Aliases: []string{"dumpConfig"},
|
||||
Hidden: true,
|
||||
Use: "dumpConfig",
|
||||
Short: "dump current config to /etc/headscale/config.dump.yaml, integration test only",
|
||||
Hidden: true,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
|
||||
@@ -22,7 +22,7 @@ var generatePrivateKeyCmd = &cobra.Command{
|
||||
Use: "private-key",
|
||||
Short: "Generate a private key for the headscale server",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
machineKey := key.NewMachine()
|
||||
|
||||
machineKeyStr, err := machineKey.MarshalText()
|
||||
|
||||
@@ -1,230 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
func TestGenerateCommand(t *testing.T) {
|
||||
// Test that the generate command exists and shows help
|
||||
cmd := &cobra.Command{
|
||||
Use: "headscale",
|
||||
Short: "headscale - a Tailscale control server",
|
||||
}
|
||||
|
||||
cmd.AddCommand(generateCmd)
|
||||
|
||||
out := new(bytes.Buffer)
|
||||
cmd.SetOut(out)
|
||||
cmd.SetErr(out)
|
||||
cmd.SetArgs([]string{"generate", "--help"})
|
||||
|
||||
err := cmd.Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
outStr := out.String()
|
||||
assert.Contains(t, outStr, "Generate commands")
|
||||
assert.Contains(t, outStr, "private-key")
|
||||
assert.Contains(t, outStr, "Aliases:")
|
||||
assert.Contains(t, outStr, "gen")
|
||||
}
|
||||
|
||||
func TestGenerateCommandAlias(t *testing.T) {
|
||||
// Test that the "gen" alias works
|
||||
cmd := &cobra.Command{
|
||||
Use: "headscale",
|
||||
Short: "headscale - a Tailscale control server",
|
||||
}
|
||||
|
||||
cmd.AddCommand(generateCmd)
|
||||
|
||||
out := new(bytes.Buffer)
|
||||
cmd.SetOut(out)
|
||||
cmd.SetErr(out)
|
||||
cmd.SetArgs([]string{"gen", "--help"})
|
||||
|
||||
err := cmd.Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
outStr := out.String()
|
||||
assert.Contains(t, outStr, "Generate commands")
|
||||
}
|
||||
|
||||
func TestGeneratePrivateKeyCommand(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
expectJSON bool
|
||||
expectYAML bool
|
||||
}{
|
||||
{
|
||||
name: "default output",
|
||||
args: []string{"generate", "private-key"},
|
||||
expectJSON: false,
|
||||
expectYAML: false,
|
||||
},
|
||||
{
|
||||
name: "json output",
|
||||
args: []string{"generate", "private-key", "--output", "json"},
|
||||
expectJSON: true,
|
||||
expectYAML: false,
|
||||
},
|
||||
{
|
||||
name: "yaml output",
|
||||
args: []string{"generate", "private-key", "--output", "yaml"},
|
||||
expectJSON: false,
|
||||
expectYAML: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Note: This command calls SuccessOutput which exits the process
|
||||
// We can't test the actual execution easily without mocking
|
||||
// Instead, we test the command structure and that it exists
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "headscale",
|
||||
Short: "headscale - a Tailscale control server",
|
||||
}
|
||||
|
||||
cmd.AddCommand(generateCmd)
|
||||
cmd.PersistentFlags().StringP("output", "o", "", "Output format")
|
||||
|
||||
// Test that the command exists and can be found
|
||||
privateKeyCmd, _, err := cmd.Find([]string{"generate", "private-key"})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "private-key", privateKeyCmd.Name())
|
||||
assert.Equal(t, "Generate a private key for the headscale server", privateKeyCmd.Short)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGeneratePrivateKeyHelp(t *testing.T) {
|
||||
cmd := &cobra.Command{
|
||||
Use: "headscale",
|
||||
Short: "headscale - a Tailscale control server",
|
||||
}
|
||||
|
||||
cmd.AddCommand(generateCmd)
|
||||
|
||||
out := new(bytes.Buffer)
|
||||
cmd.SetOut(out)
|
||||
cmd.SetErr(out)
|
||||
cmd.SetArgs([]string{"generate", "private-key", "--help"})
|
||||
|
||||
err := cmd.Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
outStr := out.String()
|
||||
assert.Contains(t, outStr, "Generate a private key for the headscale server")
|
||||
assert.Contains(t, outStr, "Usage:")
|
||||
}
|
||||
|
||||
// Test the key generation logic in isolation (without SuccessOutput/ErrorOutput)
|
||||
func TestPrivateKeyGeneration(t *testing.T) {
|
||||
// We can't easily test the full command because it calls SuccessOutput which exits
|
||||
// But we can test that the key generation produces valid output format
|
||||
|
||||
// This is testing the core logic that would be in the command
|
||||
// In a real refactor, we'd extract this to a testable function
|
||||
|
||||
// For now, we can test that the command structure is correct
|
||||
assert.NotNil(t, generatePrivateKeyCmd)
|
||||
assert.Equal(t, "private-key", generatePrivateKeyCmd.Use)
|
||||
assert.Equal(t, "Generate a private key for the headscale server", generatePrivateKeyCmd.Short)
|
||||
assert.NotNil(t, generatePrivateKeyCmd.Run)
|
||||
}
|
||||
|
||||
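The comments above point at extracting the key generation out of the cobra Run function so it can be unit tested without SuccessOutput exiting the process. A minimal sketch of that hypothetical refactor (generateMachineKey is an illustrative name, not part of this changeset; it reuses the key import from the command shown earlier):

func generateMachineKey() (string, error) {
	machineKey := key.NewMachine()
	machineKeyStr, err := machineKey.MarshalText()
	if err != nil {
		return "", err
	}
	// Return the text form instead of printing and exiting, so tests can assert on it
	// (the tests above expect an "mkey:" prefix on the rendered key).
	return string(machineKeyStr), nil
}
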
func TestGenerateCommandStructure(t *testing.T) {
|
||||
// Test the command hierarchy
|
||||
assert.Equal(t, "generate", generateCmd.Use)
|
||||
assert.Equal(t, "Generate commands", generateCmd.Short)
|
||||
assert.Contains(t, generateCmd.Aliases, "gen")
|
||||
|
||||
// Test that private-key is a subcommand
|
||||
found := false
|
||||
for _, subcmd := range generateCmd.Commands() {
|
||||
if subcmd.Name() == "private-key" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "private-key should be a subcommand of generate")
|
||||
}
|
||||
|
||||
// Helper function to test output formats (would be used if we refactored the command)
|
||||
func validatePrivateKeyOutput(t *testing.T, output string, format string) {
|
||||
switch format {
|
||||
case "json":
|
||||
var result map[string]interface{}
|
||||
err := json.Unmarshal([]byte(output), &result)
|
||||
require.NoError(t, err, "Output should be valid JSON")
|
||||
|
||||
privateKey, exists := result["private_key"]
|
||||
require.True(t, exists, "JSON should contain private_key field")
|
||||
|
||||
keyStr, ok := privateKey.(string)
|
||||
require.True(t, ok, "private_key should be a string")
|
||||
require.NotEmpty(t, keyStr, "private_key should not be empty")
|
||||
|
||||
// Basic validation that it looks like a machine key
|
||||
assert.True(t, strings.HasPrefix(keyStr, "mkey:"), "Machine key should start with mkey:")
|
||||
|
||||
case "yaml":
|
||||
var result map[string]interface{}
|
||||
err := yaml.Unmarshal([]byte(output), &result)
|
||||
require.NoError(t, err, "Output should be valid YAML")
|
||||
|
||||
privateKey, exists := result["private_key"]
|
||||
require.True(t, exists, "YAML should contain private_key field")
|
||||
|
||||
keyStr, ok := privateKey.(string)
|
||||
require.True(t, ok, "private_key should be a string")
|
||||
require.NotEmpty(t, keyStr, "private_key should not be empty")
|
||||
|
||||
assert.True(t, strings.HasPrefix(keyStr, "mkey:"), "Machine key should start with mkey:")
|
||||
|
||||
default:
|
||||
// Default format should just be the key itself
|
||||
assert.True(t, strings.HasPrefix(output, "mkey:"), "Default output should be the machine key")
|
||||
assert.NotContains(t, output, "{", "Default output should not contain JSON")
|
||||
assert.NotContains(t, output, "private_key:", "Default output should not contain YAML structure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrivateKeyOutputFormats(t *testing.T) {
|
||||
// Test cases for different output formats
|
||||
// These test the validation logic we would use after refactoring
|
||||
|
||||
tests := []struct {
|
||||
format string
|
||||
sample string
|
||||
}{
|
||||
{
|
||||
format: "json",
|
||||
sample: `{"private_key": "mkey:abcd1234567890abcd1234567890abcd1234567890abcd1234567890abcd1234"}`,
|
||||
},
|
||||
{
|
||||
format: "yaml",
|
||||
sample: "private_key: mkey:abcd1234567890abcd1234567890abcd1234567890abcd1234567890abcd1234\n",
|
||||
},
|
||||
{
|
||||
format: "",
|
||||
sample: "mkey:abcd1234567890abcd1234567890abcd1234567890abcd1234567890abcd1234",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run("format_"+tt.format, func(t *testing.T) {
|
||||
validatePrivateKeyOutput(t, tt.sample, tt.format)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,6 @@ package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -15,11 +14,6 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||

// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
type Error string

func (e Error) Error() string { return string(e) }

const (
errMockOidcClientIDNotDefined = Error("MOCKOIDC_CLIENT_ID not defined")
errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
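A minimal sketch of why the constant Error type above is useful (hypothetical names, not part of this changeset): unlike errors.New, a sentinel declared as a constant exists at compile time and compares by value with == or errors.Is.

const errExample = Error("example sentinel")

func mightFail() error { return errExample }

func caller() {
	// errors.Is matches by simple equality here: two Error values with the same
	// underlying string are equal constants.
	if err := mightFail(); errors.Is(err, errExample) {
		// handle this specific, comparable error
	}
}
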
@@ -74,7 +68,7 @@ func mockOIDC() error {
|
||||
|
||||
userStr := os.Getenv("MOCKOIDC_USERS")
|
||||
if userStr == "" {
|
||||
return errors.New("MOCKOIDC_USERS not defined")
|
||||
return fmt.Errorf("MOCKOIDC_USERS not defined")
|
||||
}
|
||||
|
||||
var users []mockoidc.MockUser
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/netip"
|
||||
@@ -14,7 +13,6 @@ import (
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/samber/lo"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
"tailscale.com/types/key"
|
||||
@@ -22,23 +20,23 @@ import (
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(nodeCmd)
|
||||
// User filtering
|
||||
listNodesCmd.Flags().StringP("user", "u", "", "Filter by user")
|
||||
// Node filtering
|
||||
listNodesCmd.Flags().StringP("node", "", "", "Filter by node (ID, name, hostname, or IP)")
|
||||
listNodesCmd.Flags().Uint64P("id", "", 0, "Filter by node ID")
|
||||
listNodesCmd.Flags().StringP("name", "", "", "Filter by node hostname")
|
||||
listNodesCmd.Flags().StringP("ip", "", "", "Filter by node IP address")
|
||||
// Display options
|
||||
listNodesCmd.Flags().BoolP("tags", "t", false, "Show tags")
|
||||
listNodesCmd.Flags().String("columns", "", "Comma-separated list of columns to display")
|
||||
|
||||
listNodesCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
listNodesNamespaceFlag := listNodesCmd.Flags().Lookup("namespace")
|
||||
listNodesNamespaceFlag.Deprecated = deprecateNamespaceMessage
|
||||
listNodesNamespaceFlag.Hidden = true
|
||||
|
||||
nodeCmd.AddCommand(listNodesCmd)
|
||||
|
||||
listNodeRoutesCmd.Flags().StringP("node", "n", "", "Node identifier (ID, name, hostname, or IP)")
|
||||
nodeCmd.AddCommand(listNodeRoutesCmd)
|
||||
|
||||
registerNodeCmd.Flags().StringP("user", "u", "", "User")
|
||||
|
||||
registerNodeCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
registerNodeNamespaceFlag := registerNodeCmd.Flags().Lookup("namespace")
|
||||
registerNodeNamespaceFlag.Deprecated = deprecateNamespaceMessage
|
||||
registerNodeNamespaceFlag.Hidden = true
|
||||
|
||||
err := registerNodeCmd.MarkFlagRequired("user")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
@@ -50,45 +48,56 @@ func init() {
|
||||
}
|
||||
nodeCmd.AddCommand(registerNodeCmd)
|
||||
|
||||
expireNodeCmd.Flags().StringP("node", "n", "", "Node identifier (ID, name, hostname, or IP)")
|
||||
expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = expireNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(expireNodeCmd)
|
||||
|
||||
renameNodeCmd.Flags().StringP("node", "n", "", "Node identifier (ID, name, hostname, or IP)")
|
||||
renameNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = renameNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(renameNodeCmd)
|
||||
|
||||
deleteNodeCmd.Flags().StringP("node", "n", "", "Node identifier (ID, name, hostname, or IP)")
|
||||
deleteNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = deleteNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(deleteNodeCmd)
|
||||
|
||||
moveNodeCmd.Flags().StringP("node", "n", "", "Node identifier (ID, name, hostname, or IP)")
|
||||
moveNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
|
||||
err = moveNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
|
||||
moveNodeCmd.Flags().StringP("user", "u", "", "New user (ID, name, or email)")
|
||||
moveNodeCmd.Flags().String("name", "", "New username")
|
||||
moveNodeCmd.Flags().StringP("user", "u", "", "New user")
|
||||
|
||||
// One of --user or --name is required (checked in GetUserIdentifier)
|
||||
moveNodeCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
moveNodeNamespaceFlag := moveNodeCmd.Flags().Lookup("namespace")
|
||||
moveNodeNamespaceFlag.Deprecated = deprecateNamespaceMessage
|
||||
moveNodeNamespaceFlag.Hidden = true
|
||||
|
||||
err = moveNodeCmd.MarkFlagRequired("user")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(moveNodeCmd)
|
||||
|
||||
tagCmd.Flags().StringP("node", "n", "", "Node identifier (ID, name, hostname, or IP)")
|
||||
tagCmd.MarkFlagRequired("node")
|
||||
tagCmd.Flags().StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
|
||||
nodeCmd.AddCommand(tagCmd)
|
||||
tagCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
|
||||
approveRoutesCmd.Flags().StringP("node", "n", "", "Node identifier (ID, name, hostname, or IP)")
|
||||
approveRoutesCmd.MarkFlagRequired("node")
|
||||
approveRoutesCmd.Flags().StringSliceP("routes", "r", []string{}, `List of routes that will be approved (comma-separated, e.g. "10.0.0.0/8,192.168.0.0/24" or empty string to remove all approved routes)`)
|
||||
nodeCmd.AddCommand(approveRoutesCmd)
|
||||
err = tagCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
tagCmd.Flags().
|
||||
StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
|
||||
nodeCmd.AddCommand(tagCmd)
|
||||
|
||||
nodeCmd.AddCommand(backfillNodeIPsCmd)
|
||||
}
|
||||
@@ -103,13 +112,16 @@ var registerNodeCmd = &cobra.Command{
|
||||
Use: "register",
|
||||
Short: "Registers a node to your network",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
registrationID, err := cmd.Flags().GetString("key")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
@@ -117,36 +129,28 @@ var registerNodeCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error getting node key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.RegisterNodeRequest{
|
||||
Key: registrationID,
|
||||
User: user,
|
||||
}
|
||||
request := &v1.RegisterNodeRequest{
|
||||
Key: registrationID,
|
||||
User: user,
|
||||
}
|
||||
|
||||
response, err := client.RegisterNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot register node: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
SuccessOutput(
|
||||
response.GetNode(),
|
||||
fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()), output)
|
||||
return nil
|
||||
})
|
||||
response, err := client.RegisterNode(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot register node: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(
|
||||
response.GetNode(),
|
||||
fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()), output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -155,150 +159,49 @@ var listNodesCmd = &cobra.Command{
|
||||
Short: "List nodes",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
}
|
||||
showTags, err := cmd.Flags().GetBool("tags")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting tags flag: %s", err), output)
|
||||
return
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.ListNodesRequest{}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
// Handle user filtering (existing functionality)
|
||||
if user, _ := cmd.Flags().GetString("user"); user != "" {
|
||||
request.User = user
|
||||
}
|
||||
|
||||
// Handle node filtering (new functionality)
|
||||
if nodeFlag, _ := cmd.Flags().GetString("node"); nodeFlag != "" {
|
||||
// Use smart lookup to determine filter type
|
||||
if id, err := strconv.ParseUint(nodeFlag, 10, 64); err == nil && id > 0 {
|
||||
request.Id = id
|
||||
} else if isIPAddress(nodeFlag) {
|
||||
request.IpAddresses = []string{nodeFlag}
|
||||
} else {
|
||||
request.Name = nodeFlag
|
||||
}
|
||||
} else {
|
||||
// Check specific filter flags
|
||||
if id, _ := cmd.Flags().GetUint64("id"); id > 0 {
|
||||
request.Id = id
|
||||
} else if name, _ := cmd.Flags().GetString("name"); name != "" {
|
||||
request.Name = name
|
||||
} else if ip, _ := cmd.Flags().GetString("ip"); ip != "" {
|
||||
request.IpAddresses = []string{ip}
|
||||
}
|
||||
}
|
||||
|
||||
response, err := client.ListNodes(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot get nodes: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetNodes(), "", output)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get user for table display (if filtering by user)
|
||||
userFilter := request.User
|
||||
tableData, err := nodesToPtables(userFilter, showTags, response.GetNodes())
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
return err
|
||||
}
|
||||
|
||||
tableData = FilterTableColumns(cmd, tableData)
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
request := &v1.ListNodesRequest{
|
||||
User: user,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var listNodeRoutesCmd = &cobra.Command{
|
||||
Use: "list-routes",
|
||||
Short: "List routes available on nodes",
|
||||
Aliases: []string{"lsr", "routes"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
identifier, err := GetNodeIdentifier(cmd)
|
||||
response, err := client.ListNodes(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node identifier: %s", err),
|
||||
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.ListNodesRequest{}
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetNodes(), "", output)
|
||||
}
|
||||
|
||||
response, err := client.ListNodes(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot get nodes: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetNodes(), "", output)
|
||||
return nil
|
||||
}
|
||||
|
||||
nodes := response.GetNodes()
|
||||
if identifier != 0 {
|
||||
for _, node := range response.GetNodes() {
|
||||
if node.GetId() == identifier {
|
||||
nodes = []*v1.Node{node}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nodes = lo.Filter(nodes, func(n *v1.Node, _ int) bool {
|
||||
return (n.GetSubnetRoutes() != nil && len(n.GetSubnetRoutes()) > 0) || (n.GetApprovedRoutes() != nil && len(n.GetApprovedRoutes()) > 0) || (n.GetAvailableRoutes() != nil && len(n.GetAvailableRoutes()) > 0)
|
||||
})
|
||||
|
||||
tableData, err := nodeRoutesToPtables(nodes)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
return err
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
tableData, err := nodesToPtables(user, showTags, response.GetNodes())
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -309,42 +212,42 @@ var expireNodeCmd = &cobra.Command{
|
||||
Long: "Expiring a node will keep the node in the database and force it to reauthenticate.",
|
||||
Aliases: []string{"logout", "exp", "e"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := GetNodeIdentifier(cmd)
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node identifier: %s", err),
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.ExpireNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.ExpireNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot expire node: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
request := &v1.ExpireNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetNode(), "Node expired", output)
|
||||
return nil
|
||||
})
|
||||
response, err := client.ExpireNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot expire node: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetNode(), "Node expired", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -352,48 +255,47 @@ var renameNodeCmd = &cobra.Command{
|
||||
Use: "rename NEW_NAME",
|
||||
Short: "Renames a node in your network",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := GetNodeIdentifier(cmd)
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node identifier: %s", err),
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
newName := ""
|
||||
if len(args) > 0 {
|
||||
newName = args[0]
|
||||
}
|
||||
request := &v1.RenameNodeRequest{
|
||||
NodeId: identifier,
|
||||
NewName: newName,
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.RenameNodeRequest{
|
||||
NodeId: identifier,
|
||||
NewName: newName,
|
||||
}
|
||||
|
||||
response, err := client.RenameNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot rename node: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetNode(), "Node renamed", output)
|
||||
return nil
|
||||
})
|
||||
response, err := client.RenameNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot rename node: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetNode(), "Node renamed", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -402,47 +304,52 @@ var deleteNodeCmd = &cobra.Command{
|
||||
Short: "Delete a node",
|
||||
Aliases: []string{"del"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := GetNodeIdentifier(cmd)
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node identifier: %s", err),
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
var nodeName string
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
getRequest := &v1.GetNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
getResponse, err := client.GetNode(ctx, getRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error getting node node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
nodeName = getResponse.GetNode().GetName()
|
||||
return nil
|
||||
})
|
||||
getRequest := &v1.GetNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
|
||||
getResponse, err := client.GetNode(ctx, getRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error getting node node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
deleteRequest := &v1.DeleteNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
prompt := &survey.Confirm{
|
||||
Message: fmt.Sprintf(
|
||||
"Do you want to remove the node %s?",
|
||||
nodeName,
|
||||
getResponse.GetNode().GetName(),
|
||||
),
|
||||
}
|
||||
err = survey.AskOne(prompt, &confirm)
|
||||
@@ -452,34 +359,29 @@ var deleteNodeCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
if confirm || force {
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
deleteRequest := &v1.DeleteNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
response, err := client.DeleteNode(ctx, deleteRequest)
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
|
||||
response, err := client.DeleteNode(ctx, deleteRequest)
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error deleting node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
SuccessOutput(
|
||||
map[string]string{"Result": "Node deleted"},
|
||||
"Node deleted",
|
||||
output,
|
||||
)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error deleting node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
SuccessOutput(
|
||||
map[string]string{"Result": "Node deleted"},
|
||||
"Node deleted",
|
||||
output,
|
||||
)
|
||||
} else {
|
||||
SuccessOutput(map[string]string{"Result": "Node not deleted"}, "Node not deleted", output)
|
||||
}
|
||||
@@ -491,71 +393,78 @@ var moveNodeCmd = &cobra.Command{
|
||||
Short: "Move node to another user",
|
||||
Aliases: []string{"mv"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := GetNodeIdentifier(cmd)
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node identifier: %s", err),
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
userID, err := GetUserIdentifier(cmd)
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting user: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
getRequest := &v1.GetNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
_, err := client.GetNode(ctx, getRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error getting node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
getRequest := &v1.GetNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
|
||||
moveRequest := &v1.MoveNodeRequest{
|
||||
NodeId: identifier,
|
||||
User: userID,
|
||||
}
|
||||
|
||||
moveResponse, err := client.MoveNode(ctx, moveRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error moving node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
SuccessOutput(moveResponse.GetNode(), "Node moved to another user", output)
|
||||
return nil
|
||||
})
|
||||
_, err = client.GetNode(ctx, getRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error getting node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
moveRequest := &v1.MoveNodeRequest{
|
||||
NodeId: identifier,
|
||||
User: user,
|
||||
}
|
||||
|
||||
moveResponse, err := client.MoveNode(ctx, moveRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error moving node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(moveResponse.GetNode(), "Node moved to another user", output)
|
||||
},
|
||||
}
|
||||
|
||||
var backfillNodeIPsCmd = &cobra.Command{
|
||||
Use: "backfill-ips",
|
||||
Short: "Backfill IPs missing from nodes",
|
||||
Aliases: []string{"backfillips"},
|
||||
Use: "backfillips",
|
||||
Short: "Backfill IPs missing from nodes",
|
||||
Long: `
|
||||
Backfill IPs can be used to add/remove IPs from nodes
|
||||
based on the current configuration of Headscale.
|
||||
@@ -570,7 +479,7 @@ it can be run to remove the IPs that should no longer
|
||||
be assigned to nodes.`,
|
||||
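An illustrative invocation (not part of this changeset, and assuming the node command group it is registered under is invoked as nodes), run after the configured prefixes have changed:

headscale nodes backfillips

The command asks for confirmation first; the BackfillNodeIPs request is only sent with Confirmed set once the prompt is accepted.
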
Run: func(cmd *cobra.Command, args []string) {
|
||||
var err error
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
confirm := false
|
||||
prompt := &survey.Confirm{
|
||||
@@ -581,23 +490,25 @@ be assigned to nodes.`,
|
||||
return
|
||||
}
|
||||
if confirm {
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
changes, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: confirm})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error backfilling IPs: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
SuccessOutput(changes, "Node IPs backfilled successfully", output)
|
||||
return nil
|
||||
})
|
||||
changes, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: confirm})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error backfilling IPs: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(changes, "Node IPs backfilled successfully", output)
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -640,14 +551,14 @@ func nodesToPtables(
|
||||
var lastSeenTime string
|
||||
if node.GetLastSeen() != nil {
|
||||
lastSeen = node.GetLastSeen().AsTime()
|
||||
lastSeenTime = lastSeen.Format(HeadscaleDateTimeFormat)
|
||||
lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
var expiry time.Time
|
||||
var expiryTime string
|
||||
if node.GetExpiry() != nil {
|
||||
expiry = node.GetExpiry().AsTime()
|
||||
expiryTime = expiry.Format(HeadscaleDateTimeFormat)
|
||||
expiryTime = expiry.Format("2006-01-02 15:04:05")
|
||||
} else {
|
||||
expiryTime = "N/A"
|
||||
}
|
||||
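Both branches above format timestamps with Go's reference-time layout; the literal in the removed lines and the HeadscaleDateTimeFormat constant in the added lines are presumably the same layout string. A minimal sketch of that assumption:

const HeadscaleDateTimeFormat = "2006-01-02 15:04:05" // Go layouts spell out the reference time Mon Jan 2 15:04:05 2006

func formatLastSeen(t time.Time) string {
	return t.Format(HeadscaleDateTimeFormat) // e.g. "2025-01-31 14:07:02"
}
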
@@ -746,50 +657,25 @@ func nodesToPtables(
|
||||
return tableData, nil
|
||||
}
|
||||
|
||||
func nodeRoutesToPtables(
|
||||
nodes []*v1.Node,
|
||||
) (pterm.TableData, error) {
|
||||
tableHeader := []string{
|
||||
"ID",
|
||||
"Hostname",
|
||||
"Approved",
|
||||
"Available",
|
||||
"Serving (Primary)",
|
||||
}
|
||||
tableData := pterm.TableData{tableHeader}
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeData := []string{
|
||||
strconv.FormatUint(node.GetId(), util.Base10),
|
||||
node.GetGivenName(),
|
||||
strings.Join(node.GetApprovedRoutes(), ", "),
|
||||
strings.Join(node.GetAvailableRoutes(), ", "),
|
||||
strings.Join(node.GetSubnetRoutes(), ", "),
|
||||
}
|
||||
tableData = append(
|
||||
tableData,
|
||||
nodeData,
|
||||
)
|
||||
}
|
||||
|
||||
return tableData, nil
|
||||
}
|
||||
|
||||
var tagCmd = &cobra.Command{
|
||||
Use: "tag",
|
||||
Short: "Manage the tags of a node",
|
||||
Aliases: []string{"tags", "t"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
// retrieve flags from CLI
|
||||
identifier, err := GetNodeIdentifier(cmd)
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node identifier: %s", err),
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
tagsToSet, err := cmd.Flags().GetStringSlice("tags")
|
||||
@@ -799,93 +685,32 @@ var tagCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error retrieving list of tags to add to node, %v", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
// Sending tags to node
|
||||
request := &v1.SetTagsRequest{
|
||||
NodeId: identifier,
|
||||
Tags: tagsToSet,
|
||||
}
|
||||
resp, err := client.SetTags(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error while sending tags to headscale: %s", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
SuccessOutput(
|
||||
resp.GetNode(),
|
||||
"Node updated",
|
||||
output,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
// Sending tags to node
|
||||
request := &v1.SetTagsRequest{
|
||||
NodeId: identifier,
|
||||
Tags: tagsToSet,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var approveRoutesCmd = &cobra.Command{
|
||||
Use: "approve-routes",
|
||||
Short: "Manage the approved routes of a node",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
|
||||
// retrieve flags from CLI
|
||||
identifier, err := GetNodeIdentifier(cmd)
|
||||
resp, err := client.SetTags(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node identifier: %s", err),
|
||||
output,
|
||||
)
|
||||
return
|
||||
}
|
||||
routes, err := cmd.Flags().GetStringSlice("routes")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error retrieving list of routes to add to node, %v", err),
|
||||
fmt.Sprintf("Error while sending tags to headscale: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
// Sending routes to node
|
||||
request := &v1.SetApprovedRoutesRequest{
|
||||
NodeId: identifier,
|
||||
Routes: routes,
|
||||
}
|
||||
resp, err := client.SetApprovedRoutes(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error while sending routes to headscale: %s", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
SuccessOutput(
|
||||
resp.GetNode(),
|
||||
"Node updated",
|
||||
output,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
if resp != nil {
|
||||
SuccessOutput(
|
||||
resp.GetNode(),
|
||||
"Node updated",
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,17 +1,13 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"tailscale.com/types/views"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -23,12 +19,6 @@ func init() {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
policyCmd.AddCommand(setPolicy)
|
||||
|
||||
checkPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
|
||||
if err := checkPolicy.MarkFlagRequired("file"); err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
policyCmd.AddCommand(checkPolicy)
|
||||
}
|
||||
|
||||
var policyCmd = &cobra.Command{
|
||||
@@ -41,26 +31,22 @@ var getPolicy = &cobra.Command{
|
||||
Short: "Print the current ACL Policy",
|
||||
Aliases: []string{"show", "view", "fetch"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
err := WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.GetPolicyRequest{}
|
||||
request := &v1.GetPolicyRequest{}
|
||||
|
||||
response, err := client.GetPolicy(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed loading ACL Policy: %s", err), output)
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(pallabpain): Maybe print this better?
|
||||
// This does not pass output as we dont support yaml, json or json-line
|
||||
// output for this command. It is HuJSON already.
|
||||
SuccessOutput("", response.GetPolicy(), "")
|
||||
return nil
|
||||
})
|
||||
response, err := client.GetPolicy(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(err, fmt.Sprintf("Failed loading ACL Policy: %s", err), output)
|
||||
}
|
||||
|
||||
// TODO(pallabpain): Maybe print this better?
|
||||
// This does not pass output as we dont support yaml, json or json-line
|
||||
// output for this command. It is HuJSON already.
|
||||
SuccessOutput("", response.GetPolicy(), "")
|
||||
},
|
||||
}
|
||||
|
||||
@@ -72,65 +58,30 @@ var setPolicy = &cobra.Command{
|
||||
This command only works when the acl.policy_mode is set to "db", and the policy will be stored in the database.`,
|
||||
Aliases: []string{"put", "update"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
policyPath, _ := cmd.Flags().GetString("file")
|
||||
|
||||
f, err := os.Open(policyPath)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error opening the policy file: %s", err), output)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
policyBytes, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
|
||||
return
|
||||
}
|
||||
|
||||
request := &v1.SetPolicyRequest{Policy: string(policyBytes)}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
if _, err := client.SetPolicy(ctx, request); err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
|
||||
return err
|
||||
}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
SuccessOutput(nil, "Policy updated.", "")
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
if _, err := client.SetPolicy(ctx, request); err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var checkPolicy = &cobra.Command{
|
||||
Use: "check",
|
||||
Short: "Check the Policy file for errors",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
policyPath, _ := cmd.Flags().GetString("file")
|
||||
|
||||
f, err := os.Open(policyPath)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error opening the policy file: %s", err), output)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
policyBytes, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = policy.NewPolicyManager(policyBytes, nil, views.Slice[types.NodeView]{})
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(nil, "Policy is valid", "")
|
||||
|
||||
SuccessOutput(nil, "Policy updated.", "")
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -15,9 +14,18 @@ import (
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultPreAuthKeyExpiry = "1h"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(preauthkeysCmd)
|
||||
preauthkeysCmd.PersistentFlags().Uint64P("user", "u", 0, "User identifier (ID)")
|
||||
preauthkeysCmd.PersistentFlags().StringP("user", "u", "", "User")
|
||||
|
||||
preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "User")
|
||||
pakNamespaceFlag := preauthkeysCmd.PersistentFlags().Lookup("namespace")
|
||||
pakNamespaceFlag.Deprecated = deprecateNamespaceMessage
|
||||
pakNamespaceFlag.Hidden = true
|
||||
|
||||
err := preauthkeysCmd.MarkPersistentFlagRequired("user")
|
||||
if err != nil {
|
||||
@@ -47,85 +55,81 @@ var listPreAuthKeys = &cobra.Command{
|
||||
Short: "List the preauthkeys for this user",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
user, err := cmd.Flags().GetUint64("user")
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListPreAuthKeysRequest{
|
||||
User: user,
|
||||
}
|
||||
|
||||
response, err := client.ListPreAuthKeys(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting the list of keys: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.ListPreAuthKeysRequest{
|
||||
User: user,
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetPreAuthKeys(), "", output)
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{
|
||||
{
|
||||
"ID",
|
||||
"Key",
|
||||
"Reusable",
|
||||
"Ephemeral",
|
||||
"Used",
|
||||
"Expiration",
|
||||
"Created",
|
||||
"Tags",
|
||||
},
|
||||
}
|
||||
for _, key := range response.GetPreAuthKeys() {
|
||||
expiration := "-"
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = ColourTime(key.GetExpiration().AsTime())
|
||||
}
|
||||
|
||||
response, err := client.ListPreAuthKeys(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting the list of keys: %s", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
aclTags := ""
|
||||
|
||||
for _, tag := range key.GetAclTags() {
|
||||
aclTags += "," + tag
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetPreAuthKeys(), "", output)
|
||||
return nil
|
||||
}
|
||||
aclTags = strings.TrimLeft(aclTags, ",")
|
||||
|
||||
tableData := pterm.TableData{
|
||||
{
|
||||
"ID",
|
||||
"Key",
|
||||
"Reusable",
|
||||
"Ephemeral",
|
||||
"Used",
|
||||
"Expiration",
|
||||
"Created",
|
||||
"Tags",
|
||||
},
|
||||
}
|
||||
for _, key := range response.GetPreAuthKeys() {
|
||||
expiration := "-"
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = ColourTime(key.GetExpiration().AsTime())
|
||||
}
|
||||
tableData = append(tableData, []string{
|
||||
key.GetId(),
|
||||
key.GetKey(),
|
||||
strconv.FormatBool(key.GetReusable()),
|
||||
strconv.FormatBool(key.GetEphemeral()),
|
||||
strconv.FormatBool(key.GetUsed()),
|
||||
expiration,
|
||||
key.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
|
||||
aclTags,
|
||||
})
|
||||
|
||||
aclTags := ""
|
||||
|
||||
for _, tag := range key.GetAclTags() {
|
||||
aclTags += "," + tag
|
||||
}
|
||||
|
||||
aclTags = strings.TrimLeft(aclTags, ",")
|
||||
|
||||
tableData = append(tableData, []string{
|
||||
strconv.FormatUint(key.GetId(), 10),
|
||||
key.GetKey(),
|
||||
strconv.FormatBool(key.GetReusable()),
|
||||
strconv.FormatBool(key.GetEphemeral()),
|
||||
strconv.FormatBool(key.GetUsed()),
|
||||
expiration,
|
||||
key.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
|
||||
aclTags,
|
||||
})
|
||||
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -135,12 +139,11 @@ var createPreAuthKeyCmd = &cobra.Command{
|
||||
Short: "Creates a new preauthkey in the specified user",
|
||||
Aliases: []string{"c", "new"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
user, err := cmd.Flags().GetUint64("user")
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
return
|
||||
}
|
||||
|
||||
reusable, _ := cmd.Flags().GetBool("reusable")
|
||||
@@ -163,7 +166,6 @@ var createPreAuthKeyCmd = &cobra.Command{
|
||||
fmt.Sprintf("Could not parse duration: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
expiration := time.Now().UTC().Add(time.Duration(duration))
|
||||
@@ -174,23 +176,20 @@ var createPreAuthKeyCmd = &cobra.Command{
|
||||
|
||||
request.Expiration = timestamppb.New(expiration)
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
response, err := client.CreatePreAuthKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create Pre Auth Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output)
|
||||
return nil
|
||||
})
|
||||
response, err := client.CreatePreAuthKey(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create Pre Auth Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -206,34 +205,30 @@ var expirePreAuthKeyCmd = &cobra.Command{
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
user, err := cmd.Flags().GetUint64("user")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
return
|
||||
}
|
||||
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.ExpirePreAuthKeyRequest{
|
||||
User: user,
|
||||
Key: args[0],
|
||||
}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.ExpirePreAuthKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot expire Pre Auth Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
request := &v1.ExpirePreAuthKeyRequest{
|
||||
User: user,
|
||||
Key: args[0],
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key expired", output)
|
||||
return nil
|
||||
})
|
||||
response, err := client.ExpirePreAuthKey(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot expire Pre Auth Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key expired", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
)
|
||||
|
||||
func ColourTime(date time.Time) string {
|
||||
dateStr := date.Format(HeadscaleDateTimeFormat)
|
||||
dateStr := date.Format("2006-01-02 15:04:05")
|
||||
|
||||
if date.After(time.Now()) {
|
||||
dateStr = pterm.LightGreen(dateStr)
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/rs/zerolog"
|
||||
@@ -14,6 +13,10 @@ import (
|
||||
"github.com/tcnksm/go-latest"
|
||||
)
|
||||
|
||||
const (
|
||||
deprecateNamespaceMessage = "use --user"
|
||||
)
|
||||
|
||||
var cfgFile string = ""
|
||||
|
||||
func init() {
|
||||
@@ -22,11 +25,6 @@ func init() {
|
||||
return
|
||||
}
|
||||
|
||||
if slices.Contains(os.Args, "policy") && slices.Contains(os.Args, "check") {
|
||||
zerolog.SetGlobalLevel(zerolog.Disabled)
|
||||
return
|
||||
}
|
||||
|
||||
cobra.OnInitialize(initConfig)
|
||||
rootCmd.PersistentFlags().
|
||||
StringVarP(&cfgFile, "config", "c", "", "config file (default is /etc/headscale/config.yaml)")
|
||||
@@ -60,26 +58,26 @@ func initConfig() {
|
||||
zerolog.SetGlobalLevel(zerolog.Disabled)
|
||||
}
|
||||
|
||||
logFormat := viper.GetString("log.format")
|
||||
if logFormat == types.JSONLogFormat {
|
||||
log.Logger = log.Output(os.Stdout)
|
||||
}
|
||||
// logFormat := viper.GetString("log.format")
|
||||
// if logFormat == types.JSONLogFormat {
|
||||
// log.Logger = log.Output(os.Stdout)
|
||||
// }
|
||||
|
||||
disableUpdateCheck := viper.GetBool("disable_check_updates")
|
||||
if !disableUpdateCheck && !machineOutput {
|
||||
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
|
||||
types.Version != "dev" {
|
||||
Version != "dev" {
|
||||
githubTag := &latest.GithubTag{
|
||||
Owner: "juanfont",
|
||||
Repository: "headscale",
|
||||
}
|
||||
res, err := latest.Check(githubTag, types.Version)
|
||||
res, err := latest.Check(githubTag, Version)
|
||||
if err == nil && res.Outdated {
|
||||
//nolint
|
||||
log.Warn().Msgf(
|
||||
"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
|
||||
res.Current,
|
||||
types.Version,
|
||||
Version,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
271
cmd/headscale/cli/routes.go
Normal file
@@ -0,0 +1,271 @@
|
||||
package cli

import (
"fmt"
"log"
"net/netip"
"strconv"

v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"tailscale.com/net/tsaddr"
)

const (
Base10 = 10
)

func init() {
|
||||
rootCmd.AddCommand(routesCmd)
|
||||
listRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
routesCmd.AddCommand(listRoutesCmd)
|
||||
|
||||
enableRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
|
||||
err := enableRouteCmd.MarkFlagRequired("route")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
routesCmd.AddCommand(enableRouteCmd)
|
||||
|
||||
disableRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
|
||||
err = disableRouteCmd.MarkFlagRequired("route")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
routesCmd.AddCommand(disableRouteCmd)
|
||||
|
||||
deleteRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
|
||||
err = deleteRouteCmd.MarkFlagRequired("route")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
routesCmd.AddCommand(deleteRouteCmd)
|
||||
}
|
||||
|
||||
var routesCmd = &cobra.Command{
|
||||
Use: "routes",
|
||||
Short: "Manage the routes of Headscale",
|
||||
Aliases: []string{"r", "route"},
|
||||
}
|
||||
|
||||
var listRoutesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all routes",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
machineID, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
var routes []*v1.Route
|
||||
|
||||
if machineID == 0 {
|
||||
response, err := client.GetRoutes(ctx, &v1.GetRoutesRequest{})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetRoutes(), "", output)
|
||||
}
|
||||
|
||||
routes = response.GetRoutes()
|
||||
} else {
|
||||
response, err := client.GetNodeRoutes(ctx, &v1.GetNodeRoutesRequest{
|
||||
NodeId: machineID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get routes for node %d: %s", machineID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetRoutes(), "", output)
|
||||
}
|
||||
|
||||
routes = response.GetRoutes()
|
||||
}
|
||||
|
||||
tableData := routesToPtables(routes)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var enableRouteCmd = &cobra.Command{
|
||||
Use: "enable",
|
||||
Short: "Set a route as enabled",
|
||||
Long: `This command will make as enabled a given route.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
routeID, err := cmd.Flags().GetUint64("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.EnableRoute(ctx, &v1.EnableRouteRequest{
|
||||
RouteId: routeID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot enable route %d: %s", routeID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var disableRouteCmd = &cobra.Command{
|
||||
Use: "disable",
|
||||
Short: "Set as disabled a given route",
|
||||
Long: `This command will make as disabled a given route.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
routeID, err := cmd.Flags().GetUint64("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.DisableRoute(ctx, &v1.DisableRouteRequest{
|
||||
RouteId: routeID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot disable route %d: %s", routeID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var deleteRouteCmd = &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete a given route",
|
||||
Long: `This command will delete a given route.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
routeID, err := cmd.Flags().GetUint64("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.DeleteRoute(ctx, &v1.DeleteRouteRequest{
|
||||
RouteId: routeID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot delete route %d: %s", routeID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// routesToPtables converts the list of routes to a nice table.
|
||||
func routesToPtables(routes []*v1.Route) pterm.TableData {
|
||||
tableData := pterm.TableData{{"ID", "Node", "Prefix", "Advertised", "Enabled", "Primary"}}
|
||||
|
||||
for _, route := range routes {
|
||||
var isPrimaryStr string
|
||||
prefix, err := netip.ParsePrefix(route.GetPrefix())
|
||||
if err != nil {
|
||||
log.Printf("Error parsing prefix %s: %s", route.GetPrefix(), err)
|
||||
|
||||
continue
|
||||
}
|
||||
if tsaddr.IsExitRoute(prefix) {
|
||||
isPrimaryStr = "-"
|
||||
} else {
|
||||
isPrimaryStr = strconv.FormatBool(route.GetIsPrimary())
|
||||
}
|
||||
|
||||
var nodeName string
|
||||
if route.GetNode() != nil {
|
||||
nodeName = route.GetNode().GetGivenName()
|
||||
}
|
||||
|
||||
tableData = append(tableData,
|
||||
[]string{
|
||||
strconv.FormatUint(route.GetId(), Base10),
|
||||
nodeName,
|
||||
route.GetPrefix(),
|
||||
strconv.FormatBool(route.GetAdvertised()),
|
||||
strconv.FormatBool(route.GetEnabled()),
|
||||
isPrimaryStr,
|
||||
})
|
||||
}
|
||||
|
||||
return tableData
|
||||
}
|
||||
@@ -2,12 +2,10 @@ package cli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/tailscale/squibble"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -23,12 +21,6 @@ var serveCmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app, err := newHeadscaleServerWithConfig()
|
||||
if err != nil {
|
||||
var squibbleErr squibble.ValidationError
|
||||
if errors.As(err, &squibbleErr) {
|
||||
fmt.Printf("SQLite schema failed to validate:\n")
|
||||
fmt.Println(squibbleErr.Diff)
|
||||
}
|
||||
|
||||
log.Fatal().Caller().Err(err).Msg("Error initializing")
|
||||
}
|
||||
|
||||
|
||||
@@ -1,70 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestServeCommand(t *testing.T) {
|
||||
// Test that the serve command exists and is properly configured
|
||||
assert.NotNil(t, serveCmd)
|
||||
assert.Equal(t, "serve", serveCmd.Use)
|
||||
assert.Equal(t, "Launches the headscale server", serveCmd.Short)
|
||||
assert.NotNil(t, serveCmd.Run)
|
||||
assert.NotNil(t, serveCmd.Args)
|
||||
}
|
||||
|
||||
func TestServeCommandInRootCommand(t *testing.T) {
|
||||
// Test that serve is available as a subcommand of root
|
||||
cmd, _, err := rootCmd.Find([]string{"serve"})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "serve", cmd.Name())
|
||||
assert.Equal(t, serveCmd, cmd)
|
||||
}
|
||||
|
||||
func TestServeCommandArgs(t *testing.T) {
|
||||
// Test that the Args function is defined and accepts any arguments
|
||||
// The current implementation always returns nil (accepts any args)
|
||||
assert.NotNil(t, serveCmd.Args)
|
||||
|
||||
// Test the args function directly
|
||||
err := serveCmd.Args(serveCmd, []string{})
|
||||
assert.NoError(t, err, "Args function should accept empty arguments")
|
||||
|
||||
err = serveCmd.Args(serveCmd, []string{"extra", "args"})
|
||||
assert.NoError(t, err, "Args function should accept extra arguments")
|
||||
}
|
||||
|
||||
func TestServeCommandHelp(t *testing.T) {
|
||||
// Test that the command has proper help text
|
||||
assert.NotEmpty(t, serveCmd.Short)
|
||||
assert.Contains(t, serveCmd.Short, "server")
|
||||
assert.Contains(t, serveCmd.Short, "headscale")
|
||||
}
|
||||
|
||||
func TestServeCommandStructure(t *testing.T) {
|
||||
// Test basic command structure
|
||||
assert.Equal(t, "serve", serveCmd.Name())
|
||||
assert.Equal(t, "Launches the headscale server", serveCmd.Short)
|
||||
|
||||
// Test that it has no subcommands (it's a leaf command)
|
||||
subcommands := serveCmd.Commands()
|
||||
assert.Empty(t, subcommands, "Serve command should not have subcommands")
|
||||
}
|
||||
|
||||
// Note: We can't easily test the actual execution of serve because:
|
||||
// 1. It depends on configuration files being present and valid
|
||||
// 2. It calls log.Fatal() which would exit the test process
|
||||
// 3. It tries to start an actual HTTP server which would block forever
|
||||
// 4. It requires database connections and other infrastructure
|
||||
//
|
||||
// In a real refactor, we would:
|
||||
// 1. Extract server initialization logic to a testable function
|
||||
// 2. Use dependency injection for configuration and dependencies
|
||||
// 3. Return errors instead of calling log.Fatal()
|
||||
// 4. Add graceful shutdown capabilities for testing
|
||||
// 5. Allow server startup to be cancelled via context
|
||||
//
|
||||
// For now, we test the command structure and basic properties.
|
||||
@@ -1,55 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const (
|
||||
HeadscaleDateTimeFormat = "2006-01-02 15:04:05"
|
||||
DefaultAPIKeyExpiry = "90d"
|
||||
DefaultPreAuthKeyExpiry = "1h"
|
||||
)
|
||||
|
||||
// FilterTableColumns filters table columns based on --columns flag
|
||||
func FilterTableColumns(cmd *cobra.Command, tableData pterm.TableData) pterm.TableData {
|
||||
columns, _ := cmd.Flags().GetString("columns")
|
||||
if columns == "" || len(tableData) == 0 {
|
||||
return tableData
|
||||
}
|
||||
|
||||
headers := tableData[0]
|
||||
wantedColumns := strings.Split(columns, ",")
|
||||
|
||||
// Find column indices
|
||||
var indices []int
|
||||
for _, wanted := range wantedColumns {
|
||||
wanted = strings.TrimSpace(wanted)
|
||||
for i, header := range headers {
|
||||
if strings.EqualFold(header, wanted) {
|
||||
indices = append(indices, i)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(indices) == 0 {
|
||||
return tableData
|
||||
}
|
||||
|
||||
// Filter all rows
|
||||
filtered := make(pterm.TableData, len(tableData))
|
||||
for i, row := range tableData {
|
||||
newRow := make([]string, len(indices))
|
||||
for j, idx := range indices {
|
||||
if idx < len(row) {
|
||||
newRow[j] = row[idx]
|
||||
}
|
||||
}
|
||||
filtered[i] = newRow
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
@@ -1,12 +1,9 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
survey "github.com/AlecAivazis/survey/v2"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
@@ -17,23 +14,28 @@ import (
|
||||
)
|
||||
|
||||
func usernameAndIDFlag(cmd *cobra.Command) {
|
||||
cmd.Flags().StringP("user", "u", "", "User identifier (ID, name, or email)")
|
||||
cmd.Flags().Int64P("identifier", "i", -1, "User identifier (ID)")
|
||||
cmd.Flags().StringP("name", "n", "", "Username")
|
||||
}
|
||||
|
||||
// userIDFromFlag returns the user ID using smart lookup.
|
||||
// If no user is specified, it will exit the program with an error.
|
||||
func userIDFromFlag(cmd *cobra.Command) uint64 {
|
||||
userID, err := GetUserIdentifier(cmd)
|
||||
if err != nil {
|
||||
// usernameAndIDFromFlag returns the username and ID from the flags of the command.
|
||||
// If both are empty, it will exit the program with an error.
|
||||
func usernameAndIDFromFlag(cmd *cobra.Command) (uint64, string) {
|
||||
username, _ := cmd.Flags().GetString("name")
|
||||
identifier, _ := cmd.Flags().GetInt64("identifier")
|
||||
if username == "" && identifier < 0 {
|
||||
err := errors.New("--name or --identifier flag is required")
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot identify user: "+err.Error(),
|
||||
GetOutputFlag(cmd),
|
||||
fmt.Sprintf(
|
||||
"Cannot rename user: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
return userID
|
||||
return uint64(identifier), username
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -43,18 +45,14 @@ func init() {
|
||||
createUserCmd.Flags().StringP("email", "e", "", "Email")
|
||||
createUserCmd.Flags().StringP("picture-url", "p", "", "Profile picture URL")
|
||||
userCmd.AddCommand(listUsersCmd)
|
||||
// Smart lookup filters - can be used individually or combined
|
||||
listUsersCmd.Flags().StringP("user", "u", "", "Filter by user (ID, name, or email)")
|
||||
listUsersCmd.Flags().Uint64P("id", "", 0, "Filter by user ID")
|
||||
listUsersCmd.Flags().StringP("name", "n", "", "Filter by username")
|
||||
listUsersCmd.Flags().StringP("email", "e", "", "Filter by email address")
|
||||
listUsersCmd.Flags().String("columns", "", "Comma-separated list of columns to display (ID,Name,Username,Email,Created)")
|
||||
usernameAndIDFlag(listUsersCmd)
|
||||
listUsersCmd.Flags().StringP("email", "e", "", "Email")
|
||||
userCmd.AddCommand(destroyUserCmd)
|
||||
usernameAndIDFlag(destroyUserCmd)
|
||||
userCmd.AddCommand(renameUserCmd)
|
||||
usernameAndIDFlag(renameUserCmd)
|
||||
renameUserCmd.Flags().StringP("new-name", "r", "", "New username")
|
||||
renameUserCmd.MarkFlagRequired("new-name")
|
||||
renameNodeCmd.MarkFlagRequired("new-name")
|
||||
}
|
||||
|
||||
var errMissingParameter = errors.New("missing parameters")
|
||||
@@ -77,9 +75,16 @@ var createUserCmd = &cobra.Command{
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
userName := args[0]
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
log.Trace().Interface("client", client).Msg("Obtained gRPC client")
|
||||
|
||||
request := &v1.CreateUserRequest{Name: userName}
|
||||
|
||||
if displayName, _ := cmd.Flags().GetString("display-name"); displayName != "" {
|
||||
@@ -100,75 +105,64 @@ var createUserCmd = &cobra.Command{
|
||||
),
|
||||
output,
|
||||
)
|
||||
return
|
||||
}
|
||||
request.PictureUrl = pictureURL
|
||||
}
|
||||
|
||||
err := WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
log.Trace().Interface("client", client).Msg("Obtained gRPC client")
|
||||
log.Trace().Interface("request", request).Msg("Sending CreateUser request")
|
||||
|
||||
response, err := client.CreateUser(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot create user: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetUser(), "User created", output)
|
||||
return nil
|
||||
})
|
||||
log.Trace().Interface("request", request).Msg("Sending CreateUser request")
|
||||
response, err := client.CreateUser(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot create user: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetUser(), "User created", output)
|
||||
},
|
||||
}
|
||||
|
||||
var destroyUserCmd = &cobra.Command{
|
||||
Use: "destroy --user USER",
|
||||
Use: "destroy --identifier ID or --name NAME",
|
||||
Short: "Destroys a user",
|
||||
Aliases: []string{"delete"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
id := userIDFromFlag(cmd)
|
||||
id, username := usernameAndIDFromFlag(cmd)
|
||||
request := &v1.ListUsersRequest{
|
||||
Id: id,
|
||||
Name: username,
|
||||
Id: id,
|
||||
}
|
||||
|
||||
var user *v1.User
|
||||
err := WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
users, err := client.ListUsers(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
if len(users.GetUsers()) != 1 {
|
||||
err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
user = users.GetUsers()[0]
|
||||
return nil
|
||||
})
|
||||
users, err := client.ListUsers(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if len(users.GetUsers()) != 1 {
|
||||
err := fmt.Errorf("Unable to determine user to delete, query returned multiple users, use ID")
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
user := users.GetUsers()[0]
|
||||
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
@@ -185,24 +179,20 @@ var destroyUserCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
if confirm || force {
|
||||
err = WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.DeleteUserRequest{Id: user.GetId()}
|
||||
request := &v1.DeleteUserRequest{Id: user.GetId()}
|
||||
|
||||
response, err := client.DeleteUser(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot destroy user: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
SuccessOutput(response, "User destroyed", output)
|
||||
return nil
|
||||
})
|
||||
response, err := client.DeleteUser(ctx, request)
|
||||
if err != nil {
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot destroy user: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
}
|
||||
SuccessOutput(response, "User destroyed", output)
|
||||
} else {
|
||||
SuccessOutput(map[string]string{"Result": "User not destroyed"}, "User not destroyed", output)
|
||||
}
|
||||
@@ -214,76 +204,64 @@ var listUsersCmd = &cobra.Command{
|
||||
Short: "List all the users",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
err := WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.ListUsersRequest{}
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
// Check for smart lookup flag first
|
||||
userFlag, _ := cmd.Flags().GetString("user")
|
||||
if userFlag != "" {
|
||||
// Use smart lookup to determine filter type
|
||||
if id, err := strconv.ParseUint(userFlag, 10, 64); err == nil && id > 0 {
|
||||
request.Id = id
|
||||
} else if strings.Contains(userFlag, "@") {
|
||||
request.Email = userFlag
|
||||
} else {
|
||||
request.Name = userFlag
|
||||
}
|
||||
} else {
|
||||
// Check specific filter flags
|
||||
if id, _ := cmd.Flags().GetUint64("id"); id > 0 {
|
||||
request.Id = id
|
||||
} else if name, _ := cmd.Flags().GetString("name"); name != "" {
|
||||
request.Name = name
|
||||
} else if email, _ := cmd.Flags().GetString("email"); email != "" {
|
||||
request.Email = email
|
||||
}
|
||||
}
|
||||
request := &v1.ListUsersRequest{}
|
||||
|
||||
response, err := client.ListUsers(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot get users: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
id, _ := cmd.Flags().GetInt64("identifier")
|
||||
username, _ := cmd.Flags().GetString("name")
|
||||
email, _ := cmd.Flags().GetString("email")
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetUsers(), "", output)
|
||||
return nil
|
||||
}
|
||||
// filter by one param at most
|
||||
switch {
|
||||
case id > 0:
|
||||
request.Id = uint64(id)
|
||||
break
|
||||
case username != "":
|
||||
request.Name = username
|
||||
break
|
||||
case email != "":
|
||||
request.Email = email
|
||||
break
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{{"ID", "Name", "Username", "Email", "Created"}}
|
||||
for _, user := range response.GetUsers() {
|
||||
tableData = append(
|
||||
tableData,
|
||||
[]string{
|
||||
strconv.FormatUint(user.GetId(), 10),
|
||||
user.GetDisplayName(),
|
||||
user.GetName(),
|
||||
user.GetEmail(),
|
||||
user.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
|
||||
},
|
||||
)
|
||||
}
|
||||
tableData = FilterTableColumns(cmd, tableData)
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
response, err := client.ListUsers(ctx, request)
|
||||
if err != nil {
|
||||
// Error already handled in closure
|
||||
return
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get users: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetUsers(), "", output)
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{{"ID", "Name", "Username", "Email", "Created"}}
|
||||
for _, user := range response.GetUsers() {
|
||||
tableData = append(
|
||||
tableData,
|
||||
[]string{
|
||||
fmt.Sprintf("%d", user.GetId()),
|
||||
user.GetDisplayName(),
|
||||
user.GetName(),
|
||||
user.GetEmail(),
|
||||
user.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
|
||||
},
|
||||
)
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -293,56 +271,55 @@ var renameUserCmd = &cobra.Command{
|
||||
Short: "Renames a user",
|
||||
Aliases: []string{"mv"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
id, username := usernameAndIDFromFlag(cmd)
|
||||
listReq := &v1.ListUsersRequest{
|
||||
Name: username,
|
||||
Id: id,
|
||||
}
|
||||
|
||||
users, err := client.ListUsers(ctx, listReq)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
if len(users.GetUsers()) != 1 {
|
||||
err := fmt.Errorf("Unable to determine user to delete, query returned multiple users, use ID")
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
id := userIDFromFlag(cmd)
|
||||
newName, _ := cmd.Flags().GetString("new-name")
|
||||
|
||||
err := WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
listReq := &v1.ListUsersRequest{
|
||||
Id: id,
|
||||
}
|
||||
|
||||
users, err := client.ListUsers(ctx, listReq)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(users.GetUsers()) != 1 {
|
||||
err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
renameReq := &v1.RenameUserRequest{
|
||||
OldId: id,
|
||||
NewName: newName,
|
||||
}
|
||||
|
||||
response, err := client.RenameUser(ctx, renameReq)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Cannot rename user: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetUser(), "User renamed", output)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
renameReq := &v1.RenameUserRequest{
|
||||
OldId: id,
|
||||
NewName: newName,
|
||||
}
|
||||
|
||||
response, err := client.RenameUser(ctx, renameReq)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot rename user: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetUser(), "User renamed", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -5,35 +5,36 @@ import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const (
|
||||
HeadscaleDateTimeFormat = "2006-01-02 15:04:05"
|
||||
SocketWritePermissions = 0o666
|
||||
)
|
||||
|
||||
func newHeadscaleServerWithConfig() (*hscontrol.Headscale, error) {
|
||||
cfg, err := types.LoadServerConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"loading configuration: %w",
|
||||
"failed to load configuration while creating headscale instance: %w",
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
app, err := hscontrol.NewHeadscale(cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating new headscale: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return app, nil
|
||||
@@ -71,7 +72,7 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
|
||||
|
||||
// Try to give the user better feedback if we cannot write to the headscale
|
||||
// socket.
|
||||
socket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, 0o666) // nolint
|
||||
socket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, SocketWritePermissions) // nolint
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
log.Fatal().
|
||||
@@ -199,152 +200,3 @@ func (t tokenAuth) GetRequestMetadata(
|
||||
func (tokenAuth) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// GetOutputFlag returns the output flag value (never fails)
|
||||
func GetOutputFlag(cmd *cobra.Command) string {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
return output
|
||||
}
|
||||
|
||||
|
||||
// GetNodeIdentifier returns the node ID using smart lookup via gRPC ListNodes call
|
||||
func GetNodeIdentifier(cmd *cobra.Command) (uint64, error) {
|
||||
nodeFlag, _ := cmd.Flags().GetString("node")
|
||||
|
||||
// Use --node flag
|
||||
if nodeFlag == "" {
|
||||
return 0, fmt.Errorf("--node flag is required")
|
||||
}
|
||||
|
||||
// Use smart lookup via gRPC
|
||||
return lookupNodeBySpecifier(nodeFlag)
|
||||
}
|
||||
|
||||
// lookupNodeBySpecifier performs smart lookup of a node by ID, name, hostname, or IP
|
||||
func lookupNodeBySpecifier(specifier string) (uint64, error) {
|
||||
var nodeID uint64
|
||||
|
||||
err := WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.ListNodesRequest{}
|
||||
|
||||
// Detect what type of specifier this is and set appropriate filter
|
||||
if id, err := strconv.ParseUint(specifier, 10, 64); err == nil && id > 0 {
|
||||
// Looks like a numeric ID
|
||||
request.Id = id
|
||||
} else if isIPAddress(specifier) {
|
||||
// Looks like an IP address
|
||||
request.IpAddresses = []string{specifier}
|
||||
} else {
|
||||
// Treat as hostname/name
|
||||
request.Name = specifier
|
||||
}
|
||||
|
||||
response, err := client.ListNodes(ctx, request)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to lookup node: %w", err)
|
||||
}
|
||||
|
||||
nodes := response.GetNodes()
|
||||
if len(nodes) == 0 {
|
||||
return fmt.Errorf("node not found")
|
||||
}
|
||||
|
||||
if len(nodes) > 1 {
|
||||
var nodeInfo []string
|
||||
for _, node := range nodes {
|
||||
nodeInfo = append(nodeInfo, fmt.Sprintf("ID=%d name=%s", node.GetId(), node.GetName()))
|
||||
}
|
||||
return fmt.Errorf("multiple nodes found matching '%s': %s", specifier, strings.Join(nodeInfo, ", "))
|
||||
}
|
||||
|
||||
// Exactly one match - this is what we want
|
||||
nodeID = nodes[0].GetId()
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return nodeID, nil
|
||||
}
|
||||
|
||||
// isIPAddress checks if a string looks like an IP address
|
||||
func isIPAddress(s string) bool {
|
||||
// Try parsing as IP address (both IPv4 and IPv6)
|
||||
if net.ParseIP(s) != nil {
|
||||
return true
|
||||
}
|
||||
// Try parsing as CIDR
|
||||
if _, _, err := net.ParseCIDR(s); err == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetUserIdentifier returns the user ID using smart lookup via gRPC ListUsers call
|
||||
func GetUserIdentifier(cmd *cobra.Command) (uint64, error) {
|
||||
userFlag, _ := cmd.Flags().GetString("user")
|
||||
nameFlag, _ := cmd.Flags().GetString("name")
|
||||
|
||||
var specifier string
|
||||
|
||||
// Determine which flag was used (prefer --user, fall back to legacy flags)
|
||||
if userFlag != "" {
|
||||
specifier = userFlag
|
||||
} else if nameFlag != "" {
|
||||
specifier = nameFlag
|
||||
} else {
|
||||
return 0, fmt.Errorf("--user flag is required")
|
||||
}
|
||||
|
||||
// Use smart lookup via gRPC
|
||||
return lookupUserBySpecifier(specifier)
|
||||
}
|
||||
|
||||
// lookupUserBySpecifier performs smart lookup of a user by ID, name, or email
|
||||
func lookupUserBySpecifier(specifier string) (uint64, error) {
|
||||
var userID uint64
|
||||
|
||||
err := WithClient(func(ctx context.Context, client v1.HeadscaleServiceClient) error {
|
||||
request := &v1.ListUsersRequest{}
|
||||
|
||||
// Detect what type of specifier this is and set appropriate filter
|
||||
if id, err := strconv.ParseUint(specifier, 10, 64); err == nil && id > 0 {
|
||||
// Looks like a numeric ID
|
||||
request.Id = id
|
||||
} else if strings.Contains(specifier, "@") {
|
||||
// Looks like an email address
|
||||
request.Email = specifier
|
||||
} else {
|
||||
// Treat as username
|
||||
request.Name = specifier
|
||||
}
|
||||
|
||||
response, err := client.ListUsers(ctx, request)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to lookup user: %w", err)
|
||||
}
|
||||
|
||||
users := response.GetUsers()
|
||||
if len(users) == 0 {
|
||||
return fmt.Errorf("user not found")
|
||||
}
|
||||
|
||||
if len(users) > 1 {
|
||||
var userInfo []string
|
||||
for _, user := range users {
|
||||
userInfo = append(userInfo, fmt.Sprintf("ID=%d name=%s email=%s", user.GetId(), user.GetName(), user.GetEmail()))
|
||||
}
|
||||
return fmt.Errorf("multiple users found matching '%s': %s", specifier, strings.Join(userInfo, ", "))
|
||||
}
|
||||
|
||||
// Exactly one match - this is what we want
|
||||
userID = users[0].GetId()
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return userID, nil
|
||||
}
|
||||
|
||||
@@ -1,175 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestHasMachineOutputFlag(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "no machine output flags",
|
||||
args: []string{"headscale", "users", "list"},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "json flag present",
|
||||
args: []string{"headscale", "users", "list", "json"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "json-line flag present",
|
||||
args: []string{"headscale", "nodes", "list", "json-line"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "yaml flag present",
|
||||
args: []string{"headscale", "apikeys", "list", "yaml"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "mixed flags with json",
|
||||
args: []string{"headscale", "--config", "/tmp/config.yaml", "users", "list", "json"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "flag as part of longer argument",
|
||||
args: []string{"headscale", "users", "create", "json-user@example.com"},
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Save original os.Args
|
||||
originalArgs := os.Args
|
||||
defer func() { os.Args = originalArgs }()
|
||||
|
||||
// Set os.Args to test case
|
||||
os.Args = tt.args
|
||||
|
||||
result := HasMachineOutputFlag()
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutput(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
result interface{}
|
||||
override string
|
||||
outputFormat string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "default format returns override",
|
||||
result: map[string]string{"test": "value"},
|
||||
override: "Human readable output",
|
||||
outputFormat: "",
|
||||
expected: "Human readable output",
|
||||
},
|
||||
{
|
||||
name: "default format with empty override",
|
||||
result: map[string]string{"test": "value"},
|
||||
override: "",
|
||||
outputFormat: "",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "json format",
|
||||
result: map[string]string{"name": "test", "id": "123"},
|
||||
override: "Human readable",
|
||||
outputFormat: "json",
|
||||
expected: "{\n\t\"id\": \"123\",\n\t\"name\": \"test\"\n}",
|
||||
},
|
||||
{
|
||||
name: "json-line format",
|
||||
result: map[string]string{"name": "test", "id": "123"},
|
||||
override: "Human readable",
|
||||
outputFormat: "json-line",
|
||||
expected: "{\"id\":\"123\",\"name\":\"test\"}",
|
||||
},
|
||||
{
|
||||
name: "yaml format",
|
||||
result: map[string]string{"name": "test", "id": "123"},
|
||||
override: "Human readable",
|
||||
outputFormat: "yaml",
|
||||
expected: "id: \"123\"\nname: test\n",
|
||||
},
|
||||
{
|
||||
name: "invalid format returns override",
|
||||
result: map[string]string{"test": "value"},
|
||||
override: "Human readable output",
|
||||
outputFormat: "invalid",
|
||||
expected: "Human readable output",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := output(tt.result, tt.override, tt.outputFormat)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutputWithComplexData(t *testing.T) {
|
||||
// Test with more complex data structures
|
||||
complexData := struct {
|
||||
Users []struct {
|
||||
Name string `json:"name" yaml:"name"`
|
||||
ID int `json:"id" yaml:"id"`
|
||||
} `json:"users" yaml:"users"`
|
||||
}{
|
||||
Users: []struct {
|
||||
Name string `json:"name" yaml:"name"`
|
||||
ID int `json:"id" yaml:"id"`
|
||||
}{
|
||||
{Name: "user1", ID: 1},
|
||||
{Name: "user2", ID: 2},
|
||||
},
|
||||
}
|
||||
|
||||
// Test JSON output
|
||||
jsonResult := output(complexData, "override", "json")
|
||||
assert.Contains(t, jsonResult, "\"users\":")
|
||||
assert.Contains(t, jsonResult, "\"name\": \"user1\"")
|
||||
assert.Contains(t, jsonResult, "\"id\": 1")
|
||||
|
||||
// Test YAML output
|
||||
yamlResult := output(complexData, "override", "yaml")
|
||||
assert.Contains(t, yamlResult, "users:")
|
||||
assert.Contains(t, yamlResult, "name: user1")
|
||||
assert.Contains(t, yamlResult, "id: 1")
|
||||
}
|
||||
|
||||
func TestOutputWithNilData(t *testing.T) {
|
||||
// Test with nil data
|
||||
result := output(nil, "fallback", "json")
|
||||
assert.Equal(t, "null", result)
|
||||
|
||||
result = output(nil, "fallback", "yaml")
|
||||
assert.Equal(t, "null\n", result)
|
||||
|
||||
result = output(nil, "fallback", "")
|
||||
assert.Equal(t, "fallback", result)
|
||||
}
|
||||
|
||||
func TestOutputWithEmptyData(t *testing.T) {
|
||||
// Test with empty slice
|
||||
emptySlice := []string{}
|
||||
result := output(emptySlice, "fallback", "json")
|
||||
assert.Equal(t, "[]", result)
|
||||
|
||||
// Test with empty map
|
||||
emptyMap := map[string]string{}
|
||||
result = output(emptyMap, "fallback", "json")
|
||||
assert.Equal(t, "{}", result)
|
||||
}
|
||||
@@ -1,23 +1,21 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var Version = "dev"
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(versionCmd)
|
||||
}
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print the version",
|
||||
Long: "The version of headscale",
|
||||
Short: "Print the version.",
|
||||
Long: "The version of headscale.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output := GetOutputFlag(cmd)
|
||||
SuccessOutput(map[string]string{
|
||||
"version": types.Version,
|
||||
"commit": types.GitCommitHash,
|
||||
}, types.Version, output)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
SuccessOutput(map[string]string{"version": Version}, Version, output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestVersionCommand(t *testing.T) {
|
||||
// Test that version command exists
|
||||
assert.NotNil(t, versionCmd)
|
||||
assert.Equal(t, "version", versionCmd.Use)
|
||||
assert.Equal(t, "Print the version.", versionCmd.Short)
|
||||
assert.Equal(t, "The version of headscale.", versionCmd.Long)
|
||||
}
|
||||
|
||||
func TestVersionCommandStructure(t *testing.T) {
|
||||
// Test command is properly added to root
|
||||
found := false
|
||||
for _, cmd := range rootCmd.Commands() {
|
||||
if cmd.Use == "version" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "version command should be added to root command")
|
||||
}
|
||||
|
||||
func TestVersionCommandFlags(t *testing.T) {
|
||||
// Version command should inherit output flag from root as persistent flag
|
||||
outputFlag := versionCmd.Flag("output")
|
||||
if outputFlag == nil {
|
||||
// Try persistent flags from root
|
||||
outputFlag = rootCmd.PersistentFlags().Lookup("output")
|
||||
}
|
||||
assert.NotNil(t, outputFlag, "version command should have access to output flag")
|
||||
}
|
||||
|
||||
func TestVersionCommandRun(t *testing.T) {
|
||||
// Test that Run function is set
|
||||
assert.NotNil(t, versionCmd.Run)
|
||||
|
||||
// We can't easily test the actual execution without mocking SuccessOutput
|
||||
// but we can verify the function exists and has the right signature
|
||||
}
|
||||
@@ -1,207 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/errdefs"
|
||||
)
|
||||
|
||||
// cleanupBeforeTest performs cleanup operations before running tests.
|
||||
func cleanupBeforeTest(ctx context.Context) error {
|
||||
if err := killTestContainers(ctx); err != nil {
|
||||
return fmt.Errorf("failed to kill test containers: %w", err)
|
||||
}
|
||||
|
||||
if err := pruneDockerNetworks(ctx); err != nil {
|
||||
return fmt.Errorf("failed to prune networks: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupAfterTest removes the test container after completion.
|
||||
func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID string) error {
|
||||
return cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
|
||||
Force: true,
|
||||
})
|
||||
}
|
||||
|
||||
// killTestContainers terminates and removes all test containers.
|
||||
func killTestContainers(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
containers, err := cli.ContainerList(ctx, container.ListOptions{
|
||||
All: true,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list containers: %w", err)
|
||||
}
|
||||
|
||||
removed := 0
|
||||
for _, cont := range containers {
|
||||
shouldRemove := false
|
||||
for _, name := range cont.Names {
|
||||
if strings.Contains(name, "headscale-test-suite") ||
|
||||
strings.Contains(name, "hs-") ||
|
||||
strings.Contains(name, "ts-") ||
|
||||
strings.Contains(name, "derp-") {
|
||||
shouldRemove = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if shouldRemove {
|
||||
// First kill the container if it's running
|
||||
if cont.State == "running" {
|
||||
_ = cli.ContainerKill(ctx, cont.ID, "KILL")
|
||||
}
|
||||
|
||||
// Then remove the container with retry logic
|
||||
if removeContainerWithRetry(ctx, cli, cont.ID) {
|
||||
removed++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if removed > 0 {
|
||||
fmt.Printf("Removed %d test containers\n", removed)
|
||||
} else {
|
||||
fmt.Println("No test containers found to remove")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeContainerWithRetry attempts to remove a container with exponential backoff retry logic.
|
||||
func removeContainerWithRetry(ctx context.Context, cli *client.Client, containerID string) bool {
|
||||
maxRetries := 3
|
||||
baseDelay := 100 * time.Millisecond
|
||||
|
||||
for attempt := range maxRetries {
|
||||
err := cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
|
||||
Force: true,
|
||||
})
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// If this is the last attempt, don't wait
|
||||
if attempt == maxRetries-1 {
|
||||
break
|
||||
}
|
||||
|
||||
// Wait with exponential backoff
|
||||
delay := baseDelay * time.Duration(1<<attempt)
|
||||
time.Sleep(delay)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// pruneDockerNetworks removes unused Docker networks.
|
||||
func pruneDockerNetworks(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
report, err := cli.NetworksPrune(ctx, filters.Args{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prune networks: %w", err)
|
||||
}
|
||||
|
||||
if len(report.NetworksDeleted) > 0 {
|
||||
fmt.Printf("Removed %d unused networks\n", len(report.NetworksDeleted))
|
||||
} else {
|
||||
fmt.Println("No unused networks found to remove")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanOldImages removes test-related and old dangling Docker images.
|
||||
func cleanOldImages(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
images, err := cli.ImageList(ctx, image.ListOptions{
|
||||
All: true,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list images: %w", err)
|
||||
}
|
||||
|
||||
removed := 0
|
||||
for _, img := range images {
|
||||
shouldRemove := false
|
||||
for _, tag := range img.RepoTags {
|
||||
if strings.Contains(tag, "hs-") ||
|
||||
strings.Contains(tag, "headscale-integration") ||
|
||||
strings.Contains(tag, "tailscale") {
|
||||
shouldRemove = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(img.RepoTags) == 0 && time.Unix(img.Created, 0).Before(time.Now().Add(-7*24*time.Hour)) {
|
||||
shouldRemove = true
|
||||
}
|
||||
|
||||
if shouldRemove {
|
||||
_, err := cli.ImageRemove(ctx, img.ID, image.RemoveOptions{
|
||||
Force: true,
|
||||
})
|
||||
if err == nil {
|
||||
removed++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if removed > 0 {
|
||||
fmt.Printf("Removed %d test images\n", removed)
|
||||
} else {
|
||||
fmt.Println("No test images found to remove")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanCacheVolume removes the Docker volume used for Go module cache.
|
||||
func cleanCacheVolume(ctx context.Context) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
volumeName := "hs-integration-go-cache"
|
||||
err = cli.VolumeRemove(ctx, volumeName, true)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
fmt.Printf("Go module cache volume not found: %s\n", volumeName)
|
||||
} else if errdefs.IsConflict(err) {
|
||||
fmt.Printf("Go module cache volume is in use and cannot be removed: %s\n", volumeName)
|
||||
} else {
|
||||
fmt.Printf("Failed to remove Go module cache volume %s: %v\n", volumeName, err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Removed Go module cache volume: %s\n", volumeName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
695
cmd/hi/docker.go
695
cmd/hi/docker.go
@@ -1,695 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrTestFailed = errors.New("test failed")
|
||||
ErrUnexpectedContainerWait = errors.New("unexpected end of container wait")
|
||||
ErrNoDockerContext = errors.New("no docker context found")
|
||||
)
|
||||
|
||||
// runTestContainer executes integration tests in a Docker container.
|
||||
func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
runID := dockertestutil.GenerateRunID()
|
||||
containerName := "headscale-test-suite-" + runID
|
||||
logsDir := filepath.Join(config.LogsDir, runID)
|
||||
|
||||
if config.Verbose {
|
||||
log.Printf("Run ID: %s", runID)
|
||||
log.Printf("Container name: %s", containerName)
|
||||
log.Printf("Logs directory: %s", logsDir)
|
||||
}
|
||||
|
||||
absLogsDir, err := filepath.Abs(logsDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get absolute path for logs directory: %w", err)
|
||||
}
|
||||
|
||||
const dirPerm = 0o755
|
||||
if err := os.MkdirAll(absLogsDir, dirPerm); err != nil {
|
||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
||||
}
|
||||
|
||||
if config.CleanBefore {
|
||||
if config.Verbose {
|
||||
log.Printf("Running pre-test cleanup...")
|
||||
}
|
||||
if err := cleanupBeforeTest(ctx); err != nil && config.Verbose {
|
||||
log.Printf("Warning: pre-test cleanup failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
goTestCmd := buildGoTestCommand(config)
|
||||
if config.Verbose {
|
||||
log.Printf("Command: %s", strings.Join(goTestCmd, " "))
|
||||
}
|
||||
|
||||
imageName := "golang:" + config.GoVersion
|
||||
if err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil {
|
||||
return fmt.Errorf("failed to ensure image availability: %w", err)
|
||||
}
|
||||
|
||||
resp, err := createGoTestContainer(ctx, cli, config, containerName, absLogsDir, goTestCmd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create container: %w", err)
|
||||
}
|
||||
|
||||
if config.Verbose {
|
||||
log.Printf("Created container: %s", resp.ID)
|
||||
}
|
||||
|
||||
if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
|
||||
return fmt.Errorf("failed to start container: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("Starting test: %s", config.TestPattern)
|
||||
|
||||
exitCode, err := streamAndWait(ctx, cli, resp.ID)
|
||||
|
||||
// Ensure all containers have finished and logs are flushed before extracting artifacts
|
||||
if waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose); waitErr != nil && config.Verbose {
|
||||
log.Printf("Warning: failed to wait for container finalization: %v", waitErr)
|
||||
}
|
||||
|
||||
// Extract artifacts from test containers before cleanup
|
||||
if err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose {
|
||||
log.Printf("Warning: failed to extract artifacts from containers: %v", err)
|
||||
}
|
||||
|
||||
// Always list control files regardless of test outcome
|
||||
listControlFiles(logsDir)
|
||||
|
||||
shouldCleanup := config.CleanAfter && (!config.KeepOnFailure || exitCode == 0)
|
||||
if shouldCleanup {
|
||||
if config.Verbose {
|
||||
log.Printf("Running post-test cleanup...")
|
||||
}
|
||||
if cleanErr := cleanupAfterTest(ctx, cli, resp.ID); cleanErr != nil && config.Verbose {
|
||||
log.Printf("Warning: post-test cleanup failed: %v", cleanErr)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("test execution failed: %w", err)
|
||||
}
|
||||
|
||||
if exitCode != 0 {
|
||||
return fmt.Errorf("%w: exit code %d", ErrTestFailed, exitCode)
|
||||
}
|
||||
|
||||
log.Printf("Test completed successfully!")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildGoTestCommand constructs the go test command arguments.
|
||||
func buildGoTestCommand(config *RunConfig) []string {
|
||||
cmd := []string{"go", "test", "./..."}
|
||||
|
||||
if config.TestPattern != "" {
|
||||
cmd = append(cmd, "-run", config.TestPattern)
|
||||
}
|
||||
|
||||
if config.FailFast {
|
||||
cmd = append(cmd, "-failfast")
|
||||
}
|
||||
|
||||
cmd = append(cmd, "-timeout", config.Timeout.String())
|
||||
cmd = append(cmd, "-v")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// createGoTestContainer creates a Docker container configured for running integration tests.
|
||||
func createGoTestContainer(ctx context.Context, cli *client.Client, config *RunConfig, containerName, logsDir string, goTestCmd []string) (container.CreateResponse, error) {
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return container.CreateResponse{}, fmt.Errorf("failed to get working directory: %w", err)
|
||||
}
|
||||
|
||||
projectRoot := findProjectRoot(pwd)
|
||||
|
||||
runID := dockertestutil.ExtractRunIDFromContainerName(containerName)
|
||||
|
||||
env := []string{
|
||||
fmt.Sprintf("HEADSCALE_INTEGRATION_POSTGRES=%d", boolToInt(config.UsePostgres)),
|
||||
"HEADSCALE_INTEGRATION_RUN_ID=" + runID,
|
||||
}
|
||||
containerConfig := &container.Config{
|
||||
Image: "golang:" + config.GoVersion,
|
||||
Cmd: goTestCmd,
|
||||
Env: env,
|
||||
WorkingDir: projectRoot + "/integration",
|
||||
Tty: true,
|
||||
Labels: map[string]string{
|
||||
"hi.run-id": runID,
|
||||
"hi.test-type": "test-runner",
|
||||
},
|
||||
}
|
||||
|
||||
// Get the correct Docker socket path from the current context
|
||||
dockerSocketPath := getDockerSocketPath()
|
||||
|
||||
if config.Verbose {
|
||||
log.Printf("Using Docker socket: %s", dockerSocketPath)
|
||||
}
|
||||
|
||||
hostConfig := &container.HostConfig{
|
||||
AutoRemove: false, // We'll remove manually for better control
|
||||
Binds: []string{
|
||||
fmt.Sprintf("%s:%s", projectRoot, projectRoot),
|
||||
dockerSocketPath + ":/var/run/docker.sock",
|
||||
logsDir + ":/tmp/control",
|
||||
},
|
||||
Mounts: []mount.Mount{
|
||||
{
|
||||
Type: mount.TypeVolume,
|
||||
Source: "hs-integration-go-cache",
|
||||
Target: "/go",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return cli.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, containerName)
|
||||
}
|
||||
|
||||
// streamAndWait streams container output and waits for completion.
|
||||
func streamAndWait(ctx context.Context, cli *client.Client, containerID string) (int, error) {
|
||||
out, err := cli.ContainerLogs(ctx, containerID, container.LogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Follow: true,
|
||||
})
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("failed to get container logs: %w", err)
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
go func() {
|
||||
_, _ = io.Copy(os.Stdout, out)
|
||||
}()
|
||||
|
||||
statusCh, errCh := cli.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("error waiting for container: %w", err)
|
||||
}
|
||||
case status := <-statusCh:
|
||||
return int(status.StatusCode), nil
|
||||
}
|
||||
|
||||
return -1, ErrUnexpectedContainerWait
|
||||
}
|
||||
|
||||
// waitForContainerFinalization ensures all test containers have properly finished and flushed their output.
|
||||
func waitForContainerFinalization(ctx context.Context, cli *client.Client, testContainerID string, verbose bool) error {
|
||||
// First, get all related test containers
|
||||
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list containers: %w", err)
|
||||
}
|
||||
|
||||
testContainers := getCurrentTestContainers(containers, testContainerID, verbose)
|
||||
|
||||
// Wait for all test containers to reach a final state
|
||||
maxWaitTime := 10 * time.Second
|
||||
checkInterval := 500 * time.Millisecond
|
||||
timeout := time.After(maxWaitTime)
|
||||
ticker := time.NewTicker(checkInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timeout:
|
||||
if verbose {
|
||||
log.Printf("Timeout waiting for container finalization, proceeding with artifact extraction")
|
||||
}
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
allFinalized := true
|
||||
|
||||
for _, testCont := range testContainers {
|
||||
inspect, err := cli.ContainerInspect(ctx, testCont.ID)
|
||||
if err != nil {
|
||||
if verbose {
|
||||
log.Printf("Warning: failed to inspect container %s: %v", testCont.name, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if container is in a final state
|
||||
if !isContainerFinalized(inspect.State) {
|
||||
allFinalized = false
|
||||
if verbose {
|
||||
log.Printf("Container %s still finalizing (state: %s)", testCont.name, inspect.State.Status)
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if allFinalized {
|
||||
if verbose {
|
||||
log.Printf("All test containers finalized, ready for artifact extraction")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// isContainerFinalized checks if a container has reached a final state where logs are flushed.
|
||||
func isContainerFinalized(state *container.State) bool {
|
||||
// Container is finalized if it's not running and has a finish time
|
||||
return !state.Running && state.FinishedAt != ""
|
||||
}
|
||||
|
||||
// findProjectRoot locates the project root by finding the directory containing go.mod.
|
||||
func findProjectRoot(startPath string) string {
|
||||
current := startPath
|
||||
for {
|
||||
if _, err := os.Stat(filepath.Join(current, "go.mod")); err == nil {
|
||||
return current
|
||||
}
|
||||
parent := filepath.Dir(current)
|
||||
if parent == current {
|
||||
return startPath
|
||||
}
|
||||
current = parent
|
||||
}
|
||||
}
|
||||
|
||||
// boolToInt converts a boolean to an integer for environment variables.
|
||||
func boolToInt(b bool) int {
|
||||
if b {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// DockerContext represents Docker context information.
|
||||
type DockerContext struct {
|
||||
Name string `json:"Name"`
|
||||
Metadata map[string]interface{} `json:"Metadata"`
|
||||
Endpoints map[string]interface{} `json:"Endpoints"`
|
||||
Current bool `json:"Current"`
|
||||
}
|
||||
|
||||
// createDockerClient creates a Docker client with context detection.
|
||||
func createDockerClient() (*client.Client, error) {
|
||||
contextInfo, err := getCurrentDockerContext()
|
||||
if err != nil {
|
||||
return client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
}
|
||||
|
||||
var clientOpts []client.Opt
|
||||
clientOpts = append(clientOpts, client.WithAPIVersionNegotiation())
|
||||
|
||||
if contextInfo != nil {
|
||||
if endpoints, ok := contextInfo.Endpoints["docker"]; ok {
|
||||
if endpointMap, ok := endpoints.(map[string]interface{}); ok {
|
||||
if host, ok := endpointMap["Host"].(string); ok {
|
||||
if runConfig.Verbose {
|
||||
log.Printf("Using Docker host from context '%s': %s", contextInfo.Name, host)
|
||||
}
|
||||
clientOpts = append(clientOpts, client.WithHost(host))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(clientOpts) == 1 {
|
||||
clientOpts = append(clientOpts, client.FromEnv)
|
||||
}
|
||||
|
||||
return client.NewClientWithOpts(clientOpts...)
|
||||
}
|
||||
|
||||
// getCurrentDockerContext retrieves the current Docker context information.
|
||||
func getCurrentDockerContext() (*DockerContext, error) {
|
||||
cmd := exec.Command("docker", "context", "inspect")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get docker context: %w", err)
|
||||
}
|
||||
|
||||
var contexts []DockerContext
|
||||
if err := json.Unmarshal(output, &contexts); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse docker context: %w", err)
|
||||
}
|
||||
|
||||
if len(contexts) > 0 {
|
||||
return &contexts[0], nil
|
||||
}
|
||||
|
||||
return nil, ErrNoDockerContext
|
||||
}
|
||||
|
||||
// getDockerSocketPath returns the correct Docker socket path for the current context.
|
||||
func getDockerSocketPath() string {
|
||||
// Always use the default socket path for mounting since Docker handles
|
||||
// the translation to the actual socket (e.g., colima socket) internally
|
||||
return "/var/run/docker.sock"
|
||||
}
|
||||
|
||||
// ensureImageAvailable pulls the specified Docker image to ensure it's available.
|
||||
func ensureImageAvailable(ctx context.Context, cli *client.Client, imageName string, verbose bool) error {
|
||||
if verbose {
|
||||
log.Printf("Pulling image %s...", imageName)
|
||||
}
|
||||
|
||||
reader, err := cli.ImagePull(ctx, imageName, image.PullOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to pull image %s: %w", imageName, err)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
if verbose {
|
||||
_, err = io.Copy(os.Stdout, reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read pull output: %w", err)
|
||||
}
|
||||
} else {
|
||||
_, err = io.Copy(io.Discard, reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read pull output: %w", err)
|
||||
}
|
||||
log.Printf("Image %s pulled successfully", imageName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// listControlFiles displays the headscale test artifacts created in the control logs directory.
|
||||
func listControlFiles(logsDir string) {
|
||||
entries, err := os.ReadDir(logsDir)
|
||||
if err != nil {
|
||||
log.Printf("Logs directory: %s", logsDir)
|
||||
return
|
||||
}
|
||||
|
||||
var logFiles []string
|
||||
var dataFiles []string
|
||||
var dataDirs []string
|
||||
|
||||
for _, entry := range entries {
|
||||
name := entry.Name()
|
||||
// Only show headscale (hs-*) files and directories
|
||||
if !strings.HasPrefix(name, "hs-") {
|
||||
continue
|
||||
}
|
||||
|
||||
if entry.IsDir() {
|
||||
// Include directories (pprof, mapresponses)
|
||||
if strings.Contains(name, "-pprof") || strings.Contains(name, "-mapresponses") {
|
||||
dataDirs = append(dataDirs, name)
|
||||
}
|
||||
} else {
|
||||
// Include files
|
||||
switch {
|
||||
case strings.HasSuffix(name, ".stderr.log") || strings.HasSuffix(name, ".stdout.log"):
|
||||
logFiles = append(logFiles, name)
|
||||
case strings.HasSuffix(name, ".db"):
|
||||
dataFiles = append(dataFiles, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Test artifacts saved to: %s", logsDir)
|
||||
|
||||
if len(logFiles) > 0 {
|
||||
log.Printf("Headscale logs:")
|
||||
for _, file := range logFiles {
|
||||
log.Printf(" %s", file)
|
||||
}
|
||||
}
|
||||
|
||||
if len(dataFiles) > 0 || len(dataDirs) > 0 {
|
||||
log.Printf("Headscale data:")
|
||||
for _, file := range dataFiles {
|
||||
log.Printf(" %s", file)
|
||||
}
|
||||
for _, dir := range dataDirs {
|
||||
log.Printf(" %s/", dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// extractArtifactsFromContainers collects container logs and files from the specific test run.
|
||||
func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDir string, verbose bool) error {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Docker client: %w", err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
// List all containers
|
||||
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list containers: %w", err)
|
||||
}
|
||||
|
||||
// Get containers from the specific test run
|
||||
currentTestContainers := getCurrentTestContainers(containers, testContainerID, verbose)
|
||||
|
||||
extractedCount := 0
|
||||
for _, cont := range currentTestContainers {
|
||||
// Extract container logs and tar files
|
||||
if err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose); err != nil {
|
||||
if verbose {
|
||||
log.Printf("Warning: failed to extract artifacts from container %s (%s): %v", cont.name, cont.ID[:12], err)
|
||||
}
|
||||
} else {
|
||||
if verbose {
|
||||
log.Printf("Extracted artifacts from container %s (%s)", cont.name, cont.ID[:12])
|
||||
}
|
||||
extractedCount++
|
||||
}
|
||||
}
|
||||
|
||||
if verbose && extractedCount > 0 {
|
||||
log.Printf("Extracted artifacts from %d containers", extractedCount)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testContainer represents a container from the current test run.
|
||||
type testContainer struct {
|
||||
ID string
|
||||
name string
|
||||
}
|
||||
|
||||
// getCurrentTestContainers filters containers to only include those from the current test run.
|
||||
func getCurrentTestContainers(containers []container.Summary, testContainerID string, verbose bool) []testContainer {
|
||||
var testRunContainers []testContainer
|
||||
|
||||
// Find the test container to get its run ID label
|
||||
var runID string
|
||||
for _, cont := range containers {
|
||||
if cont.ID == testContainerID {
|
||||
if cont.Labels != nil {
|
||||
runID = cont.Labels["hi.run-id"]
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if runID == "" {
|
||||
log.Printf("Error: test container %s missing required hi.run-id label", testContainerID[:12])
|
||||
return testRunContainers
|
||||
}
|
||||
|
||||
if verbose {
|
||||
log.Printf("Looking for containers with run ID: %s", runID)
|
||||
}
|
||||
|
||||
// Find all containers with the same run ID
|
||||
for _, cont := range containers {
|
||||
for _, name := range cont.Names {
|
||||
containerName := strings.TrimPrefix(name, "/")
|
||||
if strings.HasPrefix(containerName, "hs-") || strings.HasPrefix(containerName, "ts-") {
|
||||
// Check if container has matching run ID label
|
||||
if cont.Labels != nil && cont.Labels["hi.run-id"] == runID {
|
||||
testRunContainers = append(testRunContainers, testContainer{
|
||||
ID: cont.ID,
|
||||
name: containerName,
|
||||
})
|
||||
if verbose {
|
||||
log.Printf("Including container %s (run ID: %s)", containerName, runID)
|
||||
}
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return testRunContainers
|
||||
}
|
||||
|
||||
// extractContainerArtifacts saves logs and tar files from a container.
|
||||
func extractContainerArtifacts(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
|
||||
// Ensure the logs directory exists
|
||||
if err := os.MkdirAll(logsDir, 0o755); err != nil {
|
||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
||||
}
|
||||
|
||||
// Extract container logs
|
||||
if err := extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
|
||||
return fmt.Errorf("failed to extract logs: %w", err)
|
||||
}
|
||||
|
||||
// Extract tar files for headscale containers only
|
||||
if strings.HasPrefix(containerName, "hs-") {
|
||||
if err := extractContainerFiles(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
|
||||
if verbose {
|
||||
log.Printf("Warning: failed to extract files from %s: %v", containerName, err)
|
||||
}
|
||||
// Don't fail the whole extraction if files are missing
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractContainerLogs saves the stdout and stderr logs from a container to files.
|
||||
func extractContainerLogs(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
|
||||
// Get container logs
|
||||
logReader, err := cli.ContainerLogs(ctx, containerID, container.LogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Timestamps: false,
|
||||
Follow: false,
|
||||
Tail: "all",
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get container logs: %w", err)
|
||||
}
|
||||
defer logReader.Close()
|
||||
|
||||
// Create log files following the headscale naming convention
|
||||
stdoutPath := filepath.Join(logsDir, containerName+".stdout.log")
|
||||
stderrPath := filepath.Join(logsDir, containerName+".stderr.log")
|
||||
|
||||
// Create buffers to capture stdout and stderr separately
|
||||
var stdoutBuf, stderrBuf bytes.Buffer
|
||||
|
||||
// Demultiplex the Docker logs stream to separate stdout and stderr
|
||||
_, err = stdcopy.StdCopy(&stdoutBuf, &stderrBuf, logReader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to demultiplex container logs: %w", err)
|
||||
}
|
||||
|
||||
// Write stdout logs
|
||||
if err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0o644); err != nil {
|
||||
return fmt.Errorf("failed to write stdout log: %w", err)
|
||||
}
|
||||
|
||||
// Write stderr logs
|
||||
if err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0o644); err != nil {
|
||||
return fmt.Errorf("failed to write stderr log: %w", err)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
log.Printf("Saved logs for %s: %s, %s", containerName, stdoutPath, stderrPath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractContainerFiles extracts database file and directories from headscale containers.
|
||||
// Note: The actual file extraction is now handled by the integration tests themselves
|
||||
// via SaveProfile, SaveMapResponses, and SaveDatabase functions in hsic.go.
|
||||
func extractContainerFiles(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
|
||||
// Files are now extracted directly by the integration tests
|
||||
// This function is kept for potential future use or other file types
|
||||
return nil
|
||||
}
|
||||
|
||||
// logExtractionError logs extraction errors with appropriate level based on error type.
|
||||
func logExtractionError(artifactType, containerName string, err error, verbose bool) {
|
||||
if errors.Is(err, ErrFileNotFoundInTar) {
|
||||
// File not found is expected and only logged in verbose mode
|
||||
if verbose {
|
||||
log.Printf("No %s found in container %s", artifactType, containerName)
|
||||
}
|
||||
} else {
|
||||
// Other errors are actual failures and should be logged as warnings
|
||||
log.Printf("Warning: failed to extract %s from %s: %v", artifactType, containerName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// extractSingleFile copies a single file from a container.
|
||||
func extractSingleFile(ctx context.Context, cli *client.Client, containerID, sourcePath, fileName, logsDir string, verbose bool) error {
|
||||
tarReader, _, err := cli.CopyFromContainer(ctx, containerID, sourcePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy %s from container: %w", sourcePath, err)
|
||||
}
|
||||
defer tarReader.Close()
|
||||
|
||||
// Extract the single file from the tar
|
||||
filePath := filepath.Join(logsDir, fileName)
|
||||
if err := extractFileFromTar(tarReader, filepath.Base(sourcePath), filePath); err != nil {
|
||||
return fmt.Errorf("failed to extract file from tar: %w", err)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
log.Printf("Extracted %s from %s", fileName, containerID[:12])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractDirectory copies a directory from a container and extracts its contents.
|
||||
func extractDirectory(ctx context.Context, cli *client.Client, containerID, sourcePath, dirName, logsDir string, verbose bool) error {
|
||||
tarReader, _, err := cli.CopyFromContainer(ctx, containerID, sourcePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy %s from container: %w", sourcePath, err)
|
||||
}
|
||||
defer tarReader.Close()
|
||||
|
||||
// Create target directory
|
||||
targetDir := filepath.Join(logsDir, dirName)
|
||||
if err := os.MkdirAll(targetDir, 0o755); err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %w", targetDir, err)
|
||||
}
|
||||
|
||||
// Extract the directory from the tar
|
||||
if err := extractDirectoryFromTar(tarReader, targetDir); err != nil {
|
||||
return fmt.Errorf("failed to extract directory from tar: %w", err)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
log.Printf("Extracted %s/ from %s", dirName, containerID[:12])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
351
cmd/hi/doctor.go
351
cmd/hi/doctor.go
@@ -1,351 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var ErrSystemChecksFailed = errors.New("system checks failed")
|
||||
|
||||
// DoctorResult represents the result of a single health check.
|
||||
type DoctorResult struct {
|
||||
Name string
|
||||
Status string // "PASS", "FAIL", "WARN"
|
||||
Message string
|
||||
Suggestions []string
|
||||
}
|
||||
|
||||
// runDoctorCheck performs comprehensive pre-flight checks for integration testing.
|
||||
func runDoctorCheck(ctx context.Context) error {
|
||||
results := []DoctorResult{}
|
||||
|
||||
// Check 1: Docker binary availability
|
||||
results = append(results, checkDockerBinary())
|
||||
|
||||
// Check 2: Docker daemon connectivity
|
||||
dockerResult := checkDockerDaemon(ctx)
|
||||
results = append(results, dockerResult)
|
||||
|
||||
// If Docker is available, run additional checks
|
||||
if dockerResult.Status == "PASS" {
|
||||
results = append(results, checkDockerContext(ctx))
|
||||
results = append(results, checkDockerSocket(ctx))
|
||||
results = append(results, checkGolangImage(ctx))
|
||||
}
|
||||
|
||||
// Check 3: Go installation
|
||||
results = append(results, checkGoInstallation())
|
||||
|
||||
// Check 4: Git repository
|
||||
results = append(results, checkGitRepository())
|
||||
|
||||
// Check 5: Required files
|
||||
results = append(results, checkRequiredFiles())
|
||||
|
||||
// Display results
|
||||
displayDoctorResults(results)
|
||||
|
||||
// Return error if any critical checks failed
|
||||
for _, result := range results {
|
||||
if result.Status == "FAIL" {
|
||||
return fmt.Errorf("%w - see details above", ErrSystemChecksFailed)
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("✅ All system checks passed - ready to run integration tests!")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkDockerBinary verifies Docker binary is available.
|
||||
func checkDockerBinary() DoctorResult {
|
||||
_, err := exec.LookPath("docker")
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Binary",
|
||||
Status: "FAIL",
|
||||
Message: "Docker binary not found in PATH",
|
||||
Suggestions: []string{
|
||||
"Install Docker: https://docs.docker.com/get-docker/",
|
||||
"For macOS: consider using colima or Docker Desktop",
|
||||
"Ensure docker is in your PATH",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Docker Binary",
|
||||
Status: "PASS",
|
||||
Message: "Docker binary found",
|
||||
}
|
||||
}
|
||||
|
||||
// checkDockerDaemon verifies Docker daemon is running and accessible.
|
||||
func checkDockerDaemon(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Daemon",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot create Docker client: %v", err),
|
||||
Suggestions: []string{
|
||||
"Start Docker daemon/service",
|
||||
"Check Docker Desktop is running (if using Docker Desktop)",
|
||||
"For colima: run 'colima start'",
|
||||
"Verify DOCKER_HOST environment variable if set",
|
||||
},
|
||||
}
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
_, err = cli.Ping(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Daemon",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot ping Docker daemon: %v", err),
|
||||
Suggestions: []string{
|
||||
"Ensure Docker daemon is running",
|
||||
"Check Docker socket permissions",
|
||||
"Try: docker info",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Docker Daemon",
|
||||
Status: "PASS",
|
||||
Message: "Docker daemon is running and accessible",
|
||||
}
|
||||
}
|
||||
|
||||
// checkDockerContext verifies Docker context configuration.
|
||||
func checkDockerContext(_ context.Context) DoctorResult {
|
||||
contextInfo, err := getCurrentDockerContext()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Context",
|
||||
Status: "WARN",
|
||||
Message: "Could not detect Docker context, using default settings",
|
||||
Suggestions: []string{
|
||||
"Check: docker context ls",
|
||||
"Consider setting up a specific context if needed",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if contextInfo == nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Context",
|
||||
Status: "PASS",
|
||||
Message: "Using default Docker context",
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Docker Context",
|
||||
Status: "PASS",
|
||||
Message: "Using Docker context: " + contextInfo.Name,
|
||||
}
|
||||
}
|
||||
|
||||
// checkDockerSocket verifies Docker socket accessibility.
|
||||
func checkDockerSocket(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Socket",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot access Docker socket: %v", err),
|
||||
Suggestions: []string{
|
||||
"Check Docker socket permissions",
|
||||
"Add user to docker group: sudo usermod -aG docker $USER",
|
||||
"For colima: ensure socket is accessible",
|
||||
},
|
||||
}
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
info, err := cli.Info(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Socket",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot get Docker info: %v", err),
|
||||
Suggestions: []string{
|
||||
"Check Docker daemon status",
|
||||
"Verify socket permissions",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Docker Socket",
|
||||
Status: "PASS",
|
||||
Message: fmt.Sprintf("Docker socket accessible (Server: %s)", info.ServerVersion),
|
||||
}
|
||||
}
|
||||
|
||||
// checkGolangImage verifies we can access the golang Docker image.
|
||||
func checkGolangImage(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Golang Image",
|
||||
Status: "FAIL",
|
||||
Message: "Cannot create Docker client for image check",
|
||||
}
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
goVersion := detectGoVersion()
|
||||
imageName := "golang:" + goVersion
|
||||
|
||||
// Check if we can pull the image
|
||||
err = ensureImageAvailable(ctx, cli, imageName, false)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Golang Image",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot pull golang image %s: %v", imageName, err),
|
||||
Suggestions: []string{
|
||||
"Check internet connectivity",
|
||||
"Verify Docker Hub access",
|
||||
"Try: docker pull " + imageName,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Golang Image",
|
||||
Status: "PASS",
|
||||
Message: fmt.Sprintf("Golang image %s is available", imageName),
|
||||
}
|
||||
}
|
||||
|
||||
// checkGoInstallation verifies Go is installed and working.
|
||||
func checkGoInstallation() DoctorResult {
|
||||
_, err := exec.LookPath("go")
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Go Installation",
|
||||
Status: "FAIL",
|
||||
Message: "Go binary not found in PATH",
|
||||
Suggestions: []string{
|
||||
"Install Go: https://golang.org/dl/",
|
||||
"Ensure go is in your PATH",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", "version")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Go Installation",
|
||||
Status: "FAIL",
|
||||
Message: fmt.Sprintf("Cannot get Go version: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
version := strings.TrimSpace(string(output))
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Go Installation",
|
||||
Status: "PASS",
|
||||
Message: version,
|
||||
}
|
||||
}
|
||||
|
||||
// checkGitRepository verifies we're in a git repository.
|
||||
func checkGitRepository() DoctorResult {
|
||||
cmd := exec.Command("git", "rev-parse", "--git-dir")
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Git Repository",
|
||||
Status: "FAIL",
|
||||
Message: "Not in a Git repository",
|
||||
Suggestions: []string{
|
||||
"Run from within the headscale git repository",
|
||||
"Clone the repository: git clone https://github.com/juanfont/headscale.git",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Git Repository",
|
||||
Status: "PASS",
|
||||
Message: "Running in Git repository",
|
||||
}
|
||||
}
|
||||
|
||||
// checkRequiredFiles verifies required files exist.
|
||||
func checkRequiredFiles() DoctorResult {
|
||||
requiredFiles := []string{
|
||||
"go.mod",
|
||||
"integration/",
|
||||
"cmd/hi/",
|
||||
}
|
||||
|
||||
var missingFiles []string
|
||||
for _, file := range requiredFiles {
|
||||
cmd := exec.Command("test", "-e", file)
|
||||
if err := cmd.Run(); err != nil {
|
||||
missingFiles = append(missingFiles, file)
|
||||
}
|
||||
}
|
||||
|
||||
if len(missingFiles) > 0 {
|
||||
return DoctorResult{
|
||||
Name: "Required Files",
|
||||
Status: "FAIL",
|
||||
Message: "Missing required files: " + strings.Join(missingFiles, ", "),
|
||||
Suggestions: []string{
|
||||
"Ensure you're in the headscale project root directory",
|
||||
"Check that integration/ directory exists",
|
||||
"Verify this is a complete headscale repository",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorResult{
|
||||
Name: "Required Files",
|
||||
Status: "PASS",
|
||||
Message: "All required files found",
|
||||
}
|
||||
}
|
||||
|
||||
// displayDoctorResults shows the results in a formatted way.
|
||||
func displayDoctorResults(results []DoctorResult) {
|
||||
log.Printf("🔍 System Health Check Results")
|
||||
log.Printf("================================")
|
||||
|
||||
for _, result := range results {
|
||||
var icon string
|
||||
switch result.Status {
|
||||
case "PASS":
|
||||
icon = "✅"
|
||||
case "WARN":
|
||||
icon = "⚠️"
|
||||
case "FAIL":
|
||||
icon = "❌"
|
||||
default:
|
||||
icon = "❓"
|
||||
}
|
||||
|
||||
log.Printf("%s %s: %s", icon, result.Name, result.Message)
|
||||
|
||||
if len(result.Suggestions) > 0 {
|
||||
for _, suggestion := range result.Suggestions {
|
||||
log.Printf(" 💡 %s", suggestion)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("================================")
|
||||
}
|
||||
@@ -1,93 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/creachadair/command"
|
||||
"github.com/creachadair/flax"
|
||||
)
|
||||
|
||||
var runConfig RunConfig
|
||||
|
||||
func main() {
|
||||
root := command.C{
|
||||
Name: "hi",
|
||||
Help: "Headscale Integration test runner",
|
||||
Commands: []*command.C{
|
||||
{
|
||||
Name: "run",
|
||||
Help: "Run integration tests",
|
||||
Usage: "run [test-pattern] [flags]",
|
||||
SetFlags: command.Flags(flax.MustBind, &runConfig),
|
||||
Run: runIntegrationTest,
|
||||
},
|
||||
{
|
||||
Name: "doctor",
|
||||
Help: "Check system requirements for running integration tests",
|
||||
Run: func(env *command.Env) error {
|
||||
return runDoctorCheck(env.Context())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "clean",
|
||||
Help: "Clean Docker resources",
|
||||
Commands: []*command.C{
|
||||
{
|
||||
Name: "networks",
|
||||
Help: "Prune unused Docker networks",
|
||||
Run: func(env *command.Env) error {
|
||||
return pruneDockerNetworks(env.Context())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "images",
|
||||
Help: "Clean old test images",
|
||||
Run: func(env *command.Env) error {
|
||||
return cleanOldImages(env.Context())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "containers",
|
||||
Help: "Kill all test containers",
|
||||
Run: func(env *command.Env) error {
|
||||
return killTestContainers(env.Context())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "cache",
|
||||
Help: "Clean Go module cache volume",
|
||||
Run: func(env *command.Env) error {
|
||||
return cleanCacheVolume(env.Context())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "all",
|
||||
Help: "Run all cleanup operations",
|
||||
Run: func(env *command.Env) error {
|
||||
return cleanAll(env.Context())
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
command.HelpCommand(nil),
|
||||
},
|
||||
}
|
||||
|
||||
env := root.NewEnv(nil).MergeFlags(true)
|
||||
command.RunOrFail(env, os.Args[1:])
|
||||
}
|
||||
|
||||
func cleanAll(ctx context.Context) error {
|
||||
if err := killTestContainers(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pruneDockerNetworks(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cleanOldImages(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cleanCacheVolume(ctx)
|
||||
}
|
||||
122
cmd/hi/run.go
122
cmd/hi/run.go
@@ -1,122 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/creachadair/command"
|
||||
)
|
||||
|
||||
var ErrTestPatternRequired = errors.New("test pattern is required as first argument or use --test flag")
|
||||
|
||||
type RunConfig struct {
|
||||
TestPattern string `flag:"test,Test pattern to run"`
|
||||
Timeout time.Duration `flag:"timeout,default=120m,Test timeout"`
|
||||
FailFast bool `flag:"failfast,default=true,Stop on first test failure"`
|
||||
UsePostgres bool `flag:"postgres,default=false,Use PostgreSQL instead of SQLite"`
|
||||
GoVersion string `flag:"go-version,Go version to use (auto-detected from go.mod)"`
|
||||
CleanBefore bool `flag:"clean-before,default=true,Clean resources before test"`
|
||||
CleanAfter bool `flag:"clean-after,default=true,Clean resources after test"`
|
||||
KeepOnFailure bool `flag:"keep-on-failure,default=false,Keep containers on test failure"`
|
||||
LogsDir string `flag:"logs-dir,default=control_logs,Control logs directory"`
|
||||
Verbose bool `flag:"verbose,default=false,Verbose output"`
|
||||
}
|
||||
|
||||
// runIntegrationTest executes the integration test workflow.
|
||||
func runIntegrationTest(env *command.Env) error {
|
||||
args := env.Args
|
||||
if len(args) > 0 && runConfig.TestPattern == "" {
|
||||
runConfig.TestPattern = args[0]
|
||||
}
|
||||
|
||||
if runConfig.TestPattern == "" {
|
||||
return ErrTestPatternRequired
|
||||
}
|
||||
|
||||
if runConfig.GoVersion == "" {
|
||||
runConfig.GoVersion = detectGoVersion()
|
||||
}
|
||||
|
||||
// Run pre-flight checks
|
||||
if runConfig.Verbose {
|
||||
log.Printf("Running pre-flight system checks...")
|
||||
}
|
||||
if err := runDoctorCheck(env.Context()); err != nil {
|
||||
return fmt.Errorf("pre-flight checks failed: %w", err)
|
||||
}
|
||||
|
||||
if runConfig.Verbose {
|
||||
log.Printf("Running test: %s", runConfig.TestPattern)
|
||||
log.Printf("Go version: %s", runConfig.GoVersion)
|
||||
log.Printf("Timeout: %s", runConfig.Timeout)
|
||||
log.Printf("Use PostgreSQL: %t", runConfig.UsePostgres)
|
||||
}
|
||||
|
||||
return runTestContainer(env.Context(), &runConfig)
|
||||
}
|
||||
|
||||
// detectGoVersion reads the Go version from go.mod file.
|
||||
func detectGoVersion() string {
|
||||
goModPath := filepath.Join("..", "..", "go.mod")
|
||||
|
||||
if _, err := os.Stat("go.mod"); err == nil {
|
||||
goModPath = "go.mod"
|
||||
} else if _, err := os.Stat("../../go.mod"); err == nil {
|
||||
goModPath = "../../go.mod"
|
||||
}
|
||||
|
||||
content, err := os.ReadFile(goModPath)
|
||||
if err != nil {
|
||||
return "1.24"
|
||||
}
|
||||
|
||||
lines := splitLines(string(content))
|
||||
for _, line := range lines {
|
||||
if len(line) > 3 && line[:3] == "go " {
|
||||
version := line[3:]
|
||||
if idx := indexOf(version, " "); idx != -1 {
|
||||
version = version[:idx]
|
||||
}
|
||||
|
||||
return version
|
||||
}
|
||||
}
|
||||
|
||||
return "1.24"
|
||||
}
|
||||
|
||||
// splitLines splits a string into lines without using strings.Split.
|
||||
func splitLines(s string) []string {
|
||||
var lines []string
|
||||
var current string
|
||||
|
||||
for _, char := range s {
|
||||
if char == '\n' {
|
||||
lines = append(lines, current)
|
||||
current = ""
|
||||
} else {
|
||||
current += string(char)
|
||||
}
|
||||
}
|
||||
|
||||
if current != "" {
|
||||
lines = append(lines, current)
|
||||
}
|
||||
|
||||
return lines
|
||||
}
|
||||
|
||||
// indexOf finds the first occurrence of substr in s.
|
||||
func indexOf(s, substr string) int {
|
||||
for i := 0; i <= len(s)-len(substr); i++ {
|
||||
if s[i:i+len(substr)] == substr {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
@@ -1,100 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrFileNotFoundInTar indicates a file was not found in the tar archive.
|
||||
var ErrFileNotFoundInTar = errors.New("file not found in tar")
|
||||
|
||||
// extractFileFromTar extracts a single file from a tar reader.
|
||||
func extractFileFromTar(tarReader io.Reader, fileName, outputPath string) error {
|
||||
tr := tar.NewReader(tarReader)
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read tar header: %w", err)
|
||||
}
|
||||
|
||||
// Check if this is the file we're looking for
|
||||
if filepath.Base(header.Name) == fileName {
|
||||
if header.Typeflag == tar.TypeReg {
|
||||
// Create the output file
|
||||
outFile, err := os.Create(outputPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create output file: %w", err)
|
||||
}
|
||||
defer outFile.Close()
|
||||
|
||||
// Copy file contents
|
||||
if _, err := io.Copy(outFile, tr); err != nil {
|
||||
return fmt.Errorf("failed to copy file contents: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("%w: %s", ErrFileNotFoundInTar, fileName)
|
||||
}
|
||||
|
||||
// extractDirectoryFromTar extracts all files from a tar reader to a target directory.
|
||||
func extractDirectoryFromTar(tarReader io.Reader, targetDir string) error {
|
||||
tr := tar.NewReader(tarReader)
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read tar header: %w", err)
|
||||
}
|
||||
|
||||
// Clean the path to prevent directory traversal
|
||||
cleanName := filepath.Clean(header.Name)
|
||||
if strings.Contains(cleanName, "..") {
|
||||
continue // Skip potentially dangerous paths
|
||||
}
|
||||
|
||||
targetPath := filepath.Join(targetDir, filepath.Base(cleanName))
|
||||
|
||||
switch header.Typeflag {
|
||||
case tar.TypeDir:
|
||||
// Create directory
|
||||
if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %w", targetPath, err)
|
||||
}
|
||||
case tar.TypeReg:
|
||||
// Create file
|
||||
outFile, err := os.Create(targetPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create file %s: %w", targetPath, err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(outFile, tr); err != nil {
|
||||
outFile.Close()
|
||||
return fmt.Errorf("failed to copy file contents: %w", err)
|
||||
}
|
||||
outFile.Close()
|
||||
|
||||
// Set file permissions
|
||||
if err := os.Chmod(targetPath, os.FileMode(header.Mode)); err != nil {
|
||||
return fmt.Errorf("failed to set file permissions: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -18,8 +18,10 @@ server_url: http://127.0.0.1:8080
|
||||
# listen_addr: 0.0.0.0:8080
|
||||
listen_addr: 127.0.0.1:8080
|
||||
|
||||
# Address to listen to /metrics and /debug, you may want
|
||||
# to keep this endpoint private to your internal network
|
||||
# Address to listen to /metrics, you may want
|
||||
# to keep this endpoint private to your internal
|
||||
# network
|
||||
#
|
||||
metrics_listen_addr: 127.0.0.1:9090
|
||||
|
||||
# Address to listen for gRPC.
|
||||
@@ -41,9 +43,9 @@ grpc_allow_insecure: false
|
||||
# The Noise section includes specific configuration for the
|
||||
# TS2021 Noise protocol
|
||||
noise:
|
||||
# The Noise private key is used to encrypt the traffic between headscale and
|
||||
# Tailscale clients when using the new Noise-based protocol. A missing key
|
||||
# will be automatically generated.
|
||||
# The Noise private key is used to encrypt the
|
||||
# traffic between headscale and Tailscale clients when
|
||||
# using the new Noise-based protocol.
|
||||
private_key_path: /var/lib/headscale/noise_private.key
|
||||
|
||||
# List of IP prefixes to allocate Tailscale addresses from.
|
||||
@@ -85,17 +87,16 @@ derp:
|
||||
region_code: "headscale"
|
||||
region_name: "Headscale Embedded DERP"
|
||||
|
||||
# Only allow clients associated with this server access
|
||||
verify_clients: true
|
||||
|
||||
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
|
||||
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
|
||||
#
|
||||
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
|
||||
stun_listen_addr: "0.0.0.0:3478"
|
||||
|
||||
# Private key used to encrypt the traffic between headscale DERP and
|
||||
# Tailscale clients. A missing key will be automatically generated.
|
||||
# Private key used to encrypt the traffic between headscale DERP
|
||||
# and Tailscale clients.
|
||||
# The private key file will be autogenerated if it's missing.
|
||||
#
|
||||
private_key_path: /var/lib/headscale/derp_server_private.key
|
||||
|
||||
# This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
|
||||
@@ -273,10 +274,6 @@ dns:
|
||||
# `hostname.base_domain` (e.g., _myhost.example.com_).
|
||||
base_domain: example.com
|
||||
|
||||
# Whether to use the local DNS settings of a node (default) or override the
|
||||
# local DNS settings and force the use of Headscale's DNS configuration.
|
||||
override_local_dns: false
|
||||
|
||||
# List of DNS servers to expose to clients.
|
||||
nameservers:
|
||||
global:
|
||||
@@ -322,59 +319,50 @@ dns:
|
||||
# Note: for production you will want to set this to something like:
|
||||
unix_socket: /var/run/headscale/headscale.sock
|
||||
unix_socket_permission: "0770"
|
||||
|
||||
#
|
||||
# headscale supports experimental OpenID connect support,
|
||||
# it is still being tested and might have some bugs, please
|
||||
# help us test it.
|
||||
# OpenID Connect
|
||||
# oidc:
|
||||
# # Block startup until the identity provider is available and healthy.
|
||||
# only_start_if_oidc_is_available: true
|
||||
#
|
||||
# # OpenID Connect Issuer URL from the identity provider
|
||||
# issuer: "https://your-oidc.issuer.com/path"
|
||||
#
|
||||
# # Client ID from the identity provider
|
||||
# client_id: "your-oidc-client-id"
|
||||
#
|
||||
# # Client secret generated by the identity provider
|
||||
# # Note: client_secret and client_secret_path are mutually exclusive.
|
||||
# client_secret: "your-oidc-client-secret"
|
||||
# # Alternatively, set `client_secret_path` to read the secret from the file.
|
||||
# # It resolves environment variables, making integration to systemd's
|
||||
# # `LoadCredential` straightforward:
|
||||
# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
|
||||
# # client_secret and client_secret_path are mutually exclusive.
|
||||
#
|
||||
# # The amount of time a node is authenticated with OpenID until it expires
|
||||
# # and needs to reauthenticate.
|
||||
# # The amount of time from when a node is authenticated with OpenID until it
|
||||
# # expires and needs to reauthenticate.
|
||||
# # Setting the value to "0" will mean no expiry.
|
||||
# expiry: 180d
|
||||
#
|
||||
# # Use the expiry from the token received from OpenID when the user logged
|
||||
# # in. This will typically lead to frequent need to reauthenticate and should
|
||||
# # only be enabled if you know what you are doing.
|
||||
# # in, this will typically lead to frequent need to reauthenticate and should
|
||||
# # only be enabled if you know what you are doing.
|
||||
# # Note: enabling this will cause `oidc.expiry` to be ignored.
|
||||
# use_expiry_from_token: false
|
||||
#
|
||||
# # The OIDC scopes to use, defaults to "openid", "profile" and "email".
|
||||
# # Custom scopes can be configured as needed, be sure to always include the
|
||||
# # required "openid" scope.
|
||||
# scope: ["openid", "profile", "email"]
|
||||
# # Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
|
||||
# # parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
|
||||
#
|
||||
# # Provide custom key/value pairs which get sent to the identity provider's
|
||||
# # authorization endpoint.
|
||||
# scope: ["openid", "profile", "email", "custom"]
|
||||
# extra_params:
|
||||
# domain_hint: example.com
|
||||
#
|
||||
# # Only accept users whose email domain is part of the allowed_domains list.
|
||||
# # List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
|
||||
# # authentication request will be rejected.
|
||||
#
|
||||
# allowed_domains:
|
||||
# - example.com
|
||||
#
|
||||
# # Only accept users whose email address is part of the allowed_users list.
|
||||
# allowed_users:
|
||||
# - alice@example.com
|
||||
#
|
||||
# # Only accept users which are members of at least one group in the
|
||||
# # allowed_groups list.
|
||||
# # Note: Groups from keycloak have a leading '/'
|
||||
# allowed_groups:
|
||||
# - /headscale
|
||||
# allowed_users:
|
||||
# - alice@example.com
|
||||
#
|
||||
# # Optional: PKCE (Proof Key for Code Exchange) configuration
|
||||
# # PKCE adds an additional layer of security to the OAuth 2.0 authorization code flow
|
||||
@@ -383,11 +371,23 @@ unix_socket_permission: "0770"
|
||||
# pkce:
|
||||
# # Enable or disable PKCE support (default: false)
|
||||
# enabled: false
|
||||
#
|
||||
# # PKCE method to use:
|
||||
# # - plain: Use plain code verifier
|
||||
# # - S256: Use SHA256 hashed code verifier (default, recommended)
|
||||
# method: S256
|
||||
#
|
||||
# # Map legacy users from pre-0.24.0 versions of headscale to the new OIDC users
|
||||
# # by taking the username from the legacy user and matching it with the username
|
||||
# # provided by the OIDC. This is useful when migrating from legacy users to OIDC
|
||||
# # to force them using the unique identifier from the OIDC and to give them a
|
||||
# # proper display name and picture if available.
|
||||
# # Note that this will only work if the username from the legacy user is the same
|
||||
# # and there is a possibility for account takeover should a username have changed
|
||||
# # with the provider.
|
||||
# # When this feature is disabled, it will cause all new logins to be created as new users.
|
||||
# # Note this option will be removed in the future and should be set to false
|
||||
# # on all new installations, or when all users have logged in with OIDC once.
|
||||
# map_legacy_users: false
|
||||
|
||||
# Logtail configuration
|
||||
# Logtail is Tailscale's logging and auditing infrastructure; it allows the control panel
|
||||
|
||||
@@ -40,62 +40,10 @@ official releases](../setup/install/official.md) for more information.
|
||||
In addition to that, you may use packages provided by the community or from distributions. Learn more in the
|
||||
[installation guide using community packages](../setup/install/community.md).
|
||||
|
||||
For convenience, we also [build container images with headscale](../setup/install/container.md). But **please be aware that
|
||||
For convenience, we also [build Docker images with headscale](../setup/install/container.md). But **please be aware that
|
||||
we don't officially support deploying headscale using Docker**. On our [Discord server](https://discord.gg/c84AZQhmpx)
|
||||
we have a "docker-issues" channel where you can ask for Docker-specific help to the community.
|
||||
|
||||
## Scaling / How many clients does Headscale support?
|
||||
|
||||
It depends. As often stated, Headscale is not enterprise software and our focus
|
||||
is homelabbers and self-hosters. Of course, we do not prevent people from using
|
||||
it in a commercial/professional setting and often get questions about scaling.
|
||||
|
||||
Please note that performance is not a primary consideration in Headscale's
development, as the main audience is assumed to be users with a modest number
of devices. We focus on correctness and feature parity with Tailscale
SaaS over time.
|
||||
|
||||
To understand whether you might be able to use Headscale for your use case, I will
describe two scenarios in an effort to explain the central bottleneck
of Headscale:
|
||||
|
||||
1. An environment with 1000 servers
|
||||
|
||||
- they rarely "move" (change their endpoints)
|
||||
- new nodes are added rarely
|
||||
|
||||
2. An environment with 80 laptops/phones (end user devices)
|
||||
|
||||
- nodes move often, e.g. switching from home to office
|
||||
|
||||
Headscale calculates a map of all nodes that need to talk to each other;
|
||||
creating this "world map" requires a lot of CPU time. When an event that
|
||||
requires changes to this map happens, the whole "world" is recalculated, and a
|
||||
new "world map" is created for every node in the network.
|
||||
|
||||
This means that under certain conditions, Headscale can likely handle 100s
|
||||
of devices (maybe more), if there is _little to no change_ happening in the
|
||||
network. For example, in Scenario 1, the process of computing the world map is
|
||||
extremely demanding due to the size of the network, but when the map has been
|
||||
created and the nodes are not changing, the Headscale instance will likely
|
||||
return to a very low resource usage until the next time there is an event
|
||||
requiring the new map.
|
||||
|
||||
In the case of Scenario 2, the process of computing the world map is less
demanding due to the smaller size of the network; however, the nodes will
likely change frequently, which leads to constant resource usage.
|
||||
|
||||
Headscale will start to struggle when the two scenarios overlap, e.g. many nodes
|
||||
with frequent changes will cause the resource usage to remain constantly high.
|
||||
In the worst case scenario, the queue of nodes waiting for their map will grow
|
||||
to a point where Headscale will never be able to catch up, and nodes will never
|
||||
learn about the current state of the world.
|
||||
|
||||
We expect that the performance will improve over time as we improve the code
|
||||
base, but it is not a focus. In general, we will never make the tradeoff of
making things faster at the cost of less maintainable or readable code. We are
a small team and have to optimise for maintainability.
|
||||
|
||||
## Which database should I use?
|
||||
|
||||
We recommend the use of SQLite as database for headscale:
|
||||
@@ -108,9 +56,6 @@ We recommend the use of SQLite as database for headscale:
|
||||
The headscale project itself does not provide a tool to migrate from PostgreSQL to SQLite. Please have a look at [the
|
||||
related tools documentation](../ref/integration/tools.md) for migration tooling provided by the community.
|
||||
|
||||
The choice of database has little to no impact on the performance of the server,
|
||||
see [Scaling / How many clients does Headscale support?](#scaling-how-many-clients-does-headscale-support) for understanding how Headscale spends its resources.
|
||||
|
||||
## Why is my reverse proxy not working with headscale?
|
||||
|
||||
We don't know. We don't use reverse proxies with headscale ourselves, so we don't have any experience with them. We have
|
||||
@@ -121,16 +66,3 @@ help to the community.
|
||||
## Can I use headscale and tailscale on the same machine?
|
||||
|
||||
Running headscale on a machine that is also in the tailnet can cause problems with subnet routers, traffic relay nodes, and MagicDNS. It might work, but it is not supported.
|
||||
|
||||
## Why do two nodes see each other in their status, even if an ACL allows traffic only in one direction?
|
||||
|
||||
A frequent use case is to allow traffic only from one node to another, but not the other way around. For example, the
|
||||
workstation of an administrator should be able to connect to all nodes but the nodes themselves shouldn't be able to
|
||||
connect back to the administrator's node. Why do all nodes see the administrator's workstation in the output of
|
||||
`tailscale status`?
|
||||
|
||||
This is essentially how Tailscale works. If traffic is allowed to flow in one direction, then both nodes see each other
|
||||
in their output of `tailscale status`. Traffic is still filtered according to the ACL, with the exception of `tailscale
|
||||
ping` which is always allowed in either direction.
|
||||
|
||||
See also <https://tailscale.com/kb/1087/device-visibility>.
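
For illustration, a minimal one-directional policy along these lines might look like the following sketch (the user names `admin` and `server1` are placeholders, and the exact user syntax, with or without a trailing `@`, depends on the headscale version in use):

```json
{
  "acls": [
    // Only the admin workstation may open connections to the server;
    // the server cannot initiate connections back to the admin node.
    { "action": "accept", "src": ["admin@"], "dst": ["server1@:*"] }
  ]
}
```

Even with such a rule, `server1` will still show the admin's node in `tailscale status`; only the traffic itself is filtered.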
|
||||
|
||||
@@ -2,36 +2,31 @@
|
||||
|
||||
Headscale aims to implement a self-hosted, open source alternative to the Tailscale control server. Headscale's goal is
|
||||
to provide self-hosters and hobbyists with an open-source server they can use for their projects and labs. This page
|
||||
provides an overview of Headscale's features and compatibility with the Tailscale control server:
provides an overview of headscale's features and compatibility with the Tailscale control server:
|
||||
|
||||
- [x] Full "base" support of Tailscale's features
|
||||
- [x] Node registration
|
||||
- [x] Interactive
|
||||
- [x] Pre authenticated key
|
||||
- [x] [DNS](../ref/dns.md)
|
||||
- [x] [DNS](https://tailscale.com/kb/1054/dns)
|
||||
- [x] [MagicDNS](https://tailscale.com/kb/1081/magicdns)
|
||||
- [x] [Global and restricted nameservers (split DNS)](https://tailscale.com/kb/1054/dns#nameservers)
|
||||
- [x] [search domains](https://tailscale.com/kb/1054/dns#search-domains)
|
||||
- [x] [Extra DNS records (Headscale only)](../ref/dns.md#setting-extra-dns-records)
|
||||
- [x] [Extra DNS records (headscale only)](../ref/dns.md#setting-extra-dns-records)
|
||||
- [x] [Taildrop (File Sharing)](https://tailscale.com/kb/1106/taildrop)
|
||||
- [x] [Routes](../ref/routes.md)
|
||||
- [x] [Subnet routers](../ref/routes.md#subnet-router)
|
||||
- [x] [Exit nodes](../ref/routes.md#exit-node)
|
||||
- [x] Routing advertising (including exit nodes)
|
||||
- [x] Dual stack (IPv4 and IPv6)
|
||||
- [x] Ephemeral nodes
|
||||
- [x] Embedded [DERP server](https://tailscale.com/kb/1232/derp-servers)
|
||||
- [x] Access control lists ([GitHub label "policy"](https://github.com/juanfont/headscale/labels/policy%20%F0%9F%93%9D))
|
||||
- [x] ACL management via API
|
||||
- [x] Some [Autogroups](https://tailscale.com/kb/1396/targets#autogroups), currently: `autogroup:internet`,
|
||||
`autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`
|
||||
- [x] [Auto approvers](https://tailscale.com/kb/1337/acl-syntax#auto-approvers) for [subnet
|
||||
routers](../ref/routes.md#automatically-approve-routes-of-a-subnet-router) and [exit
|
||||
nodes](../ref/routes.md#automatically-approve-an-exit-node-with-auto-approvers)
|
||||
- [x] [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh)
|
||||
* [x] [Node registration using Single-Sign-On (OpenID Connect)](../ref/oidc.md) ([GitHub label "OIDC"](https://github.com/juanfont/headscale/labels/OIDC))
|
||||
- [x] `autogroup:internet`
|
||||
- [ ] `autogroup:self`
|
||||
- [ ] `autogroup:member`
|
||||
* [ ] Node registration using Single-Sign-On (OpenID Connect) ([GitHub label "OIDC"](https://github.com/juanfont/headscale/labels/OIDC))
|
||||
- [x] Basic registration
|
||||
- [x] Update user profile from identity provider
|
||||
- [ ] Dynamic ACL support
|
||||
- [ ] OIDC groups cannot be used in ACLs
|
||||
- [ ] [Funnel](https://tailscale.com/kb/1223/funnel) ([#1040](https://github.com/juanfont/headscale/issues/1040))
|
||||
- [ ] [Serve](https://tailscale.com/kb/1312/serve) ([#1234](https://github.com/juanfont/headscale/issues/1921))
|
||||
- [ ] [Network flow logs](https://tailscale.com/kb/1219/network-flow-logs) ([#1687](https://github.com/juanfont/headscale/issues/1687))
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
All headscale releases are available on the [GitHub release page](https://github.com/juanfont/headscale/releases). Those
|
||||
releases are available as binaries for various platforms and architectures, packages for Debian based systems and source
|
||||
code archives. Container images are available on [Docker Hub](https://hub.docker.com/r/headscale/headscale) and
|
||||
[GitHub Container Registry](https://github.com/juanfont/headscale/pkgs/container/headscale).
|
||||
code archives. Container images are available on [Docker Hub](https://hub.docker.com/r/headscale/headscale).
|
||||
|
||||
An Atom/RSS feed of headscale releases is available [here](https://github.com/juanfont/headscale/releases.atom).
|
||||
|
||||
|
||||
5
docs/packaging/README.md
Normal file
5
docs/packaging/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Packaging
|
||||
|
||||
We use [nFPM](https://nfpm.goreleaser.com/) to build `.deb`, `.rpm` and `.apk` packages.
|
||||
|
||||
This folder contains files we need to package with these releases.
|
||||
@@ -1,4 +1,5 @@
|
||||
[Unit]
|
||||
After=syslog.target
|
||||
After=network.target
|
||||
Description=headscale coordination server for Tailscale
|
||||
X-Restart-Triggers=/etc/headscale/config.yaml
|
||||
@@ -13,7 +14,7 @@ Restart=always
|
||||
RestartSec=5
|
||||
|
||||
WorkingDirectory=/var/lib/headscale
|
||||
ReadWritePaths=/var/lib/headscale
|
||||
ReadWritePaths=/var/lib/headscale /var/run
|
||||
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_CHOWN
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_CHOWN
|
||||
88
docs/packaging/postinstall.sh
Normal file
88
docs/packaging/postinstall.sh
Normal file
@@ -0,0 +1,88 @@
|
||||
#!/bin/sh
|
||||
# Determine OS platform
|
||||
# shellcheck source=/dev/null
|
||||
. /etc/os-release
|
||||
|
||||
HEADSCALE_EXE="/usr/bin/headscale"
|
||||
BSD_HIER=""
|
||||
HEADSCALE_RUN_DIR="/var/run/headscale"
|
||||
HEADSCALE_HOME_DIR="/var/lib/headscale"
|
||||
HEADSCALE_USER="headscale"
|
||||
HEADSCALE_GROUP="headscale"
|
||||
HEADSCALE_SHELL="/usr/sbin/nologin"
|
||||
|
||||
ensure_sudo() {
|
||||
if [ "$(id -u)" = "0" ]; then
|
||||
echo "Sudo permissions detected"
|
||||
else
|
||||
echo "No sudo permission detected, please run as sudo"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
ensure_headscale_path() {
|
||||
if [ ! -f "$HEADSCALE_EXE" ]; then
|
||||
echo "headscale not in default path, exiting..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
printf "Found headscale %s\n" "$HEADSCALE_EXE"
|
||||
}
|
||||
|
||||
create_headscale_user() {
|
||||
printf "PostInstall: Adding headscale user %s\n" "$HEADSCALE_USER"
|
||||
useradd -s "$HEADSCALE_SHELL" -d "$HEADSCALE_HOME_DIR" -c "headscale default user" "$HEADSCALE_USER"
|
||||
}
|
||||
|
||||
create_headscale_group() {
|
||||
if command -V systemctl >/dev/null 2>&1; then
|
||||
printf "PostInstall: Adding headscale group %s\n" "$HEADSCALE_GROUP"
|
||||
groupadd "$HEADSCALE_GROUP"
|
||||
|
||||
printf "PostInstall: Adding headscale user %s to group %s\n" "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||
usermod -a -G "$HEADSCALE_GROUP" "$HEADSCALE_USER"
|
||||
fi
|
||||
|
||||
if [ "$ID" = "alpine" ]; then
|
||||
printf "PostInstall: Adding headscale group %s\n" "$HEADSCALE_GROUP"
|
||||
addgroup "$HEADSCALE_GROUP"
|
||||
|
||||
printf "PostInstall: Adding headscale user %s to group %s\n" "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||
addgroup "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||
fi
|
||||
}
|
||||
|
||||
create_run_dir() {
|
||||
printf "PostInstall: Creating headscale run directory \n"
|
||||
mkdir -p "$HEADSCALE_RUN_DIR"
|
||||
|
||||
printf "PostInstall: Modifying group ownership of headscale run directory \n"
|
||||
chown "$HEADSCALE_USER":"$HEADSCALE_GROUP" "$HEADSCALE_RUN_DIR"
|
||||
}
|
||||
|
||||
summary() {
|
||||
echo "----------------------------------------------------------------------"
|
||||
echo " headscale package has been successfully installed."
|
||||
echo ""
|
||||
echo " Please follow the next steps to start the software:"
|
||||
echo ""
|
||||
echo " sudo systemctl enable headscale"
|
||||
echo " sudo systemctl start headscale"
|
||||
echo ""
|
||||
echo " Configuration settings can be adjusted here:"
|
||||
echo " ${BSD_HIER}/etc/headscale/config.yaml"
|
||||
echo ""
|
||||
echo "----------------------------------------------------------------------"
|
||||
}
|
||||
|
||||
#
|
||||
# Main body of the script
|
||||
#
|
||||
{
|
||||
ensure_sudo
|
||||
ensure_headscale_path
|
||||
create_headscale_user
|
||||
create_headscale_group
|
||||
create_run_dir
|
||||
summary
|
||||
}
|
||||
15
docs/packaging/postremove.sh
Normal file
15
docs/packaging/postremove.sh
Normal file
@@ -0,0 +1,15 @@
|
||||
#!/bin/sh
|
||||
# Determine OS platform
|
||||
# shellcheck source=/dev/null
|
||||
. /etc/os-release
|
||||
|
||||
if command -V systemctl >/dev/null 2>&1; then
|
||||
echo "Stop and disable headscale service"
|
||||
systemctl stop headscale >/dev/null 2>&1 || true
|
||||
systemctl disable headscale >/dev/null 2>&1 || true
|
||||
echo "Running daemon-reload"
|
||||
systemctl daemon-reload || true
|
||||
fi
|
||||
|
||||
echo "Removing run directory"
|
||||
rm -rf "/var/run/headscale.sock"
|
||||
@@ -64,10 +64,10 @@ Here are the ACL's to implement the same permissions as above:
|
||||
// groups are collections of users having a common scope. A user can be in multiple groups
|
||||
// groups cannot be composed of groups
|
||||
"groups": {
|
||||
"group:boss": ["boss@"],
|
||||
"group:dev": ["dev1@", "dev2@"],
|
||||
"group:admin": ["admin1@"],
|
||||
"group:intern": ["intern1@"]
|
||||
"group:boss": ["boss"],
|
||||
"group:dev": ["dev1", "dev2"],
|
||||
"group:admin": ["admin1"],
|
||||
"group:intern": ["intern1"]
|
||||
},
|
||||
// tagOwners in tailscale is an association between a TAG and the people allowed to set this TAG on a server.
|
||||
// This is documented [here](https://tailscale.com/kb/1068/acl-tags#defining-a-tag)
|
||||
@@ -149,11 +149,13 @@ Here are the ACL's to implement the same permissions as above:
|
||||
},
|
||||
// developers have access to the internal network through the router.
|
||||
// the internal network is composed of HTTPS endpoints and Postgresql
|
||||
// database servers.
|
||||
// database servers. There's an additional rule to allow traffic to be
|
||||
// forwarded to the internal subnet, 10.20.0.0/16. See this issue
|
||||
// https://github.com/juanfont/headscale/issues/502
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:dev"],
|
||||
"dst": ["10.20.0.0/16:443,5432"]
|
||||
"dst": ["10.20.0.0/16:443,5432", "router.internal:0"]
|
||||
},
|
||||
|
||||
// servers should be able to talk to database in tcp/5432. Database should not be able to initiate connections to
|
||||
@@ -179,11 +181,11 @@ Here are the ACL's to implement the same permissions as above:
|
||||
|
||||
// We still have to allow internal user communications since nothing guarantees that each person has
// their own user.
|
||||
{ "action": "accept", "src": ["boss@"], "dst": ["boss@:*"] },
|
||||
{ "action": "accept", "src": ["dev1@"], "dst": ["dev1@:*"] },
|
||||
{ "action": "accept", "src": ["dev2@"], "dst": ["dev2@:*"] },
|
||||
{ "action": "accept", "src": ["admin1@"], "dst": ["admin1@:*"] },
|
||||
{ "action": "accept", "src": ["intern1@"], "dst": ["intern1@:*"] }
|
||||
{ "action": "accept", "src": ["boss"], "dst": ["boss:*"] },
|
||||
{ "action": "accept", "src": ["dev1"], "dst": ["dev1:*"] },
|
||||
{ "action": "accept", "src": ["dev2"], "dst": ["dev2:*"] },
|
||||
{ "action": "accept", "src": ["admin1"], "dst": ["admin1:*"] },
|
||||
{ "action": "accept", "src": ["intern1"], "dst": ["intern1:*"] }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
@@ -5,9 +5,7 @@
|
||||
- `/etc/headscale`
|
||||
- `$HOME/.headscale`
|
||||
- the current working directory
|
||||
- To load the configuration from a different path, use:
|
||||
- the command line flag `-c`, `--config`
|
||||
- the environment variable `HEADSCALE_CONFIG`
|
||||
- Use the command line flag `-c`, `--config` to load the configuration from a different path
|
||||
- Validate the configuration file with: `headscale configtest`
|
||||
|
||||
!!! example "Get the [example configuration from the GitHub repository](https://github.com/juanfont/headscale/blob/main/config-example.yaml)"
|
||||
|
||||
@@ -9,10 +9,10 @@ Headscale allows to set extra DNS records which are made available via
|
||||
[MagicDNS](https://tailscale.com/kb/1081/magicdns). Extra DNS records can be configured either via static entries in the
|
||||
[configuration file](./configuration.md) or from a JSON file that Headscale continuously watches for changes:
|
||||
|
||||
- Use the `dns.extra_records` option in the [configuration file](./configuration.md) for entries that are static and
|
||||
* Use the `dns.extra_records` option in the [configuration file](./configuration.md) for entries that are static and
|
||||
don't change while Headscale is running. Those entries are processed when Headscale is starting up and changes to the
|
||||
configuration require a restart of Headscale.
|
||||
- For dynamic DNS records that may be added, updated or removed while Headscale is running or DNS records that are
|
||||
* For dynamic DNS records that may be added, updated or removed while Headscale is running or DNS records that are
|
||||
generated by scripts the option `dns.extra_records_path` in the [configuration file](./configuration.md) is useful.
|
||||
Set it to the absolute path of the JSON file containing DNS records and Headscale processes this file as it detects
|
||||
changes.
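
As a rough sketch, a JSON file referenced by `dns.extra_records_path` might look like the following (the hostname and IP address are taken from the query examples further down; treat the exact field names as an assumption and compare with the documentation for your headscale version):

```json
[
  {
    "name": "grafana.myvpn.example.com",
    "type": "A",
    "value": "100.64.0.3"
  }
]
```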
|
||||
@@ -25,6 +25,7 @@ hostname and port combination "http://hostname-in-magic-dns.myvpn.example.com:30
|
||||
|
||||
Currently, [only A and AAAA records are processed by Tailscale](https://github.com/tailscale/tailscale/blob/v1.78.3/ipn/ipnlocal/local.go#L4461-L4479).
|
||||
|
||||
|
||||
1. Configure extra DNS records using one of the available configuration options:
|
||||
|
||||
=== "Static entries, via `dns.extra_records`"
|
||||
@@ -75,14 +76,14 @@ hostname and port combination "http://hostname-in-magic-dns.myvpn.example.com:30
|
||||
|
||||
=== "Query with dig"
|
||||
|
||||
```console
|
||||
```shell
|
||||
dig +short grafana.myvpn.example.com
|
||||
100.64.0.3
|
||||
```
|
||||
|
||||
=== "Query with drill"
|
||||
|
||||
```console
|
||||
```shell
|
||||
drill -Q grafana.myvpn.example.com
|
||||
100.64.0.3
|
||||
```
|
||||
|
||||
51
docs/ref/exit-node.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# Exit Nodes
|
||||
|
||||
## On the node
|
||||
|
||||
Register the node and make it advertise itself as an exit node:
|
||||
|
||||
```console
|
||||
$ sudo tailscale up --login-server https://headscale.example.com --advertise-exit-node
|
||||
```
|
||||
|
||||
If the node is already registered, it can advertise exit capabilities like this:
|
||||
|
||||
```console
|
||||
$ sudo tailscale set --advertise-exit-node
|
||||
```
|
||||
|
||||
To use a node as an exit node, IP forwarding must be enabled on the node. Check the official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to enable IP forwarding.
|
||||
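On Linux, a minimal sketch of enabling IP forwarding persistently might look like this (the sysctl file name is arbitrary; consult the linked documentation for your platform):

```console
$ echo 'net.ipv4.ip_forward = 1' | sudo tee /etc/sysctl.d/99-ip-forwarding.conf
$ echo 'net.ipv6.conf.all.forwarding = 1' | sudo tee -a /etc/sysctl.d/99-ip-forwarding.conf
$ sudo sysctl -p /etc/sysctl.d/99-ip-forwarding.conf
```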
|
||||
## On the control server
|
||||
|
||||
```console
|
||||
$ # list nodes
|
||||
$ headscale routes list
|
||||
ID | Node | Prefix | Advertised | Enabled | Primary
|
||||
1 | | 0.0.0.0/0 | false | false | -
|
||||
2 | | ::/0 | false | false | -
|
||||
3 | phobos | 0.0.0.0/0 | true | false | -
|
||||
4 | phobos | ::/0 | true | false | -
|
||||
|
||||
$ # enable routes for phobos
|
||||
$ headscale routes enable -r 3
|
||||
$ headscale routes enable -r 4
|
||||
|
||||
$ # Check node list again. The routes are now enabled.
|
||||
$ headscale routes list
|
||||
ID | Node | Prefix | Advertised | Enabled | Primary
|
||||
1 | | 0.0.0.0/0 | false | false | -
|
||||
2 | | ::/0 | false | false | -
|
||||
3 | phobos | 0.0.0.0/0 | true | true | -
|
||||
4 | phobos | ::/0 | true | true | -
|
||||
```
|
||||
|
||||
## On the client
|
||||
|
||||
The exit node can now be used with:
|
||||
|
||||
```console
|
||||
$ sudo tailscale set --exit-node phobos
|
||||
```
|
||||
|
||||
Check the official [Tailscale documentation](https://tailscale.com/kb/1103/exit-nodes#use-the-exit-node) for how to do it on your device.
|
||||
@@ -5,11 +5,9 @@
|
||||
This page contains community contributions. The projects listed here are not
|
||||
maintained by the headscale authors and are written by community members.
|
||||
|
||||
This page collects third-party tools, client libraries, and scripts related to headscale.
|
||||
This page collects third-party tools and scripts related to headscale.
|
||||
|
||||
| Name | Repository Link | Description |
|
||||
| --------------------- | --------------------------------------------------------------- | -------------------------------------------------------------------- |
|
||||
| tailscale-manager | [Github](https://github.com/singlestore-labs/tailscale-manager) | Dynamically manage Tailscale route advertisements |
|
||||
| headscalebacktosqlite | [Github](https://github.com/bigbozza/headscalebacktosqlite) | Migrate headscale from PostgreSQL back to SQLite |
|
||||
| headscale-pf | [Github](https://github.com/YouSysAdmin/headscale-pf) | Populates user groups based on user groups in Jumpcloud or Authentik |
|
||||
| headscale-client-go | [Github](https://github.com/hibare/headscale-client-go) | A Go client implementation for the Headscale HTTP API. |
|
||||
| Name | Repository Link | Description |
|
||||
| --------------------- | --------------------------------------------------------------- | ------------------------------------------------- |
|
||||
| tailscale-manager | [Github](https://github.com/singlestore-labs/tailscale-manager) | Dynamically manage Tailscale route advertisements |
|
||||
| headscalebacktosqlite | [Github](https://github.com/bigbozza/headscalebacktosqlite) | Migrate headscale from PostgreSQL back to SQLite |
|
||||
|
||||
@@ -7,14 +7,13 @@
|
||||
|
||||
Headscale doesn't provide a built-in web interface but users may pick one from the available options.
|
||||
|
||||
| Name | Repository Link | Description |
|
||||
| ---------------------- | ----------------------------------------------------------- | -------------------------------------------------------------------------------------------- |
|
||||
| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server |
|
||||
| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend environment required |
|
||||
| Headplane | [GitHub](https://github.com/tale/headplane) | An advanced Tailscale inspired frontend for headscale |
|
||||
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for headscale |
|
||||
| ouroboros | [Github](https://github.com/yellowsink/ouroboros) | Ouroboros is designed for users to manage their own devices, rather than for admins |
|
||||
| unraid-headscale-admin | [Github](https://github.com/ich777/unraid-headscale-admin) | A simple headscale admin UI for Unraid, it offers Local (`docker exec`) and API Mode |
|
||||
| headscale-console | [Github](https://github.com/rickli-cloud/headscale-console) | WebAssembly-based client supporting SSH, VNC and RDP with optional self-service capabilities |
|
||||
| Name | Repository Link | Description |
|
||||
| --------------- | ------------------------------------------------------- | ----------------------------------------------------------------------------------- |
|
||||
| headscale-webui | [Github](https://github.com/ifargle/headscale-webui) | A simple headscale web UI for small-scale deployments. |
|
||||
| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server |
|
||||
| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend environment required |
|
||||
| Headplane | [GitHub](https://github.com/tale/headplane) | An advanced Tailscale inspired frontend for headscale |
|
||||
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for headscale |
|
||||
| ouroboros | [Github](https://github.com/yellowsink/ouroboros) | Ouroboros is designed for users to manage their own devices, rather than for admins |
|
||||
|
||||
You can ask for support on our [Discord server](https://discord.gg/c84AZQhmpx) in the "web-interfaces" channel.
|
||||
|
||||
482
docs/ref/oidc.md
@@ -1,272 +1,168 @@
|
||||
# OpenID Connect
|
||||
|
||||
Headscale supports authentication via external identity providers using OpenID Connect (OIDC). It features:
|
||||
|
||||
- Autoconfiguration via OpenID Connect Discovery Protocol
|
||||
- [Proof Key for Code Exchange (PKCE) code verification](#enable-pkce-recommended)
|
||||
- [Authorization based on a user's domain, email address or group membership](#authorize-users-with-filters)
|
||||
- Synchronization of [standard OIDC claims](#supported-oidc-claims)
|
||||
|
||||
Please see [limitations](#limitations) for known issues and limitations.
|
||||
|
||||
## Configuration
|
||||
|
||||
OpenID requires configuration in Headscale and your identity provider:
|
||||
|
||||
- Headscale: The `oidc` section of the Headscale [configuration](configuration.md) contains all available configuration
|
||||
options along with a description and their default values.
|
||||
- Identity provider: Please refer to the official documentation of your identity provider for specific instructions.
|
||||
Additionally, there might be some useful hints in the [Identity provider specific
|
||||
configuration](#identity-provider-specific-configuration) section below.
|
||||
|
||||
### Basic configuration
|
||||
|
||||
A basic configuration connects Headscale to an identity provider and typically requires:
|
||||
|
||||
- OpenID Connect Issuer URL from the identity provider. Headscale uses the OpenID Connect Discovery Protocol 1.0 to
|
||||
automatically obtain OpenID configuration parameters (example: `https://sso.example.com`).
|
||||
- Client ID from the identity provider (example: `headscale`).
|
||||
- Client secret generated by the identity provider (example: `generated-secret`).
|
||||
- Redirect URI for your identity provider (example: `https://headscale.example.com/oidc/callback`).
|
||||
|
||||
=== "Headscale"
|
||||
|
||||
```yaml
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
```
|
||||
|
||||
=== "Identity provider"
|
||||
|
||||
* Create a new confidential client (`Client ID`, `Client secret`)
|
||||
* Add Headscale's OIDC callback URL as valid redirect URL: `https://headscale.example.com/oidc/callback`
|
||||
* Configure additional parameters to improve user experience such as: name, description, logo, …
|
||||
|
||||
### Enable PKCE (recommended)
|
||||
|
||||
Proof Key for Code Exchange (PKCE) adds an additional layer of security to the OAuth 2.0 authorization code flow by
|
||||
preventing authorization code interception attacks, see: <https://datatracker.ietf.org/doc/html/rfc7636>. PKCE is
|
||||
recommended and needs to be configured for Headscale and the identity provider alike:
|
||||
|
||||
=== "Headscale"
|
||||
|
||||
```yaml hl_lines="5-6"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
pkce:
|
||||
enabled: true
|
||||
```
|
||||
|
||||
=== "Identity provider"
|
||||
|
||||
* Enable PKCE for the headscale client
|
||||
* Set the PKCE challenge method to "S256"
|
||||
|
||||
### Authorize users with filters
|
||||
|
||||
Headscale can filter allowed users based on their domain, email address or group membership. These filters can
|
||||
be helpful to apply additional restrictions and control which users are allowed to join. Filters are disabled by
|
||||
default; users are allowed to join once authentication with the identity provider succeeds. If multiple filters
|
||||
are configured, a user needs to pass all of them.
|
||||
|
||||
=== "Allowed domains"
|
||||
|
||||
* Check the email domain of each authenticating user against the list of allowed domains and only authorize users
|
||||
whose email domain matches `example.com`.
|
||||
* Access allowed: `alice@example.com`
|
||||
* Access denied: `bob@example.net`
|
||||
|
||||
```yaml hl_lines="5-6"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
allowed_domains:
|
||||
- "example.com"
|
||||
```
|
||||
|
||||
=== "Allowed users/emails"
|
||||
|
||||
* Check the email address of each authenticating user against the list of allowed email addresses and only authorize
|
||||
users whose email is part of the `allowed_users` list.
|
||||
* Access allowed: `alice@example.com`, `bob@example.net`
|
||||
* Access denied: `mallory@example.net`
|
||||
|
||||
```yaml hl_lines="5-7"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
allowed_users:
|
||||
- "alice@example.com"
|
||||
- "bob@example.net"
|
||||
```
|
||||
|
||||
=== "Allowed groups"
|
||||
|
||||
* Use the OIDC `groups` claim of each authenticating user to get their group membership and only authorize users
|
||||
which are members in at least one of the referenced groups.
|
||||
* Access allowed: users in the `headscale_users` group
|
||||
* Access denied: users without groups, users with other groups
|
||||
|
||||
```yaml hl_lines="5-7"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
scope: ["openid", "profile", "email", "groups"]
|
||||
allowed_groups:
|
||||
- "headscale_users"
|
||||
```
|
||||
|
||||
### Customize node expiration
|
||||
|
||||
The node expiration is the amount of time a node is authenticated with OpenID Connect until it expires and needs to
|
||||
reauthenticate. The default node expiration is 180 days. This can either be customized or set to the expiration from the
|
||||
Access Token.
|
||||
|
||||
=== "Customize node expiration"
|
||||
|
||||
```yaml hl_lines="5"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
expiry: 30d # Use 0 to disable node expiration
|
||||
```
|
||||
|
||||
=== "Use expiration from Access Token"
|
||||
|
||||
Please keep in mind that the Access Token is typically a short-lived token that expires within a few minutes. You
|
||||
will have to configure token expiration in your identity provider to avoid frequent reauthentication.
|
||||
|
||||
|
||||
```yaml hl_lines="5"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
use_expiry_from_token: true
|
||||
```
|
||||
|
||||
!!! tip "Expire a node and force re-authentication"
|
||||
|
||||
A node can be expired immediately via:
|
||||
```console
|
||||
headscale node expire -i <NODE_ID>
|
||||
```
|
||||
|
||||
### Reference a user in the policy
|
||||
|
||||
You may refer to users in the Headscale policy via:
|
||||
|
||||
- Email address
|
||||
- Username
|
||||
- Provider identifier (only available in the database or from your identity provider)
|
||||
|
||||
!!! note "A user identifier in the policy must contain a single `@`"
|
||||
|
||||
The Headscale policy requires a single `@` to reference a user. If the username or provider identifier doesn't
|
||||
already contain a single `@`, it needs to be appended at the end. For example: the username `ssmith` has to be
|
||||
written as `ssmith@` to be correctly identified as a user within the policy.
|
||||
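For example, an ACL rule for the hypothetical username `ssmith` would reference it as `ssmith@`:

```json
{
  "acls": [
    { "action": "accept", "src": ["ssmith@"], "dst": ["ssmith@:*"] }
  ]
}
```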
|
||||
!!! warning "Email address or username might be updated by users"
|
||||
|
||||
Many identity providers allow users to update their own profile. Depending on the identity provider and its
|
||||
configuration, the values for username or email address might change over time. This might have unexpected
|
||||
consequences for Headscale where a policy might no longer work or a user might obtain more access by hijacking an
|
||||
existing username or email address.
|
||||
|
||||
## Supported OIDC claims
|
||||
|
||||
Headscale uses [the standard OIDC claims](https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims) to
|
||||
populate and update its local user profile on each login. OIDC claims are read from the ID Token or from the UserInfo
|
||||
endpoint.
|
||||
|
||||
| Headscale profile | OIDC claim | Notes / examples |
|
||||
| ------------------- | -------------------- | ------------------------------------------------------------------------------------------------- |
|
||||
| email address | `email` | Only used when `email_verified: true` |
|
||||
| display name | `name` | eg: `Sam Smith` |
|
||||
| username | `preferred_username` | Depends on identity provider, eg: `ssmith`, `ssmith@idp.example.com`, `\\example.com\ssmith` |
|
||||
| profile picture | `picture` | URL to a profile picture or avatar |
|
||||
| provider identifier | `iss`, `sub` | A stable and unique identifier for a user, typically a combination of `iss` and `sub` OIDC claims |
|
||||
| | `groups` | [Only used to filter for allowed groups](#authorize-users-with-filters) |
|
||||
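As an illustration, the relevant part of an ID Token payload carrying these claims might look as follows (all values are invented examples):

```json
{
  "iss": "https://sso.example.com",
  "sub": "248289761001",
  "name": "Sam Smith",
  "preferred_username": "ssmith",
  "email": "sam.smith@example.com",
  "email_verified": true,
  "picture": "https://sso.example.com/avatar/ssmith.png",
  "groups": ["headscale_users"]
}
```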
|
||||
## Limitations
|
||||
|
||||
- Support for OpenID Connect aims to be generic and vendor independent. It offers only limited support for quirks of
|
||||
specific identity providers.
|
||||
- OIDC groups cannot be used in ACLs.
|
||||
- The username provided by the identity provider needs to adhere to this pattern:
|
||||
- The username must be at least two characters long.
|
||||
- It must only contain letters, digits, hyphens, dots, underscores, and up to a single `@`.
|
||||
- The username must start with a letter.
|
||||
- A user's email address is only synchronized to the local user profile when the identity provider marks the email
|
||||
address as verified (`email_verified: true`).
|
||||
|
||||
Please see the [GitHub label "OIDC"](https://github.com/juanfont/headscale/labels/OIDC) for OIDC related issues.
|
||||
|
||||
## Identity provider specific configuration
|
||||
|
||||
!!! warning "Third-party software and services"
|
||||
|
||||
This section of the documentation is specific for third-party software and services. We recommend users read the
|
||||
third-party documentation on how to configure and integrate an OIDC client. Please see the [Configuration
|
||||
section](#configuration) for a description of Headscale's OIDC related configuration settings.
|
||||
|
||||
Any identity provider with OpenID Connect support should "just work" with Headscale. The following identity providers
|
||||
are known to work:
|
||||
|
||||
- [Authelia](#authelia)
|
||||
- [Authentik](#authentik)
|
||||
- [Kanidm](#kanidm)
|
||||
- [Keycloak](#keycloak)
|
||||
|
||||
### Authelia
|
||||
|
||||
Authelia is fully supported by Headscale.
|
||||
|
||||
#### Additional configuration to authorize users based on filters
|
||||
|
||||
Authelia (4.39.0 or newer) no longer provides standard OIDC claims such as `email` or `groups` via the ID Token. The
|
||||
OIDC `email` and `groups` claims are used to [authorize users with filters](#authorize-users-with-filters). This extra
|
||||
configuration step is **only** needed if you need to authorize access based on one of the following user properties:
|
||||
|
||||
- domain
|
||||
- email address
|
||||
- group membership
|
||||
|
||||
Please follow the instructions from Authelia's documentation on how to [Restore Functionality Prior to Claims
|
||||
Parameter](https://www.authelia.com/integration/openid-connect/openid-connect-1.0-claims/#restore-functionality-prior-to-claims-parameter).
|
||||
|
||||
### Authentik
|
||||
|
||||
- Authentik is fully supported by Headscale.
|
||||
- [Headscale does not support JSON Web Encryption](https://github.com/juanfont/headscale/issues/2446). Leave the field
|
||||
`Encryption Key` in the providers section unset.
|
||||
|
||||
### Google OAuth
|
||||
|
||||
!!! warning "No username due to missing preferred_username"
|
||||
|
||||
Google OAuth does not send the `preferred_username` claim when the scope `profile` is requested. The username in
|
||||
Headscale will be blank/not set.
|
||||
|
||||
In order to integrate Headscale with Google, you'll need to have a [Google Cloud
|
||||
Console](https://console.cloud.google.com) account.
|
||||
|
||||
Google OAuth has a [verification process](https://support.google.com/cloud/answer/9110914?hl=en) if you need to have
|
||||
users authenticate who are outside of your domain. If you only need to authenticate users from your domain name (ie
|
||||
`@example.com`), you don't need to go through the verification process.
|
||||
|
||||
However if you don't have a domain, or need to add users outside of your domain, you can manually add emails via Google
|
||||
Console.
|
||||
|
||||
#### Steps
|
||||
# Configuring headscale to use OIDC authentication
|
||||
|
||||
In order to authenticate users through a centralized solution one must enable the OIDC integration.
|
||||
|
||||
Known limitations:
|
||||
|
||||
- No dynamic ACL support
|
||||
- OIDC groups cannot be used in ACLs
|
||||
|
||||
## Basic configuration
|
||||
|
||||
In your `config.yaml`, customize this to your liking:
|
||||
|
||||
```yaml title="config.yaml"
|
||||
oidc:
|
||||
# Block further startup until the OIDC provider is healthy and available
|
||||
only_start_if_oidc_is_available: true
|
||||
# Specified by your OIDC provider
|
||||
issuer: "https://your-oidc.issuer.com/path"
|
||||
# Specified/generated by your OIDC provider
|
||||
client_id: "your-oidc-client-id"
|
||||
client_secret: "your-oidc-client-secret"
|
||||
# alternatively, set `client_secret_path` to read the secret from the file.
|
||||
# It resolves environment variables, making integration to systemd's
|
||||
# `LoadCredential` straightforward:
|
||||
#client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
|
||||
# as third option, it's also possible to load the oidc secret from environment variables
|
||||
# set HEADSCALE_OIDC_CLIENT_SECRET to the required value
|
||||
|
||||
# Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
|
||||
# parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
|
||||
scope: ["openid", "profile", "email", "custom"]
|
||||
# Optional: Passed on to the browser login request – used to tweak behaviour for the OIDC provider
|
||||
extra_params:
|
||||
domain_hint: example.com
|
||||
|
||||
# Optional: List allowed principal domains and/or users. If an authenticated user's domain is not in this list,
|
||||
# the authentication request will be rejected.
|
||||
allowed_domains:
|
||||
- example.com
|
||||
# Optional. Note that groups from Keycloak have a leading '/'.
|
||||
allowed_groups:
|
||||
- /headscale
|
||||
# Optional.
|
||||
allowed_users:
|
||||
- alice@example.com
|
||||
|
||||
# Optional: PKCE (Proof Key for Code Exchange) configuration
|
||||
# PKCE adds an additional layer of security to the OAuth 2.0 authorization code flow
|
||||
# by preventing authorization code interception attacks
|
||||
# See https://datatracker.ietf.org/doc/html/rfc7636
|
||||
pkce:
|
||||
# Enable or disable PKCE support (default: false)
|
||||
enabled: false
|
||||
# PKCE method to use:
|
||||
# - plain: Use plain code verifier
|
||||
# - S256: Use SHA256 hashed code verifier (default, recommended)
|
||||
method: S256
|
||||
|
||||
# If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
|
||||
# This will transform `first-name.last-name@example.com` to the user `first-name.last-name`
|
||||
# If `strip_email_domain` is set to `false` the domain part will NOT be removed resulting to the following
|
||||
# user: `first-name.last-name.example.com`
|
||||
strip_email_domain: true
|
||||
```
|
||||
|
||||
## Azure AD example
|
||||
|
||||
In order to integrate headscale with Azure Active Directory, we'll need to provision an App Registration with the correct scopes and redirect URI. Here with Terraform:
|
||||
|
||||
```hcl title="terraform.hcl"
|
||||
resource "azuread_application" "headscale" {
|
||||
display_name = "Headscale"
|
||||
|
||||
sign_in_audience = "AzureADMyOrg"
|
||||
fallback_public_client_enabled = false
|
||||
|
||||
required_resource_access {
|
||||
// Microsoft Graph
|
||||
resource_app_id = "00000003-0000-0000-c000-000000000000"
|
||||
|
||||
resource_access {
|
||||
// scope: profile
|
||||
id = "14dad69e-099b-42c9-810b-d002981feec1"
|
||||
type = "Scope"
|
||||
}
|
||||
resource_access {
|
||||
// scope: openid
|
||||
id = "37f7f235-527c-4136-accd-4a02d197296e"
|
||||
type = "Scope"
|
||||
}
|
||||
resource_access {
|
||||
// scope: email
|
||||
id = "64a6cdd6-aab1-4aaf-94b8-3cc8405e90d0"
|
||||
type = "Scope"
|
||||
}
|
||||
}
|
||||
web {
|
||||
# Points at your running headscale instance
|
||||
redirect_uris = ["https://headscale.example.com/oidc/callback"]
|
||||
|
||||
implicit_grant {
|
||||
access_token_issuance_enabled = false
|
||||
id_token_issuance_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
group_membership_claims = ["SecurityGroup"]
|
||||
optional_claims {
|
||||
# Expose group memberships
|
||||
id_token {
|
||||
name = "groups"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "azuread_application_password" "headscale-application-secret" {
|
||||
display_name = "Headscale Server"
|
||||
application_object_id = azuread_application.headscale.object_id
|
||||
}
|
||||
|
||||
resource "azuread_service_principal" "headscale" {
|
||||
application_id = azuread_application.headscale.application_id
|
||||
}
|
||||
|
||||
resource "azuread_service_principal_password" "headscale" {
|
||||
service_principal_id = azuread_service_principal.headscale.id
|
||||
end_date_relative = "44640h"
|
||||
}
|
||||
|
||||
output "headscale_client_id" {
|
||||
value = azuread_application.headscale.application_id
|
||||
}
|
||||
|
||||
output "headscale_client_secret" {
|
||||
value = azuread_application_password.headscale-application-secret.value
|
||||
}
|
||||
```
|
||||
|
||||
And in your headscale `config.yaml`:
|
||||
|
||||
```yaml title="config.yaml"
|
||||
oidc:
|
||||
issuer: "https://login.microsoftonline.com/<tenant-UUID>/v2.0"
|
||||
client_id: "<client-id-from-terraform>"
|
||||
client_secret: "<client-secret-from-terraform>"
|
||||
|
||||
# Optional: add "groups"
|
||||
scope: ["openid", "profile", "email"]
|
||||
extra_params:
|
||||
# Use your own domain, associated with Azure AD
|
||||
domain_hint: example.com
|
||||
# Optional: Force the Azure AD account picker
|
||||
prompt: select_account
|
||||
```
|
||||
|
||||
## Google OAuth Example
|
||||
|
||||
In order to integrate headscale with Google, you'll need to have a [Google Cloud Console](https://console.cloud.google.com) account.
|
||||
|
||||
Google OAuth has a [verification process](https://support.google.com/cloud/answer/9110914?hl=en) if you need to have users authenticate who are outside of your domain. If you only need to authenticate users from your domain name (ie `@example.com`), you don't need to go through the verification process.
|
||||
|
||||
However if you don't have a domain, or need to add users outside of your domain, you can manually add emails via Google Console.
|
||||
|
||||
### Steps
|
||||
|
||||
1. Go to [Google Console](https://console.cloud.google.com) and login or create an account if you don't have one.
|
||||
2. Create a project (if you don't already have one).
|
||||
@@ -274,44 +170,16 @@ Console.
|
||||
4. Click `Create Credentials` -> `OAuth client ID`
|
||||
5. Under `Application Type`, choose `Web Application`
|
||||
6. For `Name`, enter whatever you like
|
||||
7. Under `Authorised redirect URIs`, add Headscale's OIDC callback URL: `https://headscale.example.com/oidc/callback`
|
||||
7. Under `Authorised redirect URIs`, use `https://example.com/oidc/callback`, replacing example.com with your headscale URL.
|
||||
8. Click `Save` at the bottom of the form
|
||||
9. Take note of the `Client ID` and `Client secret`, you can also download it for reference if you need it.
|
||||
10. [Configure Headscale following the "Basic configuration" steps](#basic-configuration). The issuer URL for Google
|
||||
OAuth is: `https://accounts.google.com`.
|
||||
10. Edit your headscale config, under `oidc`, filling in your `client_id` and `client_secret`:
|
||||
```yaml title="config.yaml"
|
||||
oidc:
|
||||
issuer: "https://accounts.google.com"
|
||||
client_id: ""
|
||||
client_secret: ""
|
||||
scope: ["openid", "profile", "email"]
|
||||
```
|
||||
|
||||
### Kanidm
|
||||
|
||||
- Kanidm is fully supported by Headscale.
|
||||
- Groups for the [allowed groups filter](#authorize-users-with-filters) need to be specified with their full SPN, for
|
||||
example: `headscale_users@sso.example.com`.
|
||||
|
||||
### Keycloak
|
||||
|
||||
Keycloak is fully supported by Headscale.
|
||||
|
||||
#### Additional configuration to use the allowed groups filter
|
||||
|
||||
Keycloak has no built-in client scope for the OIDC `groups` claim. This extra configuration step is **only** needed if
|
||||
you need to [authorize access based on group membership](#authorize-users-with-filters).
|
||||
|
||||
- Create a new client scope `groups` for OpenID Connect:
|
||||
- Configure a `Group Membership` mapper with name `groups` and the token claim name `groups`.
|
||||
- Enable the mapper for the ID Token, Access Token and UserInfo endpoint.
|
||||
- Configure the new client scope for your Headscale client:
|
||||
- Edit the Headscale client.
|
||||
- Search for the client scope `group`.
|
||||
- Add it with assigned type `Default`.
|
||||
- [Configure the allowed groups in Headscale](#authorize-users-with-filters). Keep in mind that groups in Keycloak start
|
||||
with a leading `/`.
|
||||
|
||||
### Microsoft Entra ID
|
||||
|
||||
In order to integrate Headscale with Microsoft Entra ID, you'll need to provision an App Registration with the correct
|
||||
scopes and redirect URI.
|
||||
|
||||
[Configure Headscale following the "Basic configuration" steps](#basic-configuration). The issuer URL for Microsoft
|
||||
Entra ID is: `https://login.microsoftonline.com/<tenant-UUID>/v2.0`. The following `extra_params` might be useful:
|
||||
|
||||
- `domain_hint: example.com` to use your own domain
|
||||
- `prompt: select_account` to force an account picker during login
|
||||
You can also use `allowed_domains` and `allowed_users` to restrict the users who can authenticate.
|
||||
|
||||
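Putting this together, a sketch of the relevant Headscale settings could look like this (tenant UUID, client ID and client secret are placeholders from your App Registration):

```yaml
oidc:
  issuer: "https://login.microsoftonline.com/<tenant-UUID>/v2.0"
  client_id: "<client-id>"
  client_secret: "<client-secret>"
  extra_params:
    domain_hint: example.com
    prompt: select_account
```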
@@ -1,271 +0,0 @@
|
||||
# Routes
|
||||
|
||||
Headscale supports route advertising and can be used to manage [subnet routers](https://tailscale.com/kb/1019/subnets)
|
||||
and [exit nodes](https://tailscale.com/kb/1103/exit-nodes) for a tailnet.
|
||||
|
||||
- [Subnet routers](#subnet-router) may be used to connect an existing network such as a virtual
|
||||
private cloud or an on-premise network with your tailnet. Use a subnet router to access devices where Tailscale can't
|
||||
be installed or to gradually rollout Tailscale.
|
||||
- [Exit nodes](#exit-node) can be used to route all Internet traffic for another Tailscale
|
||||
node. Use it to securely access the Internet on an untrusted Wi-Fi or to access online services that expect traffic
|
||||
from a specific IP address.
|
||||
|
||||
## Subnet router
|
||||
|
||||
The setup of a subnet router requires double opt-in, once from a subnet router and once on the control server to allow
|
||||
its use within the tailnet. Optionally, use [`autoApprovers` to automatically approve routes from a subnet
|
||||
router](#automatically-approve-routes-of-a-subnet-router).
|
||||
|
||||
### Setup a subnet router
|
||||
|
||||
#### Configure a node as subnet router
|
||||
|
||||
Register a node and advertise the routes it should handle as a comma-separated list:
|
||||
|
||||
```console
|
||||
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-routes=10.0.0.0/8,192.168.0.0/24
|
||||
```
|
||||
|
||||
If the node is already registered, it can advertise new routes or update previously announced routes with:
|
||||
|
||||
```console
|
||||
$ sudo tailscale set --advertise-routes=10.0.0.0/8,192.168.0.0/24
|
||||
```
|
||||
|
||||
Finally, [enable IP forwarding](#enable-ip-forwarding) to route traffic.
|
||||
|
||||
#### Enable the subnet router on the control server
|
||||
|
||||
The routes of a tailnet can be displayed with the `headscale nodes list-routes` command. A subnet router with the
|
||||
hostname `myrouter` announced the IPv4 networks `10.0.0.0/8` and `192.168.0.0/24`. Those need to be approved before they
|
||||
can be used.
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | | 10.0.0.0/8, 192.168.0.0/24 |
|
||||
```
|
||||
|
||||
Approve all desired routes of a subnet router by specifying them as a comma-separated list:
|
||||
|
||||
```console
|
||||
$ headscale nodes approve-routes --node 1 --routes 10.0.0.0/8,192.168.0.0/24
|
||||
Node updated
|
||||
```
|
||||
|
||||
The node `myrouter` can now route the IPv4 networks `10.0.0.0/8` and `192.168.0.0/24` for the tailnet.
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | 10.0.0.0/8, 192.168.0.0/24 | 10.0.0.0/8, 192.168.0.0/24 | 10.0.0.0/8, 192.168.0.0/24
|
||||
```
|
||||
|
||||
#### Use the subnet router
|
||||
|
||||
To accept routes advertised by a subnet router on a node:
|
||||
|
||||
```console
|
||||
$ sudo tailscale set --accept-routes
|
||||
```
|
||||
|
||||
Please refer to the official [Tailscale
|
||||
documentation](https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-devices) for how to use a subnet
|
||||
router on different operating systems.
|
||||
|
||||
### Restrict the use of a subnet router with ACL
|
||||
|
||||
The routes announced by subnet routers are available to the nodes in a tailnet. By default, without an ACL enabled, all
|
||||
nodes can accept and use such routes. Configure an ACL to explicitly manage who can use routes.
|
||||
|
||||
The ACL snippet below defines three hosts, a subnet router `router`, a regular node `node` and `service.example.net` as
|
||||
an internal service that can be reached via a route on the subnet router `router`. It allows the node `node` to access
|
||||
`service.example.net` on port 80 and 443 which is reachable via the subnet router. Access to the subnet router itself is
|
||||
denied.
|
||||
|
||||
```json title="Access the routes of a subnet router without the subnet router itself"
|
||||
{
|
||||
"hosts": {
|
||||
// the router is not referenced but announces 192.168.0.0/24
|
||||
"router": "100.64.0.1/32",
|
||||
"node": "100.64.0.2/32",
|
||||
"service.example.net": "192.168.0.1/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["node"],
|
||||
"dst": ["service.example.net:80,443"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Automatically approve routes of a subnet router
|
||||
|
||||
The initial setup of a subnet router usually requires manual approval of their announced routes on the control server
|
||||
before they can be used by a node in a tailnet. Headscale supports the `autoApprovers` section of an ACL to automate the
|
||||
approval of routes served with a subnet router.
|
||||
|
||||
The ACL snippet below defines the tag `tag:router` owned by the user `alice`. This tag is used for `routes` in the
|
||||
`autoApprovers` section. The IPv4 route `192.168.0.0/24` is automatically approved once announced by a subnet router
|
||||
owned by the user `alice` and that also advertises the tag `tag:router`.
|
||||
|
||||
```json title="Subnet routers owned by alice and tagged with tag:router are automatically approved"
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:router": ["alice@"]
|
||||
},
|
||||
"autoApprovers": {
|
||||
"routes": {
|
||||
"192.168.0.0/24": ["tag:router"]
|
||||
}
|
||||
},
|
||||
"acls": [
|
||||
// more rules
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Advertise the route `192.168.0.0/24` from a subnet router that also advertises the tag `tag:router` when joining the tailnet:
|
||||
|
||||
```console
|
||||
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:router --advertise-routes 192.168.0.0/24
|
||||
```
|
||||
|
||||
Please see the [official Tailscale documentation](https://tailscale.com/kb/1337/acl-syntax#autoapprovers) for more
|
||||
information on auto approvers.
|
||||
|
||||
## Exit node
|
||||
|
||||
The setup of an exit node requires double opt-in, once from an exit node and once on the control server to allow its use
|
||||
within the tailnet. Optionally, use [`autoApprovers` to automatically approve an exit
|
||||
node](#automatically-approve-an-exit-node-with-auto-approvers).
|
||||
|
||||
### Setup an exit node
|
||||
|
||||
#### Configure a node as exit node
|
||||
|
||||
Register a node and make it advertise itself as an exit node:
|
||||
|
||||
```console
|
||||
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-exit-node
|
||||
```
|
||||
|
||||
If the node is already registered, it can advertise exit capabilities like this:
|
||||
|
||||
```console
|
||||
$ sudo tailscale set --advertise-exit-node
|
||||
```
|
||||
|
||||
Finally, [enable IP forwarding](#enable-ip-forwarding) to route traffic.
|
||||
|
||||
#### Enable the exit node on the control server
|
||||
|
||||
The routes of a tailnet can be displayed with the `headscale nodes list-routes` command. An exit node can be recognized
|
||||
by its announced routes: `0.0.0.0/0` for IPv4 and `::/0` for IPv6. The exit node with the hostname `myexit` is already
|
||||
available, but needs to be approved:
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myexit | | 0.0.0.0/0, ::/0 |
|
||||
```
|
||||
|
||||
For exit nodes, it is sufficient to approve either the IPv4 or IPv6 route. The other will be approved automatically.
|
||||
|
||||
```console
|
||||
$ headscale nodes approve-routes --node 1 --routes 0.0.0.0/0
|
||||
Node updated
|
||||
```
|
||||
|
||||
The node `myexit` is now approved as exit node for the tailnet:
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myexit | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0
|
||||
```
|
||||
|
||||
#### Use the exit node
|
||||
|
||||
The exit node can now be used on a node with:
|
||||
|
||||
```console
|
||||
$ sudo tailscale set --exit-node myexit
|
||||
```
|
||||
|
||||
Please refer to the official [Tailscale documentation](https://tailscale.com/kb/1103/exit-nodes#use-the-exit-node) for
|
||||
how to use an exit node on different operating systems.
|
||||
|
||||
### Restrict the use of an exit node with ACL
|
||||
|
||||
An exit node is offered to all nodes in a tailnet. By default, without an ACL enabled, all nodes in a tailnet can select
|
||||
and use an exit node. Configure `autogroup:internet` in an ACL rule to restrict who can use _any_ of the available exit
|
||||
nodes.
|
||||
|
||||
```json title="Example use of autogroup:internet"
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["..."],
|
||||
"dst": ["autogroup:internet:*"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Automatically approve an exit node with auto approvers
|
||||
|
||||
The initial setup of an exit node usually requires manual approval on the control server before it can be used by a node
|
||||
in a tailnet. Headscale supports the `autoApprovers` section of an ACL to automate the approval of a new exit node as
|
||||
soon as it joins the tailnet.
|
||||
|
||||
The ACL snippet below defines the tag `tag:exit` owned by the user `alice`. This tag is used for `exitNode` in the
|
||||
`autoApprovers` section. A new exit node which is owned by the user `alice` and that also advertises the tag `tag:exit`
|
||||
is automatically approved:
|
||||
|
||||
```json title="Exit nodes owned by alice and tagged with tag:exit are automatically approved"
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:exit": ["alice@"]
|
||||
},
|
||||
"autoApprovers": {
|
||||
"exitNode": ["tag:exit"]
|
||||
},
|
||||
"acls": [
|
||||
// more rules
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Advertise a node as exit node and also advertise the tag `tag:exit` when joining the tailnet:
|
||||
|
||||
```console
|
||||
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:exit --advertise-exit-node
|
||||
```
|
||||
|
||||
Please see the [official Tailscale documentation](https://tailscale.com/kb/1337/acl-syntax#autoapprovers) for more
|
||||
information on auto approvers.
|
||||
|
||||
## High availability
|
||||
|
||||
Headscale has limited support for high availability routing. Multiple subnet routers with overlapping routes or multiple
|
||||
exit nodes can be used to provide high availability for users. If one router node goes offline, another one can serve
|
||||
the same routes to clients. Please see the official [Tailscale documentation on high
|
||||
availability](https://tailscale.com/kb/1115/high-availability#subnet-router-high-availability) for details.
|
||||
|
||||
!!! bug
|
||||
|
||||
In certain situations it might take up to 16 minutes for Headscale to detect a node as offline. A failover node
|
||||
might not be selected fast enough if such a node is used as a subnet router or exit node, causing service
|
||||
interruptions for clients. See [issue 2129](https://github.com/juanfont/headscale/issues/2129) for more information.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Enable IP forwarding
|
||||
|
||||
A subnet router or exit node is routing traffic on behalf of other nodes and thus requires IP forwarding. Check the
|
||||
official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to
|
||||
enable IP forwarding.
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
## Bring your own certificate
|
||||
|
||||
Headscale can be configured to expose its web service via TLS. To configure the certificate and key file manually, set the `tls_cert_path` and `tls_key_path` configuration parameters. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.
|
||||
Headscale can be configured to expose its web service via TLS. To configure the certificate and key file manually, set the `tls_cert_path` and `tls_cert_path` configuration parameters. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.
|
||||
|
||||
```yaml title="config.yaml"
|
||||
tls_cert_path: ""
|
||||
@@ -52,7 +52,7 @@ If you want to validate that certificate renewal completed successfully, this ca
|
||||
1. Open the URL for your headscale server in your browser of choice, and manually inspecting the expiry date of the certificate you receive.
|
||||
2. Or, check remotely from CLI using `openssl`:
|
||||
|
||||
```console
|
||||
```bash
|
||||
$ openssl s_client -servername [hostname] -connect [hostname]:443 | openssl x509 -noout -dates
|
||||
(...)
|
||||
notBefore=Feb 8 09:48:26 2024 GMT
|
||||
|
||||
@@ -7,64 +7,65 @@
|
||||
|
||||
**It might be outdated and it might miss necessary steps**.
|
||||
|
||||
This documentation has the goal of showing a user how-to set up and run headscale in a container. A container runtime
|
||||
such as [Docker](https://www.docker.com) or [Podman](https://podman.io) is required. The container image can be found on
|
||||
[Docker Hub](https://hub.docker.com/r/headscale/headscale) and [GitHub Container
|
||||
Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The container image URLs are:
|
||||
|
||||
- [Docker Hub](https://hub.docker.com/r/headscale/headscale): `docker.io/headscale/headscale:<VERSION>`
|
||||
- [GitHub Container Registry](https://github.com/juanfont/headscale/pkgs/container/headscale):
|
||||
`ghcr.io/juanfont/headscale:<VERSION>`
|
||||
This documentation has the goal of showing a user how-to set up and run headscale in a container.
|
||||
[Docker](https://www.docker.com) is used as the reference container implementation, but there is no reason that it should
|
||||
not work with alternatives like [Podman](https://podman.io). The Docker image can be found on Docker Hub [here](https://hub.docker.com/r/headscale/headscale).
|
||||
|
||||
## Configure and run headscale
|
||||
|
||||
1. Create a directory on the Docker host to store headscale's [configuration](../../ref/configuration.md) and the [SQLite](https://www.sqlite.org/) database:
|
||||
1. Prepare a directory on the host Docker node in your directory of choice, used to hold headscale configuration and the [SQLite](https://www.sqlite.org/) database:
|
||||
|
||||
```shell
|
||||
mkdir -p ./headscale/{config,lib,run}
|
||||
mkdir -p ./headscale/config
|
||||
cd ./headscale
|
||||
```
|
||||
|
||||
1. Download the example configuration for your chosen version and save it as: `$(pwd)/config/config.yaml`. Adjust the
|
||||
1. Download the example configuration for your chosen version and save it as: `/etc/headscale/config.yaml`. Adjust the
|
||||
configuration to suit your local environment. See [Configuration](../../ref/configuration.md) for details.
|
||||
|
||||
1. Start headscale from within the previously created `./headscale` directory:
|
||||
```shell
|
||||
sudo mkdir -p /etc/headscale
|
||||
sudo nano /etc/headscale/config.yaml
|
||||
```
|
||||
|
||||
Alternatively, you can mount `/var/lib` and `/var/run` from your host system by adding
|
||||
`--volume $(pwd)/lib:/var/lib/headscale` and `--volume $(pwd)/run:/var/run/headscale`
|
||||
in the next step.
|
||||
|
||||
1. Start the headscale server while working in the host headscale directory:
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
--name headscale \
|
||||
--detach \
|
||||
--volume "$(pwd)/config:/etc/headscale" \
|
||||
--volume "$(pwd)/lib:/var/lib/headscale" \
|
||||
--volume "$(pwd)/run:/var/run/headscale" \
|
||||
--volume $(pwd)/config:/etc/headscale/ \
|
||||
--publish 127.0.0.1:8080:8080 \
|
||||
--publish 127.0.0.1:9090:9090 \
|
||||
docker.io/headscale/headscale:<VERSION> \
|
||||
headscale/headscale:<VERSION> \
|
||||
serve
|
||||
```
|
||||
|
||||
Note: use `0.0.0.0:8080:8080` instead of `127.0.0.1:8080:8080` if you want to expose the container externally.
|
||||
|
||||
This command mounts the local directories inside the container, forwards port 8080 and 9090 out of the container so
|
||||
the headscale instance becomes available and then detaches so headscale runs in the background.
|
||||
This command will mount `config/` under `/etc/headscale`, forward port 8080 out of the container so the
|
||||
headscale instance becomes available and then detach so headscale runs in the background.
|
||||
|
||||
A similar configuration for `docker-compose`:
|
||||
Example `docker-compose.yaml`
|
||||
|
||||
```yaml
|
||||
version: "3.7"
|
||||
|
||||
```yaml title="docker-compose.yaml"
|
||||
services:
|
||||
headscale:
|
||||
image: docker.io/headscale/headscale:<VERSION>
|
||||
image: headscale/headscale:<VERSION>
|
||||
restart: unless-stopped
|
||||
container_name: headscale
|
||||
ports:
|
||||
- "127.0.0.1:8080:8080"
|
||||
- "127.0.0.1:9090:9090"
|
||||
volumes:
|
||||
# Please set <HEADSCALE_PATH> to the absolute path
|
||||
# of the previously created headscale directory.
|
||||
- <HEADSCALE_PATH>/config:/etc/headscale
|
||||
- <HEADSCALE_PATH>/lib:/var/lib/headscale
|
||||
- <HEADSCALE_PATH>/run:/var/run/headscale
|
||||
# Please change <CONFIG_PATH> to the fullpath of the config folder just created
|
||||
- <CONFIG_PATH>:/etc/headscale
|
||||
command: serve
|
||||
```
|
||||
|
||||
@@ -97,7 +98,7 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
|
||||
|
||||
### Register a machine (normal login)
|
||||
|
||||
On a client machine, execute the `tailscale up` command to login:
|
||||
On a client machine, execute the `tailscale` login command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server YOUR_HEADSCALE_URL
|
||||
@@ -110,7 +111,7 @@ docker exec -it headscale \
|
||||
headscale nodes register --user myfirstuser --key <YOUR_MACHINE_KEY>
|
||||
```
|
||||
|
||||
### Register a machine using a pre authenticated key
|
||||
### Register machine using a pre authenticated key
|
||||
|
||||
Generate a key using the command line:
|
||||
|
||||
@@ -119,7 +120,7 @@ docker exec -it headscale \
|
||||
headscale preauthkeys create --user myfirstuser --reusable --expiration 24h
|
||||
```
|
||||
|
||||
This will return a pre-authenticated key that can be used to connect a node to headscale with the `tailscale up` command:
|
||||
This will return a pre-authenticated key that can be used to connect a node to headscale during the `tailscale` command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
@@ -127,11 +128,11 @@ tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
|
||||
## Debugging headscale running in Docker
|
||||
|
||||
The Headscale container image is based on a "distroless" image that does not contain a shell or any other debug tools. If you need to debug headscale running in the Docker container, you can use the `-debug` variant, for example `docker.io/headscale/headscale:x.x.x-debug`.
|
||||
The `headscale/headscale` Docker container is based on a "distroless" image that does not contain a shell or any other debug tools. If you need to debug your application running in the Docker container, you can use the `-debug` variant, for example `headscale/headscale:x.x.x-debug`.
|
||||
|
||||
### Running the debug Docker container
|
||||
|
||||
To run the debug Docker container, use the exact same commands as above, but replace `docker.io/headscale/headscale:x.x.x` with `docker.io/headscale/headscale:x.x.x-debug` (`x.x.x` is the version of headscale). The two containers are compatible with each other, so you can alternate between them.
|
||||
To run the debug Docker container, use the exact same commands as above, but replace `headscale/headscale:x.x.x` with `headscale/headscale:x.x.x-debug` (`x.x.x` is the version of headscale). The two containers are compatible with each other, so you can alternate between them.
|
||||
|
||||
### Executing commands in the debug container
|
||||
|
||||
@@ -141,14 +142,14 @@ Additionally, the debug container includes a minimalist Busybox shell.
|
||||
|
||||
To launch a shell in the container, use:
|
||||
|
||||
```shell
|
||||
docker run -it docker.io/headscale/headscale:x.x.x-debug sh
|
||||
```
|
||||
docker run -it headscale/headscale:x.x.x-debug sh
|
||||
```
|
||||
|
||||
You can also execute commands directly, such as `ls /ko-app` in this example:
|
||||
|
||||
```shell
|
||||
docker run docker.io/headscale/headscale:x.x.x-debug ls /ko-app
|
||||
```
|
||||
docker run headscale/headscale:x.x.x-debug ls /ko-app
|
||||
```
|
||||
|
||||
Using `docker exec -it` allows you to run commands in an existing container.
|
||||
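For example, to run a headscale CLI command in the running container from the steps above (assuming the container is named `headscale`):

```shell
docker exec -it headscale \
  headscale nodes list
```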
|
||||
@@ -7,7 +7,7 @@ Both are available on the [GitHub releases page](https://github.com/juanfont/hea
|
||||
|
||||
It is recommended to use our DEB packages to install headscale on a Debian based system as those packages configure a
|
||||
local user to run headscale, provide a default configuration and ship with a systemd service file. Supported
|
||||
distributions are Ubuntu 22.04 or newer, Debian 11 or newer.
|
||||
distributions are Ubuntu 20.04 or newer, Debian 11 or newer.
|
||||
|
||||
1. Download the [latest headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).
|
||||
|
||||
@@ -87,8 +87,8 @@ managed by systemd.
|
||||
sudo nano /etc/headscale/config.yaml
|
||||
```
|
||||
|
||||
1. Copy [headscale's systemd service file](https://github.com/juanfont/headscale/blob/main/packaging/systemd/headscale.service)
|
||||
to `/etc/systemd/system/headscale.service` and adjust it to suit your local setup. The following parameters likely need
|
||||
1. Copy [headscale's systemd service file](../../packaging/headscale.systemd.service) to
|
||||
`/etc/systemd/system/headscale.service` and adjust it to suit your local setup. The following parameters likely need
|
||||
to be modified: `ExecStart`, `WorkingDirectory`, `ReadWritePaths`.
|
||||
|
||||
1. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the
|
||||
|
||||
@@ -17,7 +17,7 @@ README](https://github.com/juanfont/headscale#contributing) for more information
|
||||
|
||||
```shell
|
||||
# Install prerequisites
|
||||
pkg_add go git
|
||||
pkg_add go
|
||||
|
||||
git clone https://github.com/juanfont/headscale.git
|
||||
|
||||
@@ -30,7 +30,7 @@ latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)
|
||||
|
||||
git checkout $latestTag
|
||||
|
||||
go build -ldflags="-s -w -X github.com/juanfont/headscale/hscontrol/types.Version=$latestTag -X github.com/juanfont/headscale/hscontrol/types.GitCommitHash=HASH" github.com/juanfont/headscale
|
||||
go build -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$latestTag" github.com/juanfont/headscale
|
||||
|
||||
# make it executable
|
||||
chmod a+x headscale
|
||||
|
||||
6
flake.lock
generated
@@ -20,11 +20,11 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1752012998,
|
||||
"narHash": "sha256-Q82Ms+FQmgOBkdoSVm+FBpuFoeUAffNerR5yVV7SgT8=",
|
||||
"lastModified": 1738297584,
|
||||
"narHash": "sha256-AYvaFBzt8dU0fcSK2jKD0Vg23K2eIRxfsVXIPCW9a0E=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "2a2130494ad647f953593c4e84ea4df839fbd68c",
|
||||
"rev": "9189ac18287c599860e878e905da550aa6dec1cd",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
48
flake.nix
@@ -12,16 +12,17 @@
|
||||
flake-utils,
|
||||
...
|
||||
}: let
|
||||
headscaleVersion = self.shortRev or self.dirtyShortRev;
|
||||
commitHash = self.rev or self.dirtyRev;
|
||||
headscaleVersion =
|
||||
if (self ? shortRev)
|
||||
then self.shortRev
|
||||
else "dev";
|
||||
in
|
||||
{
|
||||
overlay = _: prev: let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.system};
|
||||
buildGo = pkgs.buildGo124Module;
|
||||
vendorHash = "sha256-S2GnCg2dyfjIyi5gXhVEuRs5Bop2JAhZcnhg1fu4/Gg=";
|
||||
buildGo = pkgs.buildGo123Module;
|
||||
in {
|
||||
headscale = buildGo {
|
||||
headscale = buildGo rec {
|
||||
pname = "headscale";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
@@ -31,27 +32,11 @@
|
||||
|
||||
# When updating go.mod or go.sum, a new sha will need to be calculated,
|
||||
# update this if you have a mismatch after doing a change to those files.
|
||||
inherit vendorHash;
|
||||
vendorHash = "sha256-ZQj2A0GdLhHc7JLW7qgpGBveXXNWg9ueSG47OZQQXEw=";
|
||||
|
||||
subPackages = ["cmd/headscale"];
|
||||
|
||||
ldflags = [
|
||||
"-s"
|
||||
"-w"
|
||||
"-X github.com/juanfont/headscale/hscontrol/types.Version=${headscaleVersion}"
|
||||
"-X github.com/juanfont/headscale/hscontrol/types.GitCommitHash=${commitHash}"
|
||||
];
|
||||
};
|
||||
|
||||
hi = buildGo {
|
||||
pname = "hi";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
|
||||
checkFlags = ["-short"];
|
||||
inherit vendorHash;
|
||||
|
||||
subPackages = ["cmd/hi"];
|
||||
ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"];
|
||||
};
|
||||
|
||||
protoc-gen-grpc-gateway = buildGo rec {
|
||||
@@ -93,9 +78,6 @@
|
||||
# golangci-lint = prev.golangci-lint.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
# golangci-lint-langserver = prev.golangci-lint.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
|
||||
goreleaser = prev.goreleaser.override {
|
||||
buildGoModule = buildGo;
|
||||
@@ -112,10 +94,6 @@
|
||||
gofumpt = prev.gofumpt.override {
|
||||
buildGoModule = buildGo;
|
||||
};
|
||||
|
||||
# gopls = prev.gopls.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
};
|
||||
}
|
||||
// flake-utils.lib.eachDefaultSystem
|
||||
@@ -124,12 +102,11 @@
|
||||
overlays = [self.overlay];
|
||||
inherit system;
|
||||
};
|
||||
buildDeps = with pkgs; [git go_1_24 gnumake];
|
||||
buildDeps = with pkgs; [git go_1_23 gnumake];
|
||||
devDeps = with pkgs;
|
||||
buildDeps
|
||||
++ [
|
||||
golangci-lint
|
||||
golangci-lint-langserver
|
||||
golines
|
||||
nodePackages.prettier
|
||||
goreleaser
|
||||
@@ -137,7 +114,6 @@
|
||||
gotestsum
|
||||
gotests
|
||||
gofumpt
|
||||
gopls
|
||||
ksh
|
||||
ko
|
||||
yq-go
|
||||
@@ -156,11 +132,7 @@
|
||||
buf
|
||||
clang-tools # clang-format
|
||||
protobuf-language-server
|
||||
|
||||
# Add hi to make it even easier to use ci runner.
|
||||
hi
|
||||
]
|
||||
++ lib.optional pkgs.stdenv.isLinux [traceroute];
|
||||
];
|
||||
|
||||
# Add entry to build a docker image with headscale
|
||||
# caveat: only works on Linux
|
||||
|
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/apikey.proto

@@ -12,7 +12,6 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)

const (
@@ -23,14 +22,15 @@ const (
)

type ApiKey struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
LastSeen *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
LastSeen *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
}

func (x *ApiKey) Reset() {
@@ -99,10 +99,11 @@ func (x *ApiKey) GetLastSeen() *timestamppb.Timestamp {
}

type CreateApiKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=expiration,proto3" json:"expiration,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Expiration *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=expiration,proto3" json:"expiration,omitempty"`
}

func (x *CreateApiKeyRequest) Reset() {
@@ -143,10 +144,11 @@ func (x *CreateApiKeyRequest) GetExpiration() *timestamppb.Timestamp {
}

type CreateApiKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
ApiKey string `protobuf:"bytes,1,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

ApiKey string `protobuf:"bytes,1,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
}

func (x *CreateApiKeyResponse) Reset() {
@@ -187,10 +189,11 @@ func (x *CreateApiKeyResponse) GetApiKey() string {
}

type ExpireApiKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
}

func (x *ExpireApiKeyRequest) Reset() {
@@ -231,9 +234,9 @@ func (x *ExpireApiKeyRequest) GetPrefix() string {
}

type ExpireApiKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *ExpireApiKeyResponse) Reset() {
@@ -267,9 +270,9 @@ func (*ExpireApiKeyResponse) Descriptor() ([]byte, []int) {
}

type ListApiKeysRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *ListApiKeysRequest) Reset() {
@@ -303,10 +306,11 @@ func (*ListApiKeysRequest) Descriptor() ([]byte, []int) {
}

type ListApiKeysResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
ApiKeys []*ApiKey `protobuf:"bytes,1,rep,name=api_keys,json=apiKeys,proto3" json:"api_keys,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

ApiKeys []*ApiKey `protobuf:"bytes,1,rep,name=api_keys,json=apiKeys,proto3" json:"api_keys,omitempty"`
}

func (x *ListApiKeysResponse) Reset() {
@@ -347,10 +351,11 @@ func (x *ListApiKeysResponse) GetApiKeys() []*ApiKey {
}

type DeleteApiKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
}

func (x *DeleteApiKeyRequest) Reset() {
@@ -391,9 +396,9 @@ func (x *DeleteApiKeyRequest) GetPrefix() string {
}

type DeleteApiKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *DeleteApiKeyResponse) Reset() {
@@ -428,42 +433,62 @@ func (*DeleteApiKeyResponse) Descriptor() ([]byte, []int) {

||||
var File_headscale_v1_apikey_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_headscale_v1_apikey_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x19headscale/v1/apikey.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\xe0\x01\n" +
|
||||
"\x06ApiKey\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x16\n" +
|
||||
"\x06prefix\x18\x02 \x01(\tR\x06prefix\x12:\n" +
|
||||
"\n" +
|
||||
"expiration\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
|
||||
"expiration\x129\n" +
|
||||
"\n" +
|
||||
"created_at\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x127\n" +
|
||||
"\tlast_seen\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\blastSeen\"Q\n" +
|
||||
"\x13CreateApiKeyRequest\x12:\n" +
|
||||
"\n" +
|
||||
"expiration\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
|
||||
"expiration\"/\n" +
|
||||
"\x14CreateApiKeyResponse\x12\x17\n" +
|
||||
"\aapi_key\x18\x01 \x01(\tR\x06apiKey\"-\n" +
|
||||
"\x13ExpireApiKeyRequest\x12\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
|
||||
"\x14ExpireApiKeyResponse\"\x14\n" +
|
||||
"\x12ListApiKeysRequest\"F\n" +
|
||||
"\x13ListApiKeysResponse\x12/\n" +
|
||||
"\bapi_keys\x18\x01 \x03(\v2\x14.headscale.v1.ApiKeyR\aapiKeys\"-\n" +
|
||||
"\x13DeleteApiKeyRequest\x12\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
|
||||
"\x14DeleteApiKeyResponseB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
var file_headscale_v1_apikey_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x01, 0x0a, 0x06, 0x41,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3a, 0x0a,
|
||||
0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65,
|
||||
0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65,
|
||||
0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
|
||||
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74,
|
||||
0x65, 0x64, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65,
|
||||
0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
|
||||
0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x22, 0x51, 0x0a,
|
||||
0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x22, 0x2f, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x61, 0x70, 0x69, 0x5f,
|
||||
0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x70, 0x69, 0x4b, 0x65,
|
||||
0x79, 0x22, 0x2d, 0x0a, 0x13, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65,
|
||||
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66,
|
||||
0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
|
||||
0x22, 0x16, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x46,
|
||||
0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79,
|
||||
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x07, 0x61,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x2d, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a,
|
||||
0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70,
|
||||
0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x29, 0x5a,
|
||||
0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e,
|
||||
0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67,
|
||||
0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}

var (
file_headscale_v1_apikey_proto_rawDescOnce sync.Once
file_headscale_v1_apikey_proto_rawDescData []byte
file_headscale_v1_apikey_proto_rawDescData = file_headscale_v1_apikey_proto_rawDesc
)

func file_headscale_v1_apikey_proto_rawDescGZIP() []byte {
file_headscale_v1_apikey_proto_rawDescOnce.Do(func() {
file_headscale_v1_apikey_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_apikey_proto_rawDesc), len(file_headscale_v1_apikey_proto_rawDesc)))
file_headscale_v1_apikey_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_apikey_proto_rawDescData)
})
return file_headscale_v1_apikey_proto_rawDescData
}
@@ -503,7 +528,7 @@ func file_headscale_v1_apikey_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_apikey_proto_rawDesc), len(file_headscale_v1_apikey_proto_rawDesc)),
RawDescriptor: file_headscale_v1_apikey_proto_rawDesc,
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
@@ -514,6 +539,7 @@ func file_headscale_v1_apikey_proto_init() {
MessageInfos: file_headscale_v1_apikey_proto_msgTypes,
}.Build()
File_headscale_v1_apikey_proto = out.File
file_headscale_v1_apikey_proto_rawDesc = nil
file_headscale_v1_apikey_proto_goTypes = nil
file_headscale_v1_apikey_proto_depIdxs = nil
}

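The apikey.pb.go diff above shows the same messages emitted by two protoc-gen-go versions (v1.36.6 and v1.35.2): the internal bookkeeping fields (`state`, `sizeCache`, `unknownFields`) and the raw descriptor representation change, but the exported fields, getters, and wire format do not, so calling code is unaffected. A minimal sketch of using these generated types follows; it assumes the generated package is importable as `github.com/juanfont/headscale/gen/go/headscale/v1` (adjust to your module layout) and is not part of headscale itself:

```go
// apikey_demo.go — hypothetical consumer of the generated ApiKey messages.
package main

import (
	"fmt"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // assumed import path
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Build a CreateApiKeyRequest that expires in 90 days.
	req := &v1.CreateApiKeyRequest{
		Expiration: timestamppb.New(time.Now().Add(90 * 24 * time.Hour)),
	}

	// Marshal and unmarshal: the wire format is identical regardless of
	// which protoc-gen-go version produced the Go bindings.
	raw, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}

	var decoded v1.CreateApiKeyRequest
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println("expires:", decoded.GetExpiration().AsTime())
}
```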
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.6
|
||||
// protoc-gen-go v1.35.2
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/device.proto
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -23,11 +22,12 @@ const (
|
||||
)
|
||||
|
||||
type Latency struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
LatencyMs float32 `protobuf:"fixed32,1,opt,name=latency_ms,json=latencyMs,proto3" json:"latency_ms,omitempty"`
|
||||
Preferred bool `protobuf:"varint,2,opt,name=preferred,proto3" json:"preferred,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
LatencyMs float32 `protobuf:"fixed32,1,opt,name=latency_ms,json=latencyMs,proto3" json:"latency_ms,omitempty"`
|
||||
Preferred bool `protobuf:"varint,2,opt,name=preferred,proto3" json:"preferred,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Latency) Reset() {
|
||||
@@ -75,15 +75,16 @@ func (x *Latency) GetPreferred() bool {
|
||||
}
|
||||
|
||||
type ClientSupports struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
HairPinning bool `protobuf:"varint,1,opt,name=hair_pinning,json=hairPinning,proto3" json:"hair_pinning,omitempty"`
|
||||
Ipv6 bool `protobuf:"varint,2,opt,name=ipv6,proto3" json:"ipv6,omitempty"`
|
||||
Pcp bool `protobuf:"varint,3,opt,name=pcp,proto3" json:"pcp,omitempty"`
|
||||
Pmp bool `protobuf:"varint,4,opt,name=pmp,proto3" json:"pmp,omitempty"`
|
||||
Udp bool `protobuf:"varint,5,opt,name=udp,proto3" json:"udp,omitempty"`
|
||||
Upnp bool `protobuf:"varint,6,opt,name=upnp,proto3" json:"upnp,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
HairPinning bool `protobuf:"varint,1,opt,name=hair_pinning,json=hairPinning,proto3" json:"hair_pinning,omitempty"`
|
||||
Ipv6 bool `protobuf:"varint,2,opt,name=ipv6,proto3" json:"ipv6,omitempty"`
|
||||
Pcp bool `protobuf:"varint,3,opt,name=pcp,proto3" json:"pcp,omitempty"`
|
||||
Pmp bool `protobuf:"varint,4,opt,name=pmp,proto3" json:"pmp,omitempty"`
|
||||
Udp bool `protobuf:"varint,5,opt,name=udp,proto3" json:"udp,omitempty"`
|
||||
Upnp bool `protobuf:"varint,6,opt,name=upnp,proto3" json:"upnp,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ClientSupports) Reset() {
|
||||
@@ -159,14 +160,15 @@ func (x *ClientSupports) GetUpnp() bool {
|
||||
}
|
||||
|
||||
type ClientConnectivity struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Endpoints []string `protobuf:"bytes,1,rep,name=endpoints,proto3" json:"endpoints,omitempty"`
|
||||
Derp string `protobuf:"bytes,2,opt,name=derp,proto3" json:"derp,omitempty"`
|
||||
MappingVariesByDestIp bool `protobuf:"varint,3,opt,name=mapping_varies_by_dest_ip,json=mappingVariesByDestIp,proto3" json:"mapping_varies_by_dest_ip,omitempty"`
|
||||
Latency map[string]*Latency `protobuf:"bytes,4,rep,name=latency,proto3" json:"latency,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
ClientSupports *ClientSupports `protobuf:"bytes,5,opt,name=client_supports,json=clientSupports,proto3" json:"client_supports,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Endpoints []string `protobuf:"bytes,1,rep,name=endpoints,proto3" json:"endpoints,omitempty"`
|
||||
Derp string `protobuf:"bytes,2,opt,name=derp,proto3" json:"derp,omitempty"`
|
||||
MappingVariesByDestIp bool `protobuf:"varint,3,opt,name=mapping_varies_by_dest_ip,json=mappingVariesByDestIp,proto3" json:"mapping_varies_by_dest_ip,omitempty"`
|
||||
Latency map[string]*Latency `protobuf:"bytes,4,rep,name=latency,proto3" json:"latency,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
ClientSupports *ClientSupports `protobuf:"bytes,5,opt,name=client_supports,json=clientSupports,proto3" json:"client_supports,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ClientConnectivity) Reset() {
|
||||
@@ -235,10 +237,11 @@ func (x *ClientConnectivity) GetClientSupports() *ClientSupports {
|
||||
}
|
||||
|
||||
type GetDeviceRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetDeviceRequest) Reset() {
|
||||
@@ -279,7 +282,10 @@ func (x *GetDeviceRequest) GetId() string {
|
||||
}
|
||||
|
||||
type GetDeviceResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"`
|
||||
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"`
|
||||
@@ -300,8 +306,6 @@ type GetDeviceResponse struct {
|
||||
EnabledRoutes []string `protobuf:"bytes,18,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
AdvertisedRoutes []string `protobuf:"bytes,19,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
ClientConnectivity *ClientConnectivity `protobuf:"bytes,20,opt,name=client_connectivity,json=clientConnectivity,proto3" json:"client_connectivity,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *GetDeviceResponse) Reset() {
|
||||
@@ -475,10 +479,11 @@ func (x *GetDeviceResponse) GetClientConnectivity() *ClientConnectivity {
|
||||
}
|
||||
|
||||
type DeleteDeviceRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteDeviceRequest) Reset() {
|
||||
@@ -519,9 +524,9 @@ func (x *DeleteDeviceRequest) GetId() string {
|
||||
}
|
||||
|
||||
type DeleteDeviceResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *DeleteDeviceResponse) Reset() {
|
||||
@@ -555,10 +560,11 @@ func (*DeleteDeviceResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type GetDeviceRoutesRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetDeviceRoutesRequest) Reset() {
|
||||
@@ -599,11 +605,12 @@ func (x *GetDeviceRoutesRequest) GetId() string {
|
||||
}
|
||||
|
||||
type GetDeviceRoutesResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetDeviceRoutesResponse) Reset() {
|
||||
@@ -651,11 +658,12 @@ func (x *GetDeviceRoutesResponse) GetAdvertisedRoutes() []string {
|
||||
}
|
||||
|
||||
type EnableDeviceRoutesRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Routes []string `protobuf:"bytes,2,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Routes []string `protobuf:"bytes,2,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnableDeviceRoutesRequest) Reset() {
|
||||
@@ -703,11 +711,12 @@ func (x *EnableDeviceRoutesRequest) GetRoutes() []string {
|
||||
}
|
||||
|
||||
type EnableDeviceRoutesResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnableDeviceRoutesResponse) Reset() {
|
||||
@@ -756,80 +765,139 @@ func (x *EnableDeviceRoutesResponse) GetAdvertisedRoutes() []string {
|
||||
|
||||
var File_headscale_v1_device_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_headscale_v1_device_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x19headscale/v1/device.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"F\n" +
|
||||
"\aLatency\x12\x1d\n" +
|
||||
"\n" +
|
||||
"latency_ms\x18\x01 \x01(\x02R\tlatencyMs\x12\x1c\n" +
|
||||
"\tpreferred\x18\x02 \x01(\bR\tpreferred\"\x91\x01\n" +
|
||||
"\x0eClientSupports\x12!\n" +
|
||||
"\fhair_pinning\x18\x01 \x01(\bR\vhairPinning\x12\x12\n" +
|
||||
"\x04ipv6\x18\x02 \x01(\bR\x04ipv6\x12\x10\n" +
|
||||
"\x03pcp\x18\x03 \x01(\bR\x03pcp\x12\x10\n" +
|
||||
"\x03pmp\x18\x04 \x01(\bR\x03pmp\x12\x10\n" +
|
||||
"\x03udp\x18\x05 \x01(\bR\x03udp\x12\x12\n" +
|
||||
"\x04upnp\x18\x06 \x01(\bR\x04upnp\"\xe3\x02\n" +
|
||||
"\x12ClientConnectivity\x12\x1c\n" +
|
||||
"\tendpoints\x18\x01 \x03(\tR\tendpoints\x12\x12\n" +
|
||||
"\x04derp\x18\x02 \x01(\tR\x04derp\x128\n" +
|
||||
"\x19mapping_varies_by_dest_ip\x18\x03 \x01(\bR\x15mappingVariesByDestIp\x12G\n" +
|
||||
"\alatency\x18\x04 \x03(\v2-.headscale.v1.ClientConnectivity.LatencyEntryR\alatency\x12E\n" +
|
||||
"\x0fclient_supports\x18\x05 \x01(\v2\x1c.headscale.v1.ClientSupportsR\x0eclientSupports\x1aQ\n" +
|
||||
"\fLatencyEntry\x12\x10\n" +
|
||||
"\x03key\x18\x01 \x01(\tR\x03key\x12+\n" +
|
||||
"\x05value\x18\x02 \x01(\v2\x15.headscale.v1.LatencyR\x05value:\x028\x01\"\"\n" +
|
||||
"\x10GetDeviceRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\tR\x02id\"\xa0\x06\n" +
|
||||
"\x11GetDeviceResponse\x12\x1c\n" +
|
||||
"\taddresses\x18\x01 \x03(\tR\taddresses\x12\x0e\n" +
|
||||
"\x02id\x18\x02 \x01(\tR\x02id\x12\x12\n" +
|
||||
"\x04user\x18\x03 \x01(\tR\x04user\x12\x12\n" +
|
||||
"\x04name\x18\x04 \x01(\tR\x04name\x12\x1a\n" +
|
||||
"\bhostname\x18\x05 \x01(\tR\bhostname\x12%\n" +
|
||||
"\x0eclient_version\x18\x06 \x01(\tR\rclientVersion\x12)\n" +
|
||||
"\x10update_available\x18\a \x01(\bR\x0fupdateAvailable\x12\x0e\n" +
|
||||
"\x02os\x18\b \x01(\tR\x02os\x124\n" +
|
||||
"\acreated\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\acreated\x127\n" +
|
||||
"\tlast_seen\x18\n" +
|
||||
" \x01(\v2\x1a.google.protobuf.TimestampR\blastSeen\x12.\n" +
|
||||
"\x13key_expiry_disabled\x18\v \x01(\bR\x11keyExpiryDisabled\x124\n" +
|
||||
"\aexpires\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\aexpires\x12\x1e\n" +
|
||||
"\n" +
|
||||
"authorized\x18\r \x01(\bR\n" +
|
||||
"authorized\x12\x1f\n" +
|
||||
"\vis_external\x18\x0e \x01(\bR\n" +
|
||||
"isExternal\x12\x1f\n" +
|
||||
"\vmachine_key\x18\x0f \x01(\tR\n" +
|
||||
"machineKey\x12\x19\n" +
|
||||
"\bnode_key\x18\x10 \x01(\tR\anodeKey\x12>\n" +
|
||||
"\x1bblocks_incoming_connections\x18\x11 \x01(\bR\x19blocksIncomingConnections\x12%\n" +
|
||||
"\x0eenabled_routes\x18\x12 \x03(\tR\renabledRoutes\x12+\n" +
|
||||
"\x11advertised_routes\x18\x13 \x03(\tR\x10advertisedRoutes\x12Q\n" +
|
||||
"\x13client_connectivity\x18\x14 \x01(\v2 .headscale.v1.ClientConnectivityR\x12clientConnectivity\"%\n" +
|
||||
"\x13DeleteDeviceRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\tR\x02id\"\x16\n" +
|
||||
"\x14DeleteDeviceResponse\"(\n" +
|
||||
"\x16GetDeviceRoutesRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\tR\x02id\"m\n" +
|
||||
"\x17GetDeviceRoutesResponse\x12%\n" +
|
||||
"\x0eenabled_routes\x18\x01 \x03(\tR\renabledRoutes\x12+\n" +
|
||||
"\x11advertised_routes\x18\x02 \x03(\tR\x10advertisedRoutes\"C\n" +
|
||||
"\x19EnableDeviceRoutesRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\tR\x02id\x12\x16\n" +
|
||||
"\x06routes\x18\x02 \x03(\tR\x06routes\"p\n" +
|
||||
"\x1aEnableDeviceRoutesResponse\x12%\n" +
|
||||
"\x0eenabled_routes\x18\x01 \x03(\tR\renabledRoutes\x12+\n" +
|
||||
"\x11advertised_routes\x18\x02 \x03(\tR\x10advertisedRoutesB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
var file_headscale_v1_device_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x64,
|
||||
0x65, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x46, 0x0a, 0x07, 0x4c, 0x61,
|
||||
0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79,
|
||||
0x5f, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e,
|
||||
0x63, 0x79, 0x4d, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65,
|
||||
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72,
|
||||
0x65, 0x64, 0x22, 0x91, 0x01, 0x0a, 0x0e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x75, 0x70,
|
||||
0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x61, 0x69, 0x72, 0x5f, 0x70, 0x69,
|
||||
0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x68, 0x61, 0x69,
|
||||
0x72, 0x50, 0x69, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x69, 0x70, 0x76, 0x36, 0x12, 0x10, 0x0a, 0x03,
|
||||
0x70, 0x63, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x63, 0x70, 0x12, 0x10,
|
||||
0x0a, 0x03, 0x70, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x6d, 0x70,
|
||||
0x12, 0x10, 0x0a, 0x03, 0x75, 0x64, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x75,
|
||||
0x64, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x70, 0x6e, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
|
||||
0x52, 0x04, 0x75, 0x70, 0x6e, 0x70, 0x22, 0xe3, 0x02, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e,
|
||||
0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x1c, 0x0a,
|
||||
0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
|
||||
0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64,
|
||||
0x65, 0x72, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, 0x72, 0x70, 0x12,
|
||||
0x38, 0x0a, 0x19, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x65,
|
||||
0x73, 0x5f, 0x62, 0x79, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x08, 0x52, 0x15, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x72, 0x69, 0x65,
|
||||
0x73, 0x42, 0x79, 0x44, 0x65, 0x73, 0x74, 0x49, 0x70, 0x12, 0x47, 0x0a, 0x07, 0x6c, 0x61, 0x74,
|
||||
0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
|
||||
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2e, 0x4c, 0x61, 0x74,
|
||||
0x65, 0x6e, 0x63, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6c, 0x61, 0x74, 0x65, 0x6e,
|
||||
0x63, 0x79, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x75, 0x70,
|
||||
0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e,
|
||||
0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
|
||||
0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x1a, 0x51, 0x0a, 0x0c, 0x4c, 0x61, 0x74,
|
||||
0x65, 0x6e, 0x63, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63,
|
||||
0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x22, 0x0a, 0x10,
|
||||
0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64,
|
||||
0x22, 0xa0, 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
|
||||
0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65,
|
||||
0x73, 0x73, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08,
|
||||
0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
|
||||
0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6c, 0x69, 0x65,
|
||||
0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
|
||||
0x29, 0x0a, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x75, 0x70, 0x64, 0x61, 0x74,
|
||||
0x65, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73,
|
||||
0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x63, 0x72,
|
||||
0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
|
||||
0x12, 0x37, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x18, 0x0a, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
|
||||
0x08, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x6b, 0x65, 0x79,
|
||||
0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64,
|
||||
0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x6b, 0x65, 0x79, 0x45, 0x78, 0x70, 0x69, 0x72,
|
||||
0x79, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12,
|
||||
0x1e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x18, 0x0d, 0x20,
|
||||
0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x12,
|
||||
0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x0e,
|
||||
0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
|
||||
0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18,
|
||||
0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4b, 0x65,
|
||||
0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x1b,
|
||||
0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x5f,
|
||||
0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28,
|
||||
0x08, 0x52, 0x19, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e,
|
||||
0x67, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e,
|
||||
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x12,
|
||||
0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65,
|
||||
0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10,
|
||||
0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x12, 0x51, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
|
||||
0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69,
|
||||
0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52,
|
||||
0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76,
|
||||
0x69, 0x74, 0x79, 0x22, 0x25, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x65, 0x76,
|
||||
0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x28, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02,
|
||||
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x6d, 0x0a, 0x17,
|
||||
0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c,
|
||||
0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
|
||||
0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2b,
|
||||
0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x64, 0x76, 0x65, 0x72,
|
||||
0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x19, 0x45,
|
||||
0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x22, 0x70, 0x0a, 0x1a, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25,
|
||||
0x0a, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69,
|
||||
0x73, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
|
||||
0x52, 0x10, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_headscale_v1_device_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_device_proto_rawDescData []byte
|
||||
file_headscale_v1_device_proto_rawDescData = file_headscale_v1_device_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_headscale_v1_device_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_device_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_device_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_device_proto_rawDesc), len(file_headscale_v1_device_proto_rawDesc)))
|
||||
file_headscale_v1_device_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_device_proto_rawDescData)
|
||||
})
|
||||
return file_headscale_v1_device_proto_rawDescData
|
||||
}
|
||||
@@ -874,7 +942,7 @@ func file_headscale_v1_device_proto_init() {
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_device_proto_rawDesc), len(file_headscale_v1_device_proto_rawDesc)),
|
||||
RawDescriptor: file_headscale_v1_device_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 12,
|
||||
NumExtensions: 0,
|
||||
@@ -885,6 +953,7 @@ func file_headscale_v1_device_proto_init() {
|
||||
MessageInfos: file_headscale_v1_device_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_device_proto = out.File
|
||||
file_headscale_v1_device_proto_rawDesc = nil
|
||||
file_headscale_v1_device_proto_goTypes = nil
|
||||
file_headscale_v1_device_proto_depIdxs = nil
|
||||
}
|
||||
|
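As with the apikey messages, the device.pb.go regeneration only rearranges generated internals; the map-valued `Latency` field keeps its ordinary `map[string]*Latency` shape. A short hedged sketch, under the same import-path assumption as above and with made-up sample values:

```go
// device_demo.go — hypothetical use of the generated ClientConnectivity type.
package main

import (
	"fmt"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // assumed import path
)

func main() {
	conn := &v1.ClientConnectivity{
		Endpoints: []string{"192.0.2.10:41641"}, // example endpoint
		Derp:      "1",                          // example DERP region
		Latency: map[string]*v1.Latency{
			"1": {LatencyMs: 12.5, Preferred: true},
		},
	}
	fmt.Println("preferred DERP latency:", conn.GetLatency()["1"].GetLatencyMs(), "ms")
}
```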
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.6
|
||||
// protoc-gen-go v1.35.2
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/headscale.proto
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -23,90 +22,291 @@ const (
|
||||
|
||||
var File_headscale_v1_headscale_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_headscale_v1_headscale_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto2\xa3\x16\n" +
|
||||
"\x10HeadscaleService\x12h\n" +
|
||||
"\n" +
|
||||
"CreateUser\x12\x1f.headscale.v1.CreateUserRequest\x1a .headscale.v1.CreateUserResponse\"\x17\x82\xd3\xe4\x93\x02\x11:\x01*\"\f/api/v1/user\x12\x80\x01\n" +
|
||||
"\n" +
|
||||
"RenameUser\x12\x1f.headscale.v1.RenameUserRequest\x1a .headscale.v1.RenameUserResponse\"/\x82\xd3\xe4\x93\x02)\"'/api/v1/user/{old_id}/rename/{new_name}\x12j\n" +
|
||||
"\n" +
|
||||
"DeleteUser\x12\x1f.headscale.v1.DeleteUserRequest\x1a .headscale.v1.DeleteUserResponse\"\x19\x82\xd3\xe4\x93\x02\x13*\x11/api/v1/user/{id}\x12b\n" +
|
||||
"\tListUsers\x12\x1e.headscale.v1.ListUsersRequest\x1a\x1f.headscale.v1.ListUsersResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/user\x12\x80\x01\n" +
|
||||
"\x10CreatePreAuthKey\x12%.headscale.v1.CreatePreAuthKeyRequest\x1a&.headscale.v1.CreatePreAuthKeyResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/preauthkey\x12\x87\x01\n" +
|
||||
"\x10ExpirePreAuthKey\x12%.headscale.v1.ExpirePreAuthKeyRequest\x1a&.headscale.v1.ExpirePreAuthKeyResponse\"$\x82\xd3\xe4\x93\x02\x1e:\x01*\"\x19/api/v1/preauthkey/expire\x12z\n" +
|
||||
"\x0fListPreAuthKeys\x12$.headscale.v1.ListPreAuthKeysRequest\x1a%.headscale.v1.ListPreAuthKeysResponse\"\x1a\x82\xd3\xe4\x93\x02\x14\x12\x12/api/v1/preauthkey\x12}\n" +
|
||||
"\x0fDebugCreateNode\x12$.headscale.v1.DebugCreateNodeRequest\x1a%.headscale.v1.DebugCreateNodeResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/debug/node\x12f\n" +
|
||||
"\aGetNode\x12\x1c.headscale.v1.GetNodeRequest\x1a\x1d.headscale.v1.GetNodeResponse\"\x1e\x82\xd3\xe4\x93\x02\x18\x12\x16/api/v1/node/{node_id}\x12n\n" +
|
||||
"\aSetTags\x12\x1c.headscale.v1.SetTagsRequest\x1a\x1d.headscale.v1.SetTagsResponse\"&\x82\xd3\xe4\x93\x02 :\x01*\"\x1b/api/v1/node/{node_id}/tags\x12\x96\x01\n" +
|
||||
"\x11SetApprovedRoutes\x12&.headscale.v1.SetApprovedRoutesRequest\x1a'.headscale.v1.SetApprovedRoutesResponse\"0\x82\xd3\xe4\x93\x02*:\x01*\"%/api/v1/node/{node_id}/approve_routes\x12t\n" +
|
||||
"\fRegisterNode\x12!.headscale.v1.RegisterNodeRequest\x1a\".headscale.v1.RegisterNodeResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\"\x15/api/v1/node/register\x12o\n" +
|
||||
"\n" +
|
||||
"DeleteNode\x12\x1f.headscale.v1.DeleteNodeRequest\x1a .headscale.v1.DeleteNodeResponse\"\x1e\x82\xd3\xe4\x93\x02\x18*\x16/api/v1/node/{node_id}\x12v\n" +
|
||||
"\n" +
|
||||
"ExpireNode\x12\x1f.headscale.v1.ExpireNodeRequest\x1a .headscale.v1.ExpireNodeResponse\"%\x82\xd3\xe4\x93\x02\x1f\"\x1d/api/v1/node/{node_id}/expire\x12\x81\x01\n" +
|
||||
"\n" +
|
||||
"RenameNode\x12\x1f.headscale.v1.RenameNodeRequest\x1a .headscale.v1.RenameNodeResponse\"0\x82\xd3\xe4\x93\x02*\"(/api/v1/node/{node_id}/rename/{new_name}\x12b\n" +
|
||||
"\tListNodes\x12\x1e.headscale.v1.ListNodesRequest\x1a\x1f.headscale.v1.ListNodesResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/node\x12q\n" +
|
||||
"\bMoveNode\x12\x1d.headscale.v1.MoveNodeRequest\x1a\x1e.headscale.v1.MoveNodeResponse\"&\x82\xd3\xe4\x93\x02 :\x01*\"\x1b/api/v1/node/{node_id}/user\x12\x80\x01\n" +
|
||||
"\x0fBackfillNodeIPs\x12$.headscale.v1.BackfillNodeIPsRequest\x1a%.headscale.v1.BackfillNodeIPsResponse\" \x82\xd3\xe4\x93\x02\x1a\"\x18/api/v1/node/backfillips\x12p\n" +
|
||||
"\fCreateApiKey\x12!.headscale.v1.CreateApiKeyRequest\x1a\".headscale.v1.CreateApiKeyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\"\x0e/api/v1/apikey\x12w\n" +
|
||||
"\fExpireApiKey\x12!.headscale.v1.ExpireApiKeyRequest\x1a\".headscale.v1.ExpireApiKeyResponse\" \x82\xd3\xe4\x93\x02\x1a:\x01*\"\x15/api/v1/apikey/expire\x12j\n" +
|
||||
"\vListApiKeys\x12 .headscale.v1.ListApiKeysRequest\x1a!.headscale.v1.ListApiKeysResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/apikey\x12v\n" +
|
||||
"\fDeleteApiKey\x12!.headscale.v1.DeleteApiKeyRequest\x1a\".headscale.v1.DeleteApiKeyResponse\"\x1f\x82\xd3\xe4\x93\x02\x19*\x17/api/v1/apikey/{prefix}\x12d\n" +
|
||||
"\tGetPolicy\x12\x1e.headscale.v1.GetPolicyRequest\x1a\x1f.headscale.v1.GetPolicyResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/policy\x12g\n" +
|
||||
"\tSetPolicy\x12\x1e.headscale.v1.SetPolicyRequest\x1a\x1f.headscale.v1.SetPolicyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\x1a\x0e/api/v1/policyB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x0a, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x1a, 0x17, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xe9, 0x19, 0x0a,
|
||||
0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
|
||||
0x65, 0x12, 0x68, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12,
|
||||
0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43,
|
||||
0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x3a, 0x01, 0x2a, 0x22, 0x0c, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a, 0x0a,
|
||||
0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, 0x27, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75,
|
||||
0x73, 0x65, 0x72, 0x2f, 0x7b, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x65, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x6a,
|
||||
0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x2a, 0x11, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x75, 0x73, 0x65, 0x72, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x12, 0x62, 0x0a, 0x09, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0e,
|
||||
0x12, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80,
|
||||
0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a, 0x22, 0x12, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65,
|
||||
0x79, 0x12, 0x87, 0x01, 0x0a, 0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41,
|
||||
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41,
|
||||
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01, 0x2a,
|
||||
0x22, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74,
|
||||
0x68, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x7a, 0x0a, 0x0f, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x24,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b,
|
||||
0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65,
|
||||
0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x12, 0x7d, 0x0a, 0x0f, 0x44, 0x65, 0x62, 0x75, 0x67,
|
||||
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43,
|
||||
0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a,
|
||||
0x01, 0x2a, 0x22, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75,
|
||||
0x67, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x66, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64,
|
||||
0x65, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47,
|
||||
0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x6e,
|
||||
0x0a, 0x07, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01,
|
||||
0x2a, 0x22, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
|
||||
0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x74, 0x61, 0x67, 0x73, 0x12, 0x74,
|
||||
0x0a, 0x0c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
|
||||
0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x22, 0x15, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x72, 0x65, 0x67, 0x69,
|
||||
0x73, 0x74, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f,
|
||||
0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x2a, 0x16, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x76, 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e,
|
||||
0x6f, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x22, 0x1d,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f,
|
||||
0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x81, 0x01,
|
||||
0x0a, 0x0a, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x22, 0x28, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f,
|
||||
0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x7d, 0x12, 0x62, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1e,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x14, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0e, 0x12, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x71, 0x0a, 0x08, 0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64,
|
||||
0x65, 0x12, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, 0x1b, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
|
||||
0x69, 0x64, 0x7d, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x42, 0x61, 0x63,
|
||||
0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x12, 0x24, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b,
|
||||
0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x1a, 0x22, 0x18, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
|
||||
0x62, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x69, 0x70, 0x73, 0x12, 0x64, 0x0a, 0x09, 0x47,
|
||||
0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x22, 0x20, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12,
|
||||
0x80, 0x01, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22,
|
||||
0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f,
|
||||
0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x64, 0x69, 0x73, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x12, 0x7f, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f,
|
||||
0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x75, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a,
|
||||
0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f,
|
||||
0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72,
|
||||
0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65,
|
||||
0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x22, 0x0e, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x77, 0x0a, 0x0c,
|
||||
0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69,
|
||||
0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45,
|
||||
0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65,
|
||||
0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x6a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69,
|
||||
0x4b, 0x65, 0x79, 0x73, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65,
|
||||
0x79, 0x12, 0x76, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65,
|
||||
0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19,
|
||||
0x2a, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79,
|
||||
0x2f, 0x7b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x7d, 0x12, 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74,
|
||||
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12,
|
||||
0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
|
||||
0x67, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1e, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x50,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x50,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x1a, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f,
|
||||
0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_headscale_v1_headscale_proto_goTypes = []any{
(*CreateUserRequest)(nil), // 0: headscale.v1.CreateUserRequest
(*RenameUserRequest)(nil), // 1: headscale.v1.RenameUserRequest
(*DeleteUserRequest)(nil), // 2: headscale.v1.DeleteUserRequest
(*ListUsersRequest)(nil), // 3: headscale.v1.ListUsersRequest
(*CreatePreAuthKeyRequest)(nil), // 4: headscale.v1.CreatePreAuthKeyRequest
(*ExpirePreAuthKeyRequest)(nil), // 5: headscale.v1.ExpirePreAuthKeyRequest
(*ListPreAuthKeysRequest)(nil), // 6: headscale.v1.ListPreAuthKeysRequest
(*DebugCreateNodeRequest)(nil), // 7: headscale.v1.DebugCreateNodeRequest
(*GetNodeRequest)(nil), // 8: headscale.v1.GetNodeRequest
(*SetTagsRequest)(nil), // 9: headscale.v1.SetTagsRequest
(*SetApprovedRoutesRequest)(nil), // 10: headscale.v1.SetApprovedRoutesRequest
(*RegisterNodeRequest)(nil), // 11: headscale.v1.RegisterNodeRequest
(*DeleteNodeRequest)(nil), // 12: headscale.v1.DeleteNodeRequest
(*ExpireNodeRequest)(nil), // 13: headscale.v1.ExpireNodeRequest
(*RenameNodeRequest)(nil), // 14: headscale.v1.RenameNodeRequest
(*ListNodesRequest)(nil), // 15: headscale.v1.ListNodesRequest
(*MoveNodeRequest)(nil), // 16: headscale.v1.MoveNodeRequest
(*BackfillNodeIPsRequest)(nil), // 17: headscale.v1.BackfillNodeIPsRequest
(*CreateApiKeyRequest)(nil), // 18: headscale.v1.CreateApiKeyRequest
(*ExpireApiKeyRequest)(nil), // 19: headscale.v1.ExpireApiKeyRequest
(*ListApiKeysRequest)(nil), // 20: headscale.v1.ListApiKeysRequest
(*DeleteApiKeyRequest)(nil), // 21: headscale.v1.DeleteApiKeyRequest
(*GetPolicyRequest)(nil), // 22: headscale.v1.GetPolicyRequest
(*SetPolicyRequest)(nil), // 23: headscale.v1.SetPolicyRequest
(*CreateUserResponse)(nil), // 24: headscale.v1.CreateUserResponse
(*RenameUserResponse)(nil), // 25: headscale.v1.RenameUserResponse
(*DeleteUserResponse)(nil), // 26: headscale.v1.DeleteUserResponse
(*ListUsersResponse)(nil), // 27: headscale.v1.ListUsersResponse
(*CreatePreAuthKeyResponse)(nil), // 28: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyResponse)(nil), // 29: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysResponse)(nil), // 30: headscale.v1.ListPreAuthKeysResponse
(*DebugCreateNodeResponse)(nil), // 31: headscale.v1.DebugCreateNodeResponse
(*GetNodeResponse)(nil), // 32: headscale.v1.GetNodeResponse
(*SetTagsResponse)(nil), // 33: headscale.v1.SetTagsResponse
(*SetApprovedRoutesResponse)(nil), // 34: headscale.v1.SetApprovedRoutesResponse
(*RegisterNodeResponse)(nil), // 35: headscale.v1.RegisterNodeResponse
(*DeleteNodeResponse)(nil), // 36: headscale.v1.DeleteNodeResponse
(*ExpireNodeResponse)(nil), // 37: headscale.v1.ExpireNodeResponse
(*RenameNodeResponse)(nil), // 38: headscale.v1.RenameNodeResponse
(*ListNodesResponse)(nil), // 39: headscale.v1.ListNodesResponse
(*MoveNodeResponse)(nil), // 40: headscale.v1.MoveNodeResponse
(*BackfillNodeIPsResponse)(nil), // 41: headscale.v1.BackfillNodeIPsResponse
(*CreateApiKeyResponse)(nil), // 42: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyResponse)(nil), // 43: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysResponse)(nil), // 44: headscale.v1.ListApiKeysResponse
(*DeleteApiKeyResponse)(nil), // 45: headscale.v1.DeleteApiKeyResponse
(*GetPolicyResponse)(nil), // 46: headscale.v1.GetPolicyResponse
(*SetPolicyResponse)(nil), // 47: headscale.v1.SetPolicyResponse
(*CreateUserRequest)(nil), // 0: headscale.v1.CreateUserRequest
(*RenameUserRequest)(nil), // 1: headscale.v1.RenameUserRequest
(*DeleteUserRequest)(nil), // 2: headscale.v1.DeleteUserRequest
(*ListUsersRequest)(nil), // 3: headscale.v1.ListUsersRequest
(*CreatePreAuthKeyRequest)(nil), // 4: headscale.v1.CreatePreAuthKeyRequest
(*ExpirePreAuthKeyRequest)(nil), // 5: headscale.v1.ExpirePreAuthKeyRequest
(*ListPreAuthKeysRequest)(nil), // 6: headscale.v1.ListPreAuthKeysRequest
(*DebugCreateNodeRequest)(nil), // 7: headscale.v1.DebugCreateNodeRequest
(*GetNodeRequest)(nil), // 8: headscale.v1.GetNodeRequest
(*SetTagsRequest)(nil), // 9: headscale.v1.SetTagsRequest
(*RegisterNodeRequest)(nil), // 10: headscale.v1.RegisterNodeRequest
(*DeleteNodeRequest)(nil), // 11: headscale.v1.DeleteNodeRequest
(*ExpireNodeRequest)(nil), // 12: headscale.v1.ExpireNodeRequest
(*RenameNodeRequest)(nil), // 13: headscale.v1.RenameNodeRequest
(*ListNodesRequest)(nil), // 14: headscale.v1.ListNodesRequest
(*MoveNodeRequest)(nil), // 15: headscale.v1.MoveNodeRequest
(*BackfillNodeIPsRequest)(nil), // 16: headscale.v1.BackfillNodeIPsRequest
(*GetRoutesRequest)(nil), // 17: headscale.v1.GetRoutesRequest
(*EnableRouteRequest)(nil), // 18: headscale.v1.EnableRouteRequest
(*DisableRouteRequest)(nil), // 19: headscale.v1.DisableRouteRequest
(*GetNodeRoutesRequest)(nil), // 20: headscale.v1.GetNodeRoutesRequest
(*DeleteRouteRequest)(nil), // 21: headscale.v1.DeleteRouteRequest
(*CreateApiKeyRequest)(nil), // 22: headscale.v1.CreateApiKeyRequest
(*ExpireApiKeyRequest)(nil), // 23: headscale.v1.ExpireApiKeyRequest
(*ListApiKeysRequest)(nil), // 24: headscale.v1.ListApiKeysRequest
(*DeleteApiKeyRequest)(nil), // 25: headscale.v1.DeleteApiKeyRequest
(*GetPolicyRequest)(nil), // 26: headscale.v1.GetPolicyRequest
(*SetPolicyRequest)(nil), // 27: headscale.v1.SetPolicyRequest
(*CreateUserResponse)(nil), // 28: headscale.v1.CreateUserResponse
(*RenameUserResponse)(nil), // 29: headscale.v1.RenameUserResponse
(*DeleteUserResponse)(nil), // 30: headscale.v1.DeleteUserResponse
(*ListUsersResponse)(nil), // 31: headscale.v1.ListUsersResponse
(*CreatePreAuthKeyResponse)(nil), // 32: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyResponse)(nil), // 33: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysResponse)(nil), // 34: headscale.v1.ListPreAuthKeysResponse
(*DebugCreateNodeResponse)(nil), // 35: headscale.v1.DebugCreateNodeResponse
(*GetNodeResponse)(nil), // 36: headscale.v1.GetNodeResponse
(*SetTagsResponse)(nil), // 37: headscale.v1.SetTagsResponse
(*RegisterNodeResponse)(nil), // 38: headscale.v1.RegisterNodeResponse
(*DeleteNodeResponse)(nil), // 39: headscale.v1.DeleteNodeResponse
(*ExpireNodeResponse)(nil), // 40: headscale.v1.ExpireNodeResponse
(*RenameNodeResponse)(nil), // 41: headscale.v1.RenameNodeResponse
(*ListNodesResponse)(nil), // 42: headscale.v1.ListNodesResponse
(*MoveNodeResponse)(nil), // 43: headscale.v1.MoveNodeResponse
(*BackfillNodeIPsResponse)(nil), // 44: headscale.v1.BackfillNodeIPsResponse
(*GetRoutesResponse)(nil), // 45: headscale.v1.GetRoutesResponse
(*EnableRouteResponse)(nil), // 46: headscale.v1.EnableRouteResponse
(*DisableRouteResponse)(nil), // 47: headscale.v1.DisableRouteResponse
(*GetNodeRoutesResponse)(nil), // 48: headscale.v1.GetNodeRoutesResponse
(*DeleteRouteResponse)(nil), // 49: headscale.v1.DeleteRouteResponse
(*CreateApiKeyResponse)(nil), // 50: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyResponse)(nil), // 51: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysResponse)(nil), // 52: headscale.v1.ListApiKeysResponse
(*DeleteApiKeyResponse)(nil), // 53: headscale.v1.DeleteApiKeyResponse
(*GetPolicyResponse)(nil), // 54: headscale.v1.GetPolicyResponse
(*SetPolicyResponse)(nil), // 55: headscale.v1.SetPolicyResponse
}
var file_headscale_v1_headscale_proto_depIdxs = []int32{
0, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
@@ -119,46 +319,54 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{
7, // 7: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
8, // 8: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
9, // 9: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
10, // 10: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
11, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
12, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
13, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
14, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
15, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
16, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
17, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
18, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
19, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
20, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
21, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
22, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
23, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
24, // 24: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
25, // 25: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
26, // 26: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
27, // 27: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
28, // 28: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
29, // 29: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
30, // 30: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
31, // 31: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
32, // 32: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
33, // 33: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
34, // 34: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
35, // 35: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
36, // 36: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
37, // 37: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
38, // 38: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
39, // 39: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
40, // 40: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
41, // 41: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
42, // 42: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
43, // 43: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
44, // 44: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
45, // 45: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
46, // 46: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
47, // 47: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
24, // [24:48] is the sub-list for method output_type
0, // [0:24] is the sub-list for method input_type
10, // 10: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
11, // 11: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
12, // 12: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
13, // 13: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
14, // 14: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
15, // 15: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
16, // 16: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
17, // 17: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest
18, // 18: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest
19, // 19: headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest
20, // 20: headscale.v1.HeadscaleService.GetNodeRoutes:input_type -> headscale.v1.GetNodeRoutesRequest
21, // 21: headscale.v1.HeadscaleService.DeleteRoute:input_type -> headscale.v1.DeleteRouteRequest
22, // 22: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
23, // 23: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
24, // 24: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
25, // 25: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
26, // 26: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
27, // 27: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
28, // 28: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
29, // 29: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
30, // 30: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
31, // 31: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
32, // 32: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
33, // 33: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
34, // 34: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
35, // 35: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
36, // 36: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
37, // 37: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
38, // 38: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
39, // 39: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
40, // 40: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
41, // 41: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
42, // 42: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
43, // 43: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
44, // 44: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
45, // 45: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse
46, // 46: headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse
47, // 47: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse
48, // 48: headscale.v1.HeadscaleService.GetNodeRoutes:output_type -> headscale.v1.GetNodeRoutesResponse
49, // 49: headscale.v1.HeadscaleService.DeleteRoute:output_type -> headscale.v1.DeleteRouteResponse
50, // 50: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
51, // 51: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
52, // 52: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
53, // 53: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
54, // 54: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
55, // 55: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
28, // [28:56] is the sub-list for method output_type
0, // [0:28] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
@@ -172,13 +380,14 @@ func file_headscale_v1_headscale_proto_init() {
file_headscale_v1_user_proto_init()
file_headscale_v1_preauthkey_proto_init()
file_headscale_v1_node_proto_init()
file_headscale_v1_routes_proto_init()
file_headscale_v1_apikey_proto_init()
file_headscale_v1_policy_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)),
RawDescriptor: file_headscale_v1_headscale_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
@@ -188,6 +397,7 @@ func file_headscale_v1_headscale_proto_init() {
DependencyIndexes: file_headscale_v1_headscale_proto_depIdxs,
}.Build()
File_headscale_v1_headscale_proto = out.File
file_headscale_v1_headscale_proto_rawDesc = nil
file_headscale_v1_headscale_proto_goTypes = nil
file_headscale_v1_headscale_proto_depIdxs = nil
}
@@ -361,48 +361,6 @@ func local_request_HeadscaleService_SetTags_0(ctx context.Context, marshaler run
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq SetApprovedRoutesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
}
|
||||
protoReq.NodeId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
msg, err := client.SetApprovedRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq SetApprovedRoutesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
}
|
||||
protoReq.NodeId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
msg, err := server.SetApprovedRoutes(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
var filter_HeadscaleService_RegisterNode_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
|
||||
func request_HeadscaleService_RegisterNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
@@ -665,6 +623,168 @@ func local_request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marsh
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_GetRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq GetRoutesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := client.GetRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_GetRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq GetRoutesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := server.GetRoutes(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_EnableRoute_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq EnableRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := client.EnableRoute(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_EnableRoute_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq EnableRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := server.EnableRoute(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_DisableRoute_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DisableRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := client.DisableRoute(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_DisableRoute_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DisableRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := server.DisableRoute(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_GetNodeRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq GetNodeRoutesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
}
|
||||
protoReq.NodeId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
msg, err := client.GetNodeRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_GetNodeRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq GetNodeRoutesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
}
|
||||
protoReq.NodeId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
msg, err := server.GetNodeRoutes(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_DeleteRoute_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DeleteRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := client.DeleteRoute(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_DeleteRoute_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DeleteRouteRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
val, ok := pathParams["route_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
|
||||
}
|
||||
protoReq.RouteId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
|
||||
}
|
||||
msg, err := server.DeleteRoute(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_CreateApiKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq CreateApiKeyRequest
|
||||
@@ -1015,26 +1135,6 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_SetTags_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_SetApprovedRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/SetApprovedRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/approve_routes"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_SetApprovedRoutes_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_SetApprovedRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1175,6 +1275,106 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetRoutes", runtime.WithHTTPPathPattern("/api/v1/routes"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_GetRoutes_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_GetRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_EnableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/EnableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/enable"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_EnableRoute_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_EnableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_DisableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DisableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/disable"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_DisableRoute_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DisableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetNodeRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetNodeRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/routes"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_GetNodeRoutes_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_GetNodeRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DeleteRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_DeleteRoute_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DeleteRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_CreateApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1505,23 +1705,6 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_SetTags_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_SetApprovedRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/SetApprovedRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/approve_routes"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_SetApprovedRoutes_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_SetApprovedRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1641,6 +1824,91 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetRoutes", runtime.WithHTTPPathPattern("/api/v1/routes"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_GetRoutes_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_GetRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_EnableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/EnableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/enable"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_EnableRoute_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_EnableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_DisableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DisableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/disable"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_DisableRoute_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DisableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetNodeRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetNodeRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/routes"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_GetNodeRoutes_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_GetNodeRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DeleteRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_DeleteRoute_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DeleteRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_CreateApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1747,55 +2015,63 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_HeadscaleService_CreateUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
|
||||
pattern_HeadscaleService_RenameUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "user", "old_id", "rename", "new_name"}, ""))
|
||||
pattern_HeadscaleService_DeleteUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "user", "id"}, ""))
|
||||
pattern_HeadscaleService_ListUsers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
|
||||
pattern_HeadscaleService_CreatePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
|
||||
pattern_HeadscaleService_ExpirePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "preauthkey", "expire"}, ""))
|
||||
pattern_HeadscaleService_ListPreAuthKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
|
||||
pattern_HeadscaleService_DebugCreateNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "debug", "node"}, ""))
|
||||
pattern_HeadscaleService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
|
||||
pattern_HeadscaleService_SetTags_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "tags"}, ""))
|
||||
pattern_HeadscaleService_SetApprovedRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "approve_routes"}, ""))
|
||||
pattern_HeadscaleService_RegisterNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "register"}, ""))
|
||||
pattern_HeadscaleService_DeleteNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
|
||||
pattern_HeadscaleService_ExpireNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "expire"}, ""))
|
||||
pattern_HeadscaleService_RenameNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "node", "node_id", "rename", "new_name"}, ""))
|
||||
pattern_HeadscaleService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "node"}, ""))
|
||||
pattern_HeadscaleService_MoveNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "user"}, ""))
|
||||
pattern_HeadscaleService_BackfillNodeIPs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "backfillips"}, ""))
|
||||
pattern_HeadscaleService_CreateApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
|
||||
pattern_HeadscaleService_ExpireApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "apikey", "expire"}, ""))
|
||||
pattern_HeadscaleService_ListApiKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
|
||||
pattern_HeadscaleService_DeleteApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "apikey", "prefix"}, ""))
|
||||
pattern_HeadscaleService_GetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
|
||||
pattern_HeadscaleService_SetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
|
||||
pattern_HeadscaleService_CreateUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
|
||||
pattern_HeadscaleService_RenameUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "user", "old_id", "rename", "new_name"}, ""))
|
||||
pattern_HeadscaleService_DeleteUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "user", "id"}, ""))
|
||||
pattern_HeadscaleService_ListUsers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
|
||||
pattern_HeadscaleService_CreatePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
|
||||
pattern_HeadscaleService_ExpirePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "preauthkey", "expire"}, ""))
|
||||
pattern_HeadscaleService_ListPreAuthKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
|
||||
pattern_HeadscaleService_DebugCreateNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "debug", "node"}, ""))
|
||||
pattern_HeadscaleService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
|
||||
pattern_HeadscaleService_SetTags_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "tags"}, ""))
|
||||
pattern_HeadscaleService_RegisterNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "register"}, ""))
|
||||
pattern_HeadscaleService_DeleteNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
|
||||
pattern_HeadscaleService_ExpireNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "expire"}, ""))
|
||||
pattern_HeadscaleService_RenameNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "node", "node_id", "rename", "new_name"}, ""))
|
||||
pattern_HeadscaleService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "node"}, ""))
|
||||
pattern_HeadscaleService_MoveNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "user"}, ""))
|
||||
pattern_HeadscaleService_BackfillNodeIPs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "backfillips"}, ""))
|
||||
pattern_HeadscaleService_GetRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "routes"}, ""))
|
||||
pattern_HeadscaleService_EnableRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "routes", "route_id", "enable"}, ""))
|
||||
pattern_HeadscaleService_DisableRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "routes", "route_id", "disable"}, ""))
|
||||
pattern_HeadscaleService_GetNodeRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "routes"}, ""))
|
||||
pattern_HeadscaleService_DeleteRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "routes", "route_id"}, ""))
|
||||
pattern_HeadscaleService_CreateApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
|
||||
pattern_HeadscaleService_ExpireApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "apikey", "expire"}, ""))
|
||||
pattern_HeadscaleService_ListApiKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
|
||||
pattern_HeadscaleService_DeleteApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "apikey", "prefix"}, ""))
|
||||
pattern_HeadscaleService_GetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
|
||||
pattern_HeadscaleService_SetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_HeadscaleService_CreateUser_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_RenameUser_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DeleteUser_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ListUsers_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_CreatePreAuthKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ExpirePreAuthKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ListPreAuthKeys_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DebugCreateNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_GetNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_SetTags_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_SetApprovedRoutes_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_RegisterNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DeleteNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ExpireNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_RenameNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ListNodes_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_MoveNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_BackfillNodeIPs_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_CreateApiKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ExpireApiKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ListApiKeys_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DeleteApiKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_GetPolicy_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_SetPolicy_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_CreateUser_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_RenameUser_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DeleteUser_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ListUsers_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_CreatePreAuthKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ExpirePreAuthKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ListPreAuthKeys_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DebugCreateNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_GetNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_SetTags_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_RegisterNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DeleteNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ExpireNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_RenameNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ListNodes_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_MoveNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_BackfillNodeIPs_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_GetRoutes_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_EnableRoute_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DisableRoute_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_GetNodeRoutes_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DeleteRoute_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_CreateApiKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ExpireApiKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ListApiKeys_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DeleteApiKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_GetPolicy_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_SetPolicy_0 = runtime.ForwardResponseMessage
|
||||
)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.5.1
|
||||
// - protoc-gen-go-grpc v1.3.0
|
||||
// - protoc (unknown)
|
||||
// source: headscale/v1/headscale.proto
|
||||
|
||||
@@ -15,34 +15,38 @@ import (
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.64.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion9
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
const (
|
||||
HeadscaleService_CreateUser_FullMethodName = "/headscale.v1.HeadscaleService/CreateUser"
|
||||
HeadscaleService_RenameUser_FullMethodName = "/headscale.v1.HeadscaleService/RenameUser"
|
||||
HeadscaleService_DeleteUser_FullMethodName = "/headscale.v1.HeadscaleService/DeleteUser"
|
||||
HeadscaleService_ListUsers_FullMethodName = "/headscale.v1.HeadscaleService/ListUsers"
|
||||
HeadscaleService_CreatePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/CreatePreAuthKey"
|
||||
HeadscaleService_ExpirePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpirePreAuthKey"
|
||||
HeadscaleService_ListPreAuthKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListPreAuthKeys"
|
||||
HeadscaleService_DebugCreateNode_FullMethodName = "/headscale.v1.HeadscaleService/DebugCreateNode"
|
||||
HeadscaleService_GetNode_FullMethodName = "/headscale.v1.HeadscaleService/GetNode"
|
||||
HeadscaleService_SetTags_FullMethodName = "/headscale.v1.HeadscaleService/SetTags"
|
||||
HeadscaleService_SetApprovedRoutes_FullMethodName = "/headscale.v1.HeadscaleService/SetApprovedRoutes"
|
||||
HeadscaleService_RegisterNode_FullMethodName = "/headscale.v1.HeadscaleService/RegisterNode"
|
||||
HeadscaleService_DeleteNode_FullMethodName = "/headscale.v1.HeadscaleService/DeleteNode"
|
||||
HeadscaleService_ExpireNode_FullMethodName = "/headscale.v1.HeadscaleService/ExpireNode"
|
||||
HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode"
|
||||
HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes"
|
||||
HeadscaleService_MoveNode_FullMethodName = "/headscale.v1.HeadscaleService/MoveNode"
|
||||
HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs"
|
||||
HeadscaleService_CreateApiKey_FullMethodName = "/headscale.v1.HeadscaleService/CreateApiKey"
|
||||
HeadscaleService_ExpireApiKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpireApiKey"
|
||||
HeadscaleService_ListApiKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListApiKeys"
|
||||
HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey"
|
||||
HeadscaleService_GetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/GetPolicy"
|
||||
HeadscaleService_SetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/SetPolicy"
|
||||
HeadscaleService_CreateUser_FullMethodName = "/headscale.v1.HeadscaleService/CreateUser"
|
||||
HeadscaleService_RenameUser_FullMethodName = "/headscale.v1.HeadscaleService/RenameUser"
|
||||
HeadscaleService_DeleteUser_FullMethodName = "/headscale.v1.HeadscaleService/DeleteUser"
|
||||
HeadscaleService_ListUsers_FullMethodName = "/headscale.v1.HeadscaleService/ListUsers"
|
||||
HeadscaleService_CreatePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/CreatePreAuthKey"
|
||||
HeadscaleService_ExpirePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpirePreAuthKey"
|
||||
HeadscaleService_ListPreAuthKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListPreAuthKeys"
|
||||
HeadscaleService_DebugCreateNode_FullMethodName = "/headscale.v1.HeadscaleService/DebugCreateNode"
|
||||
HeadscaleService_GetNode_FullMethodName = "/headscale.v1.HeadscaleService/GetNode"
|
||||
HeadscaleService_SetTags_FullMethodName = "/headscale.v1.HeadscaleService/SetTags"
|
||||
HeadscaleService_RegisterNode_FullMethodName = "/headscale.v1.HeadscaleService/RegisterNode"
|
||||
HeadscaleService_DeleteNode_FullMethodName = "/headscale.v1.HeadscaleService/DeleteNode"
|
||||
HeadscaleService_ExpireNode_FullMethodName = "/headscale.v1.HeadscaleService/ExpireNode"
|
||||
HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode"
|
||||
HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes"
|
||||
HeadscaleService_MoveNode_FullMethodName = "/headscale.v1.HeadscaleService/MoveNode"
|
||||
HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs"
|
||||
HeadscaleService_GetRoutes_FullMethodName = "/headscale.v1.HeadscaleService/GetRoutes"
|
||||
HeadscaleService_EnableRoute_FullMethodName = "/headscale.v1.HeadscaleService/EnableRoute"
|
||||
HeadscaleService_DisableRoute_FullMethodName = "/headscale.v1.HeadscaleService/DisableRoute"
|
||||
HeadscaleService_GetNodeRoutes_FullMethodName = "/headscale.v1.HeadscaleService/GetNodeRoutes"
|
||||
HeadscaleService_DeleteRoute_FullMethodName = "/headscale.v1.HeadscaleService/DeleteRoute"
|
||||
HeadscaleService_CreateApiKey_FullMethodName = "/headscale.v1.HeadscaleService/CreateApiKey"
|
||||
HeadscaleService_ExpireApiKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpireApiKey"
|
||||
HeadscaleService_ListApiKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListApiKeys"
|
||||
HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey"
|
||||
HeadscaleService_GetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/GetPolicy"
|
||||
HeadscaleService_SetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/SetPolicy"
|
||||
)
|
||||
|
||||
// HeadscaleServiceClient is the client API for HeadscaleService service.
|
||||
@@ -62,7 +66,6 @@ type HeadscaleServiceClient interface {
|
||||
DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error)
|
||||
GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error)
|
||||
SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error)
|
||||
SetApprovedRoutes(ctx context.Context, in *SetApprovedRoutesRequest, opts ...grpc.CallOption) (*SetApprovedRoutesResponse, error)
|
||||
RegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error)
|
||||
DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error)
|
||||
ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error)
|
||||
@@ -70,6 +73,12 @@ type HeadscaleServiceClient interface {
|
||||
ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error)
|
||||
MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error)
|
||||
BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error)
|
||||
// --- Route start ---
|
||||
GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error)
|
||||
EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error)
|
||||
DisableRoute(ctx context.Context, in *DisableRouteRequest, opts ...grpc.CallOption) (*DisableRouteResponse, error)
|
||||
GetNodeRoutes(ctx context.Context, in *GetNodeRoutesRequest, opts ...grpc.CallOption) (*GetNodeRoutesResponse, error)
|
||||
DeleteRoute(ctx context.Context, in *DeleteRouteRequest, opts ...grpc.CallOption) (*DeleteRouteResponse, error)
|
||||
// --- ApiKeys start ---
|
||||
CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error)
|
||||
ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error)
|
||||
@@ -89,9 +98,8 @@ func NewHeadscaleServiceClient(cc grpc.ClientConnInterface) HeadscaleServiceClie
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CreateUserResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreateUser_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreateUser_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -99,9 +107,8 @@ func (c *headscaleServiceClient) CreateUser(ctx context.Context, in *CreateUserR
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) RenameUser(ctx context.Context, in *RenameUserRequest, opts ...grpc.CallOption) (*RenameUserResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(RenameUserResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RenameUser_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RenameUser_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -109,9 +116,8 @@ func (c *headscaleServiceClient) RenameUser(ctx context.Context, in *RenameUserR
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(DeleteUserResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteUser_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteUser_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -119,9 +125,8 @@ func (c *headscaleServiceClient) DeleteUser(ctx context.Context, in *DeleteUserR
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ListUsersResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListUsers_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListUsers_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -129,9 +134,8 @@ func (c *headscaleServiceClient) ListUsers(ctx context.Context, in *ListUsersReq
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CreatePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreatePreAuthKey_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreatePreAuthKey_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -139,9 +143,8 @@ func (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *Creat
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ExpirePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpirePreAuthKey_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpirePreAuthKey_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -149,9 +152,8 @@ func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *Expir
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ListPreAuthKeysResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListPreAuthKeys_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListPreAuthKeys_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -159,9 +161,8 @@ func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPr
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(DebugCreateNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DebugCreateNode_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DebugCreateNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -169,9 +170,8 @@ func (c *headscaleServiceClient) DebugCreateNode(ctx context.Context, in *DebugC
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(GetNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetNode_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -179,19 +179,8 @@ func (c *headscaleServiceClient) GetNode(ctx context.Context, in *GetNodeRequest
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SetTagsResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_SetTags_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) SetApprovedRoutes(ctx context.Context, in *SetApprovedRoutesRequest, opts ...grpc.CallOption) (*SetApprovedRoutesResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SetApprovedRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_SetApprovedRoutes_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_SetTags_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -199,9 +188,8 @@ func (c *headscaleServiceClient) SetApprovedRoutes(ctx context.Context, in *SetA
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) RegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(RegisterNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RegisterNode_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RegisterNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -209,9 +197,8 @@ func (c *headscaleServiceClient) RegisterNode(ctx context.Context, in *RegisterN
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(DeleteNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteNode_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -219,9 +206,8 @@ func (c *headscaleServiceClient) DeleteNode(ctx context.Context, in *DeleteNodeR
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ExpireNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpireNode_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpireNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -229,9 +215,8 @@ func (c *headscaleServiceClient) ExpireNode(ctx context.Context, in *ExpireNodeR
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(RenameNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RenameNode_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RenameNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -239,9 +224,8 @@ func (c *headscaleServiceClient) RenameNode(ctx context.Context, in *RenameNodeR
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ListNodesResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListNodes_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListNodes_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -249,9 +233,8 @@ func (c *headscaleServiceClient) ListNodes(ctx context.Context, in *ListNodesReq
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(MoveNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_MoveNode_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_MoveNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -259,9 +242,53 @@ func (c *headscaleServiceClient) MoveNode(ctx context.Context, in *MoveNodeReque
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(BackfillNodeIPsResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error) {
|
||||
out := new(GetRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetRoutes_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error) {
|
||||
out := new(EnableRouteResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_EnableRoute_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DisableRoute(ctx context.Context, in *DisableRouteRequest, opts ...grpc.CallOption) (*DisableRouteResponse, error) {
|
||||
out := new(DisableRouteResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DisableRoute_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetNodeRoutes(ctx context.Context, in *GetNodeRoutesRequest, opts ...grpc.CallOption) (*GetNodeRoutesResponse, error) {
|
||||
out := new(GetNodeRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetNodeRoutes_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteRoute(ctx context.Context, in *DeleteRouteRequest, opts ...grpc.CallOption) (*DeleteRouteResponse, error) {
|
||||
out := new(DeleteRouteResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteRoute_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -269,9 +296,8 @@ func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *Backfi
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CreateApiKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreateApiKey_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreateApiKey_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -279,9 +305,8 @@ func (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApi
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ExpireApiKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpireApiKey_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpireApiKey_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -289,9 +314,8 @@ func (c *headscaleServiceClient) ExpireApiKey(ctx context.Context, in *ExpireApi
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ListApiKeysResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListApiKeys_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListApiKeys_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -299,9 +323,8 @@ func (c *headscaleServiceClient) ListApiKeys(ctx context.Context, in *ListApiKey
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteApiKey(ctx context.Context, in *DeleteApiKeyRequest, opts ...grpc.CallOption) (*DeleteApiKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(DeleteApiKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteApiKey_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteApiKey_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -309,9 +332,8 @@ func (c *headscaleServiceClient) DeleteApiKey(ctx context.Context, in *DeleteApi
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(GetPolicyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetPolicy_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetPolicy_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -319,9 +341,8 @@ func (c *headscaleServiceClient) GetPolicy(ctx context.Context, in *GetPolicyReq
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SetPolicyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_SetPolicy_FullMethodName, in, out, cOpts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_SetPolicy_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -330,7 +351,7 @@ func (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyReq
|
||||
|
||||
// HeadscaleServiceServer is the server API for HeadscaleService service.
|
||||
// All implementations must embed UnimplementedHeadscaleServiceServer
|
||||
// for forward compatibility.
|
||||
// for forward compatibility
|
||||
type HeadscaleServiceServer interface {
|
||||
// --- User start ---
|
||||
CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error)
|
||||
@@ -345,7 +366,6 @@ type HeadscaleServiceServer interface {
|
||||
DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error)
|
||||
GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error)
|
||||
SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error)
|
||||
SetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error)
|
||||
RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error)
|
||||
DeleteNode(context.Context, *DeleteNodeRequest) (*DeleteNodeResponse, error)
|
||||
ExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error)
|
||||
@@ -353,6 +373,12 @@ type HeadscaleServiceServer interface {
|
||||
ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error)
|
||||
MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error)
|
||||
BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error)
|
||||
// --- Route start ---
|
||||
GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error)
|
||||
EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error)
|
||||
DisableRoute(context.Context, *DisableRouteRequest) (*DisableRouteResponse, error)
|
||||
GetNodeRoutes(context.Context, *GetNodeRoutesRequest) (*GetNodeRoutesResponse, error)
|
||||
DeleteRoute(context.Context, *DeleteRouteRequest) (*DeleteRouteResponse, error)
|
||||
// --- ApiKeys start ---
|
||||
CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error)
|
||||
ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error)
|
||||
@@ -364,12 +390,9 @@ type HeadscaleServiceServer interface {
|
||||
mustEmbedUnimplementedHeadscaleServiceServer()
|
||||
}
|
||||
|
||||
// UnimplementedHeadscaleServiceServer must be embedded to have
|
||||
// forward compatible implementations.
|
||||
//
|
||||
// NOTE: this should be embedded by value instead of pointer to avoid a nil
|
||||
// pointer dereference when methods are called.
|
||||
type UnimplementedHeadscaleServiceServer struct{}
|
||||
// UnimplementedHeadscaleServiceServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedHeadscaleServiceServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedHeadscaleServiceServer) CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateUser not implemented")
|
||||
@@ -401,9 +424,6 @@ func (UnimplementedHeadscaleServiceServer) GetNode(context.Context, *GetNodeRequ
|
||||
func (UnimplementedHeadscaleServiceServer) SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetTags not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) SetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetApprovedRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RegisterNode not implemented")
|
||||
}
|
||||
@@ -425,6 +445,21 @@ func (UnimplementedHeadscaleServiceServer) MoveNode(context.Context, *MoveNodeRe
|
||||
func (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method BackfillNodeIPs not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method EnableRoute not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DisableRoute(context.Context, *DisableRouteRequest) (*DisableRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DisableRoute not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetNodeRoutes(context.Context, *GetNodeRoutesRequest) (*GetNodeRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetNodeRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteRoute(context.Context, *DeleteRouteRequest) (*DeleteRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteRoute not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateApiKey not implemented")
|
||||
}
|
||||
@@ -444,7 +479,6 @@ func (UnimplementedHeadscaleServiceServer) SetPolicy(context.Context, *SetPolicy
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetPolicy not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
|
||||
func (UnimplementedHeadscaleServiceServer) testEmbeddedByValue() {}
|
||||
|
||||
// UnsafeHeadscaleServiceServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to HeadscaleServiceServer will
|
||||
@@ -454,13 +488,6 @@ type UnsafeHeadscaleServiceServer interface {
|
||||
}
|
||||
|
||||
func RegisterHeadscaleServiceServer(s grpc.ServiceRegistrar, srv HeadscaleServiceServer) {
|
||||
// If the following call pancis, it indicates UnimplementedHeadscaleServiceServer was
|
||||
// embedded by pointer and is nil. This will cause panics if an
|
||||
// unimplemented method is ever invoked, so we test this at initialization
|
||||
// time to prevent it from happening at runtime later due to I/O.
|
||||
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
|
||||
t.testEmbeddedByValue()
|
||||
}
|
||||
s.RegisterService(&HeadscaleService_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
@@ -644,24 +671,6 @@ func _HeadscaleService_SetTags_Handler(srv interface{}, ctx context.Context, dec
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_SetApprovedRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SetApprovedRoutesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).SetApprovedRoutes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_SetApprovedRoutes_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).SetApprovedRoutes(ctx, req.(*SetApprovedRoutesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_RegisterNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RegisterNodeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
@@ -788,6 +797,96 @@ func _HeadscaleService_BackfillNodeIPs_Handler(srv interface{}, ctx context.Cont
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetRoutesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetRoutes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_GetRoutes_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetRoutes(ctx, req.(*GetRoutesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_EnableRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(EnableRouteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).EnableRoute(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_EnableRoute_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).EnableRoute(ctx, req.(*EnableRouteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DisableRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DisableRouteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DisableRoute(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_DisableRoute_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DisableRoute(ctx, req.(*DisableRouteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetNodeRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetNodeRoutesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetNodeRoutes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_GetNodeRoutes_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetNodeRoutes(ctx, req.(*GetNodeRoutesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DeleteRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteRouteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DeleteRoute(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_DeleteRoute_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DeleteRoute(ctx, req.(*DeleteRouteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_CreateApiKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateApiKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
@@ -943,10 +1042,6 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "SetTags",
|
||||
Handler: _HeadscaleService_SetTags_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "SetApprovedRoutes",
|
||||
Handler: _HeadscaleService_SetApprovedRoutes_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "RegisterNode",
|
||||
Handler: _HeadscaleService_RegisterNode_Handler,
|
||||
@@ -975,6 +1070,26 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "BackfillNodeIPs",
|
||||
Handler: _HeadscaleService_BackfillNodeIPs_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetRoutes",
|
||||
Handler: _HeadscaleService_GetRoutes_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "EnableRoute",
|
||||
Handler: _HeadscaleService_EnableRoute_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DisableRoute",
|
||||
Handler: _HeadscaleService_DisableRoute_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetNodeRoutes",
|
||||
Handler: _HeadscaleService_GetNodeRoutes_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteRoute",
|
||||
Handler: _HeadscaleService_DeleteRoute_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreateApiKey",
|
||||
Handler: _HeadscaleService_CreateApiKey_Handler,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/policy.proto

@@ -12,7 +12,6 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)

const (
@@ -23,10 +22,11 @@ const (
)

type SetPolicyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
}

func (x *SetPolicyRequest) Reset() {
@@ -67,11 +67,12 @@ func (x *SetPolicyRequest) GetPolicy() string {
}

type SetPolicyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
}

func (x *SetPolicyResponse) Reset() {
@@ -119,9 +120,9 @@ func (x *SetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp {
}

type GetPolicyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *GetPolicyRequest) Reset() {
@@ -155,11 +156,12 @@ func (*GetPolicyRequest) Descriptor() ([]byte, []int) {
}

type GetPolicyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
}

func (x *GetPolicyResponse) Reset() {
@@ -208,29 +210,42 @@ func (x *GetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp {

var File_headscale_v1_policy_proto protoreflect.FileDescriptor

const file_headscale_v1_policy_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x19headscale/v1/policy.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"*\n" +
|
||||
"\x10SetPolicyRequest\x12\x16\n" +
|
||||
"\x06policy\x18\x01 \x01(\tR\x06policy\"f\n" +
|
||||
"\x11SetPolicyResponse\x12\x16\n" +
|
||||
"\x06policy\x18\x01 \x01(\tR\x06policy\x129\n" +
|
||||
"\n" +
|
||||
"updated_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tupdatedAt\"\x12\n" +
|
||||
"\x10GetPolicyRequest\"f\n" +
|
||||
"\x11GetPolicyResponse\x12\x16\n" +
|
||||
"\x06policy\x18\x01 \x01(\tR\x06policy\x129\n" +
|
||||
"\n" +
|
||||
"updated_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tupdatedAtB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
var file_headscale_v1_policy_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2a, 0x0a, 0x10, 0x53, 0x65,
|
||||
0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16,
|
||||
0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
|
||||
0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x66, 0x0a, 0x11, 0x53, 0x65, 0x74, 0x50, 0x6f, 0x6c,
|
||||
0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x6c,
|
||||
0x69, 0x63, 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61,
|
||||
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
|
||||
0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x12,
|
||||
0x0a, 0x10, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x22, 0x66, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63,
|
||||
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
|
||||
0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
|
||||
0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69,
|
||||
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e,
|
||||
0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f,
|
||||
0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
file_headscale_v1_policy_proto_rawDescOnce sync.Once
file_headscale_v1_policy_proto_rawDescData []byte
file_headscale_v1_policy_proto_rawDescData = file_headscale_v1_policy_proto_rawDesc
)

func file_headscale_v1_policy_proto_rawDescGZIP() []byte {
file_headscale_v1_policy_proto_rawDescOnce.Do(func() {
file_headscale_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_policy_proto_rawDesc), len(file_headscale_v1_policy_proto_rawDesc)))
file_headscale_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_policy_proto_rawDescData)
})
return file_headscale_v1_policy_proto_rawDescData
}
@@ -262,7 +277,7 @@ func file_headscale_v1_policy_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_policy_proto_rawDesc), len(file_headscale_v1_policy_proto_rawDesc)),
RawDescriptor: file_headscale_v1_policy_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
@@ -273,6 +288,7 @@ func file_headscale_v1_policy_proto_init() {
MessageInfos: file_headscale_v1_policy_proto_msgTypes,
}.Build()
File_headscale_v1_policy_proto = out.File
file_headscale_v1_policy_proto_rawDesc = nil
file_headscale_v1_policy_proto_goTypes = nil
file_headscale_v1_policy_proto_depIdxs = nil
}

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/preauthkey.proto

@@ -12,7 +12,6 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)

const (
@@ -23,18 +22,19 @@ const (
)

type PreAuthKey struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Reusable bool `protobuf:"varint,4,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,5,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
AclTags []string `protobuf:"bytes,9,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Reusable bool `protobuf:"varint,4,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,5,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
AclTags []string `protobuf:"bytes,9,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) Reset() {
|
||||
@@ -67,18 +67,18 @@ func (*PreAuthKey) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetUser() *User {
|
||||
func (x *PreAuthKey) GetUser() string {
|
||||
if x != nil {
|
||||
return x.User
|
||||
}
|
||||
return nil
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetId() uint64 {
|
||||
func (x *PreAuthKey) GetId() string {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetKey() string {
|
||||
@@ -131,14 +131,15 @@ func (x *PreAuthKey) GetAclTags() []string {
|
||||
}
|
||||
|
||||
type CreatePreAuthKeyRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
AclTags []string `protobuf:"bytes,5,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
AclTags []string `protobuf:"bytes,5,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) Reset() {
|
||||
@@ -171,11 +172,11 @@ func (*CreatePreAuthKeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetUser() uint64 {
|
||||
func (x *CreatePreAuthKeyRequest) GetUser() string {
|
||||
if x != nil {
|
||||
return x.User
|
||||
}
|
||||
return 0
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetReusable() bool {
|
||||
@@ -207,10 +208,11 @@ func (x *CreatePreAuthKeyRequest) GetAclTags() []string {
|
||||
}
|
||||
|
||||
type CreatePreAuthKeyResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
PreAuthKey *PreAuthKey `protobuf:"bytes,1,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PreAuthKey *PreAuthKey `protobuf:"bytes,1,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyResponse) Reset() {
|
||||
@@ -251,11 +253,12 @@ func (x *CreatePreAuthKeyResponse) GetPreAuthKey() *PreAuthKey {
|
||||
}
|
||||
|
||||
type ExpirePreAuthKeyRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) Reset() {
|
||||
@@ -288,11 +291,11 @@ func (*ExpirePreAuthKeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) GetUser() uint64 {
|
||||
func (x *ExpirePreAuthKeyRequest) GetUser() string {
|
||||
if x != nil {
|
||||
return x.User
|
||||
}
|
||||
return 0
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) GetKey() string {
|
||||
@@ -303,9 +306,9 @@ func (x *ExpirePreAuthKeyRequest) GetKey() string {
|
||||
}
|
||||
|
||||
type ExpirePreAuthKeyResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyResponse) Reset() {
|
||||
@@ -339,10 +342,11 @@ func (*ExpirePreAuthKeyResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type ListPreAuthKeysRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) Reset() {
|
||||
@@ -375,18 +379,19 @@ func (*ListPreAuthKeysRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) GetUser() uint64 {
|
||||
func (x *ListPreAuthKeysRequest) GetUser() string {
|
||||
if x != nil {
|
||||
return x.User
|
||||
}
|
||||
return 0
|
||||
return ""
|
||||
}
|
||||
|
||||
type ListPreAuthKeysResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
PreAuthKeys []*PreAuthKey `protobuf:"bytes,1,rep,name=pre_auth_keys,json=preAuthKeys,proto3" json:"pre_auth_keys,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PreAuthKeys []*PreAuthKey `protobuf:"bytes,1,rep,name=pre_auth_keys,json=preAuthKeys,proto3" json:"pre_auth_keys,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) Reset() {
|
||||
@@ -428,51 +433,76 @@ func (x *ListPreAuthKeysResponse) GetPreAuthKeys() []*PreAuthKey {
|
||||
|
||||
var File_headscale_v1_preauthkey_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_headscale_v1_preauthkey_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x1dheadscale/v1/preauthkey.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17headscale/v1/user.proto\"\xb6\x02\n" +
|
||||
"\n" +
|
||||
"PreAuthKey\x12&\n" +
|
||||
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\x12\x0e\n" +
|
||||
"\x02id\x18\x02 \x01(\x04R\x02id\x12\x10\n" +
|
||||
"\x03key\x18\x03 \x01(\tR\x03key\x12\x1a\n" +
|
||||
"\breusable\x18\x04 \x01(\bR\breusable\x12\x1c\n" +
|
||||
"\tephemeral\x18\x05 \x01(\bR\tephemeral\x12\x12\n" +
|
||||
"\x04used\x18\x06 \x01(\bR\x04used\x12:\n" +
|
||||
"\n" +
|
||||
"expiration\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\n" +
|
||||
"expiration\x129\n" +
|
||||
"\n" +
|
||||
"created_at\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x12\x19\n" +
|
||||
"\bacl_tags\x18\t \x03(\tR\aaclTags\"\xbe\x01\n" +
|
||||
"\x17CreatePreAuthKeyRequest\x12\x12\n" +
|
||||
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x1a\n" +
|
||||
"\breusable\x18\x02 \x01(\bR\breusable\x12\x1c\n" +
|
||||
"\tephemeral\x18\x03 \x01(\bR\tephemeral\x12:\n" +
|
||||
"\n" +
|
||||
"expiration\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
|
||||
"expiration\x12\x19\n" +
|
||||
"\bacl_tags\x18\x05 \x03(\tR\aaclTags\"V\n" +
|
||||
"\x18CreatePreAuthKeyResponse\x12:\n" +
|
||||
"\fpre_auth_key\x18\x01 \x01(\v2\x18.headscale.v1.PreAuthKeyR\n" +
|
||||
"preAuthKey\"?\n" +
|
||||
"\x17ExpirePreAuthKeyRequest\x12\x12\n" +
|
||||
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x10\n" +
|
||||
"\x03key\x18\x02 \x01(\tR\x03key\"\x1a\n" +
|
||||
"\x18ExpirePreAuthKeyResponse\",\n" +
|
||||
"\x16ListPreAuthKeysRequest\x12\x12\n" +
|
||||
"\x04user\x18\x01 \x01(\x04R\x04user\"W\n" +
|
||||
"\x17ListPreAuthKeysResponse\x12<\n" +
|
||||
"\rpre_auth_keys\x18\x01 \x03(\v2\x18.headscale.v1.PreAuthKeyR\vpreAuthKeysB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
var file_headscale_v1_preauthkey_proto_rawDesc = []byte{
|
||||
0x0a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70,
|
||||
0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
|
||||
0x0c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2,
|
||||
0x02, 0x0a, 0x0a, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65,
|
||||
0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69,
|
||||
0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
|
||||
0x6b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18,
|
||||
0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12,
|
||||
0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01,
|
||||
0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x75, 0x73, 0x65,
|
||||
0x64, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
|
||||
0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||
0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a,
|
||||
0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63,
|
||||
0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x6c, 0x5f,
|
||||
0x74, 0x61, 0x67, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x6c, 0x54,
|
||||
0x61, 0x67, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72,
|
||||
0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75,
|
||||
0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12,
|
||||
0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x3a, 0x0a,
|
||||
0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65,
|
||||
0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x6c,
|
||||
0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x6c,
|
||||
0x54, 0x61, 0x67, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72,
|
||||
0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b, 0x65, 0x79,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79,
|
||||
0x52, 0x0a, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x17,
|
||||
0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b,
|
||||
0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1a, 0x0a,
|
||||
0x18, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x16, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x22, 0x57, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x50,
|
||||
0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b,
|
||||
0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73,
|
||||
0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a,
|
||||
0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_headscale_v1_preauthkey_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_preauthkey_proto_rawDescData []byte
|
||||
file_headscale_v1_preauthkey_proto_rawDescData = file_headscale_v1_preauthkey_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_headscale_v1_preauthkey_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_preauthkey_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_preauthkey_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)))
|
||||
file_headscale_v1_preauthkey_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_preauthkey_proto_rawDescData)
|
||||
})
|
||||
return file_headscale_v1_preauthkey_proto_rawDescData
|
||||
}
|
||||
@@ -486,21 +516,19 @@ var file_headscale_v1_preauthkey_proto_goTypes = []any{
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 4: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysRequest)(nil), // 5: headscale.v1.ListPreAuthKeysRequest
|
||||
(*ListPreAuthKeysResponse)(nil), // 6: headscale.v1.ListPreAuthKeysResponse
|
||||
(*User)(nil), // 7: headscale.v1.User
|
||||
(*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp
|
||||
(*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
|
||||
}
|
||||
var file_headscale_v1_preauthkey_proto_depIdxs = []int32{
|
||||
7, // 0: headscale.v1.PreAuthKey.user:type_name -> headscale.v1.User
|
||||
8, // 1: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
|
||||
8, // 2: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
|
||||
8, // 3: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
|
||||
0, // 4: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
|
||||
0, // 5: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
|
||||
6, // [6:6] is the sub-list for method output_type
|
||||
6, // [6:6] is the sub-list for method input_type
|
||||
6, // [6:6] is the sub-list for extension type_name
|
||||
6, // [6:6] is the sub-list for extension extendee
|
||||
0, // [0:6] is the sub-list for field type_name
|
||||
7, // 0: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
|
||||
7, // 1: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
|
||||
7, // 2: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
|
||||
0, // 3: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
|
||||
0, // 4: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
|
||||
5, // [5:5] is the sub-list for method output_type
|
||||
5, // [5:5] is the sub-list for method input_type
|
||||
5, // [5:5] is the sub-list for extension type_name
|
||||
5, // [5:5] is the sub-list for extension extendee
|
||||
0, // [0:5] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_headscale_v1_preauthkey_proto_init() }
|
||||
@@ -508,12 +536,11 @@ func file_headscale_v1_preauthkey_proto_init() {
|
||||
if File_headscale_v1_preauthkey_proto != nil {
|
||||
return
|
||||
}
|
||||
file_headscale_v1_user_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)),
|
||||
RawDescriptor: file_headscale_v1_preauthkey_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 7,
|
||||
NumExtensions: 0,
|
||||
@@ -524,6 +551,7 @@ func file_headscale_v1_preauthkey_proto_init() {
|
||||
MessageInfos: file_headscale_v1_preauthkey_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_preauthkey_proto = out.File
|
||||
file_headscale_v1_preauthkey_proto_rawDesc = nil
|
||||
file_headscale_v1_preauthkey_proto_goTypes = nil
|
||||
file_headscale_v1_preauthkey_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
677
gen/go/headscale/v1/routes.pb.go
Normal file
@@ -0,0 +1,677 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/routes.proto

package v1

import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)

const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type Route struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"`
|
||||
Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
Advertised bool `protobuf:"varint,4,opt,name=advertised,proto3" json:"advertised,omitempty"`
|
||||
Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty"`
|
||||
IsPrimary bool `protobuf:"varint,6,opt,name=is_primary,json=isPrimary,proto3" json:"is_primary,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
|
||||
DeletedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=deleted_at,json=deletedAt,proto3" json:"deleted_at,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Route) Reset() {
|
||||
*x = Route{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *Route) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Route) ProtoMessage() {}
|
||||
|
||||
func (x *Route) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Route.ProtoReflect.Descriptor instead.
|
||||
func (*Route) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Route) GetId() uint64 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Route) GetNode() *Node {
|
||||
if x != nil {
|
||||
return x.Node
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Route) GetPrefix() string {
|
||||
if x != nil {
|
||||
return x.Prefix
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Route) GetAdvertised() bool {
|
||||
if x != nil {
|
||||
return x.Advertised
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *Route) GetEnabled() bool {
|
||||
if x != nil {
|
||||
return x.Enabled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *Route) GetIsPrimary() bool {
|
||||
if x != nil {
|
||||
return x.IsPrimary
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *Route) GetCreatedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.CreatedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Route) GetUpdatedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.UpdatedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Route) GetDeletedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.DeletedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GetRoutesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *GetRoutesRequest) Reset() {
|
||||
*x = GetRoutesRequest{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetRoutesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetRoutesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetRoutesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetRoutesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetRoutesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
type GetRoutesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetRoutesResponse) Reset() {
|
||||
*x = GetRoutesResponse{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetRoutesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetRoutesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetRoutesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[2]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetRoutesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetRoutesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *GetRoutesResponse) GetRoutes() []*Route {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EnableRouteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RouteId uint64 `protobuf:"varint,1,opt,name=route_id,json=routeId,proto3" json:"route_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnableRouteRequest) Reset() {
|
||||
*x = EnableRouteRequest{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *EnableRouteRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnableRouteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *EnableRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[3]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnableRouteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*EnableRouteRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *EnableRouteRequest) GetRouteId() uint64 {
|
||||
if x != nil {
|
||||
return x.RouteId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type EnableRouteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *EnableRouteResponse) Reset() {
|
||||
*x = EnableRouteResponse{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *EnableRouteResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnableRouteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *EnableRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[4]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnableRouteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*EnableRouteResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
type DisableRouteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RouteId uint64 `protobuf:"varint,1,opt,name=route_id,json=routeId,proto3" json:"route_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DisableRouteRequest) Reset() {
|
||||
*x = DisableRouteRequest{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DisableRouteRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DisableRouteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DisableRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[5]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DisableRouteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DisableRouteRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *DisableRouteRequest) GetRouteId() uint64 {
|
||||
if x != nil {
|
||||
return x.RouteId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type DisableRouteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *DisableRouteResponse) Reset() {
|
||||
*x = DisableRouteResponse{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DisableRouteResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DisableRouteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DisableRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[6]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DisableRouteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DisableRouteResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
type GetNodeRoutesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesRequest) Reset() {
|
||||
*x = GetNodeRoutesRequest{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetNodeRoutesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetNodeRoutesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[7]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetNodeRoutesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetNodeRoutesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesRequest) GetNodeId() uint64 {
|
||||
if x != nil {
|
||||
return x.NodeId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type GetNodeRoutesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesResponse) Reset() {
|
||||
*x = GetNodeRoutesResponse{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetNodeRoutesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetNodeRoutesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[8]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetNodeRoutesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetNodeRoutesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *GetNodeRoutesResponse) GetRoutes() []*Route {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type DeleteRouteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RouteId uint64 `protobuf:"varint,1,opt,name=route_id,json=routeId,proto3" json:"route_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteRouteRequest) Reset() {
|
||||
*x = DeleteRouteRequest{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DeleteRouteRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteRouteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[9]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteRouteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteRouteRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{9}
|
||||
}
|
||||
|
||||
func (x *DeleteRouteRequest) GetRouteId() uint64 {
|
||||
if x != nil {
|
||||
return x.RouteId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type DeleteRouteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *DeleteRouteResponse) Reset() {
|
||||
*x = DeleteRouteResponse{}
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DeleteRouteResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteRouteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[10]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteRouteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteRouteResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{10}
|
||||
}
|
||||
|
||||
var File_headscale_v1_routes_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_routes_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x22, 0xe1, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a,
|
||||
0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a,
|
||||
0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52,
|
||||
0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
|
||||
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1e, 0x0a,
|
||||
0x0a, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
|
||||
0x08, 0x52, 0x0a, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x12, 0x18, 0x0a,
|
||||
0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
|
||||
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x70, 0x72,
|
||||
0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x50,
|
||||
0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41,
|
||||
0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18,
|
||||
0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||
0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a,
|
||||
0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
||||
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x64, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x12, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x40, 0x0a, 0x11, 0x47,
|
||||
0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x12, 0x2b, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
|
||||
0x32, 0x13, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x2f, 0x0a,
|
||||
0x12, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x64, 0x22, 0x15,
|
||||
0x0a, 0x13, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x0a, 0x13, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x64, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x69, 0x73, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x2f, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
|
||||
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64,
|
||||
0x22, 0x44, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x72, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x2f, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74,
|
||||
0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x29,
|
||||
0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61,
|
||||
0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f,
|
||||
0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_headscale_v1_routes_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_routes_proto_rawDescData = file_headscale_v1_routes_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_headscale_v1_routes_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_routes_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_routes_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_routes_proto_rawDescData)
|
||||
})
|
||||
return file_headscale_v1_routes_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_headscale_v1_routes_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
|
||||
var file_headscale_v1_routes_proto_goTypes = []any{
|
||||
(*Route)(nil), // 0: headscale.v1.Route
|
||||
(*GetRoutesRequest)(nil), // 1: headscale.v1.GetRoutesRequest
|
||||
(*GetRoutesResponse)(nil), // 2: headscale.v1.GetRoutesResponse
|
||||
(*EnableRouteRequest)(nil), // 3: headscale.v1.EnableRouteRequest
|
||||
(*EnableRouteResponse)(nil), // 4: headscale.v1.EnableRouteResponse
|
||||
(*DisableRouteRequest)(nil), // 5: headscale.v1.DisableRouteRequest
|
||||
(*DisableRouteResponse)(nil), // 6: headscale.v1.DisableRouteResponse
|
||||
(*GetNodeRoutesRequest)(nil), // 7: headscale.v1.GetNodeRoutesRequest
|
||||
(*GetNodeRoutesResponse)(nil), // 8: headscale.v1.GetNodeRoutesResponse
|
||||
(*DeleteRouteRequest)(nil), // 9: headscale.v1.DeleteRouteRequest
|
||||
(*DeleteRouteResponse)(nil), // 10: headscale.v1.DeleteRouteResponse
|
||||
(*Node)(nil), // 11: headscale.v1.Node
|
||||
(*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp
|
||||
}
|
||||
var file_headscale_v1_routes_proto_depIdxs = []int32{
|
||||
11, // 0: headscale.v1.Route.node:type_name -> headscale.v1.Node
|
||||
12, // 1: headscale.v1.Route.created_at:type_name -> google.protobuf.Timestamp
|
||||
12, // 2: headscale.v1.Route.updated_at:type_name -> google.protobuf.Timestamp
|
||||
12, // 3: headscale.v1.Route.deleted_at:type_name -> google.protobuf.Timestamp
|
||||
0, // 4: headscale.v1.GetRoutesResponse.routes:type_name -> headscale.v1.Route
|
||||
0, // 5: headscale.v1.GetNodeRoutesResponse.routes:type_name -> headscale.v1.Route
|
||||
6, // [6:6] is the sub-list for method output_type
|
||||
6, // [6:6] is the sub-list for method input_type
|
||||
6, // [6:6] is the sub-list for extension type_name
|
||||
6, // [6:6] is the sub-list for extension extendee
|
||||
0, // [0:6] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_headscale_v1_routes_proto_init() }
|
||||
func file_headscale_v1_routes_proto_init() {
|
||||
if File_headscale_v1_routes_proto != nil {
|
||||
return
|
||||
}
|
||||
file_headscale_v1_node_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_routes_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 11,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_headscale_v1_routes_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_routes_proto_depIdxs,
|
||||
MessageInfos: file_headscale_v1_routes_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_routes_proto = out.File
|
||||
file_headscale_v1_routes_proto_rawDesc = nil
|
||||
file_headscale_v1_routes_proto_goTypes = nil
|
||||
file_headscale_v1_routes_proto_depIdxs = nil
|
||||
}
|
||||
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/user.proto

@@ -12,7 +12,6 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)

const (
@@ -23,7 +22,10 @@ const (
)

type User struct {
state protoimpl.MessageState `protogen:"open.v1"`
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
@@ -32,8 +34,6 @@ type User struct {
ProviderId string `protobuf:"bytes,6,opt,name=provider_id,json=providerId,proto3" json:"provider_id,omitempty"`
Provider string `protobuf:"bytes,7,opt,name=provider,proto3" json:"provider,omitempty"`
ProfilePicUrl string `protobuf:"bytes,8,opt,name=profile_pic_url,json=profilePicUrl,proto3" json:"profile_pic_url,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (x *User) Reset() {
|
||||
@@ -123,13 +123,14 @@ func (x *User) GetProfilePicUrl() string {
|
||||
}
|
||||
|
||||
type CreateUserRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
|
||||
PictureUrl string `protobuf:"bytes,4,opt,name=picture_url,json=pictureUrl,proto3" json:"picture_url,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
|
||||
PictureUrl string `protobuf:"bytes,4,opt,name=picture_url,json=pictureUrl,proto3" json:"picture_url,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreateUserRequest) Reset() {
|
||||
@@ -191,10 +192,11 @@ func (x *CreateUserRequest) GetPictureUrl() string {
|
||||
}
|
||||
|
||||
type CreateUserResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreateUserResponse) Reset() {
|
||||
@@ -235,11 +237,12 @@ func (x *CreateUserResponse) GetUser() *User {
|
||||
}
|
||||
|
||||
type RenameUserRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
OldId uint64 `protobuf:"varint,1,opt,name=old_id,json=oldId,proto3" json:"old_id,omitempty"`
|
||||
NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
OldId uint64 `protobuf:"varint,1,opt,name=old_id,json=oldId,proto3" json:"old_id,omitempty"`
|
||||
NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RenameUserRequest) Reset() {
|
||||
@@ -287,10 +290,11 @@ func (x *RenameUserRequest) GetNewName() string {
|
||||
}
|
||||
|
||||
type RenameUserResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RenameUserResponse) Reset() {
|
||||
@@ -331,10 +335,11 @@ func (x *RenameUserResponse) GetUser() *User {
|
||||
}
|
||||
|
||||
type DeleteUserRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteUserRequest) Reset() {
|
||||
@@ -375,9 +380,9 @@ func (x *DeleteUserRequest) GetId() uint64 {
|
||||
}
|
||||
|
||||
type DeleteUserResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *DeleteUserResponse) Reset() {
|
||||
@@ -411,12 +416,13 @@ func (*DeleteUserResponse) Descriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type ListUsersRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListUsersRequest) Reset() {
|
||||
@@ -471,10 +477,11 @@ func (x *ListUsersRequest) GetEmail() string {
|
||||
}
|
||||
|
||||
type ListUsersResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListUsersResponse) Reset() {
|
||||
@@ -516,51 +523,74 @@ func (x *ListUsersResponse) GetUsers() []*User {
|
||||
|
||||
var File_headscale_v1_user_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_headscale_v1_user_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x17headscale/v1/user.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\x83\x02\n" +
|
||||
"\x04User\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x12\n" +
|
||||
"\x04name\x18\x02 \x01(\tR\x04name\x129\n" +
|
||||
"\n" +
|
||||
"created_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x12!\n" +
|
||||
"\fdisplay_name\x18\x04 \x01(\tR\vdisplayName\x12\x14\n" +
|
||||
"\x05email\x18\x05 \x01(\tR\x05email\x12\x1f\n" +
|
||||
"\vprovider_id\x18\x06 \x01(\tR\n" +
|
||||
"providerId\x12\x1a\n" +
|
||||
"\bprovider\x18\a \x01(\tR\bprovider\x12&\n" +
|
||||
"\x0fprofile_pic_url\x18\b \x01(\tR\rprofilePicUrl\"\x81\x01\n" +
|
||||
"\x11CreateUserRequest\x12\x12\n" +
|
||||
"\x04name\x18\x01 \x01(\tR\x04name\x12!\n" +
|
||||
"\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x14\n" +
|
||||
"\x05email\x18\x03 \x01(\tR\x05email\x12\x1f\n" +
|
||||
"\vpicture_url\x18\x04 \x01(\tR\n" +
|
||||
"pictureUrl\"<\n" +
|
||||
"\x12CreateUserResponse\x12&\n" +
|
||||
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\"E\n" +
|
||||
"\x11RenameUserRequest\x12\x15\n" +
|
||||
"\x06old_id\x18\x01 \x01(\x04R\x05oldId\x12\x19\n" +
|
||||
"\bnew_name\x18\x02 \x01(\tR\anewName\"<\n" +
|
||||
"\x12RenameUserResponse\x12&\n" +
|
||||
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\"#\n" +
|
||||
"\x11DeleteUserRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\x04R\x02id\"\x14\n" +
|
||||
"\x12DeleteUserResponse\"L\n" +
|
||||
"\x10ListUsersRequest\x12\x0e\n" +
|
||||
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x12\n" +
|
||||
"\x04name\x18\x02 \x01(\tR\x04name\x12\x14\n" +
|
||||
"\x05email\x18\x03 \x01(\tR\x05email\"=\n" +
|
||||
"\x11ListUsersResponse\x12(\n" +
|
||||
"\x05users\x18\x01 \x03(\v2\x12.headscale.v1.UserR\x05usersB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
var file_headscale_v1_user_proto_rawDesc = []byte{
|
||||
0x0a, 0x17, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x75,
|
||||
0x73, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
|
||||
0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x02, 0x0a, 0x04, 0x55, 0x73, 0x65,
|
||||
0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69,
|
||||
0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
|
||||
0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
|
||||
0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74,
|
||||
0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x6f,
|
||||
0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
|
||||
0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72,
|
||||
0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72,
|
||||
0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
|
||||
0x65, 0x5f, 0x70, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x0d, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x22, 0x81,
|
||||
0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70,
|
||||
0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
|
||||
0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65,
|
||||
0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69,
|
||||
0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x69, 0x63, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x75, 0x72, 0x6c,
|
||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x69, 0x63, 0x74, 0x75, 0x72, 0x65, 0x55,
|
||||
0x72, 0x6c, 0x22, 0x3c, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72,
|
||||
0x22, 0x45, 0x0a, 0x11, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6f, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08,
|
||||
0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
|
||||
0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a,
|
||||
0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52,
|
||||
0x04, 0x75, 0x73, 0x65, 0x72, 0x22, 0x23, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55,
|
||||
0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x4c, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
|
||||
0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69,
|
||||
0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x3d,
|
||||
0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03,
|
||||
0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x42, 0x29, 0x5a,
|
||||
0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e,
|
||||
0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67,
|
||||
0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_headscale_v1_user_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_user_proto_rawDescData []byte
|
||||
file_headscale_v1_user_proto_rawDescData = file_headscale_v1_user_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_headscale_v1_user_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_user_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_user_proto_rawDesc), len(file_headscale_v1_user_proto_rawDesc)))
|
||||
file_headscale_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_user_proto_rawDescData)
|
||||
})
|
||||
return file_headscale_v1_user_proto_rawDescData
|
||||
}
|
||||
@@ -599,7 +629,7 @@ func file_headscale_v1_user_proto_init() {
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_user_proto_rawDesc), len(file_headscale_v1_user_proto_rawDesc)),
|
||||
RawDescriptor: file_headscale_v1_user_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
@@ -610,6 +640,7 @@ func file_headscale_v1_user_proto_init() {
|
||||
MessageInfos: file_headscale_v1_user_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_user_proto = out.File
|
||||
file_headscale_v1_user_proto_rawDesc = nil
|
||||
file_headscale_v1_user_proto_goTypes = nil
|
||||
file_headscale_v1_user_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
@@ -187,35 +187,6 @@
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "id",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "name",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "hostname",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "ipAddresses",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"collectionFormat": "multi"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
@@ -349,45 +320,6 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/node/{nodeId}/approve_routes": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_SetApprovedRoutes",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1SetApprovedRoutesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "nodeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/HeadscaleServiceSetApprovedRoutesBody"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/node/{nodeId}/expire": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_ExpireNode",
|
||||
@@ -456,6 +388,37 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/node/{nodeId}/routes": {
|
||||
"get": {
|
||||
"operationId": "HeadscaleService_GetNodeRoutes",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1GetNodeRoutesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "nodeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/node/{nodeId}/tags": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_SetTags",
|
||||
@@ -609,8 +572,7 @@
|
||||
"name": "user",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
@@ -681,6 +643,122 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/routes": {
|
||||
"get": {
|
||||
"summary": "--- Route start ---",
|
||||
"operationId": "HeadscaleService_GetRoutes",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1GetRoutesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/routes/{routeId}": {
|
||||
"delete": {
|
||||
"operationId": "HeadscaleService_DeleteRoute",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1DeleteRouteResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "routeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/routes/{routeId}/disable": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_DisableRoute",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1DisableRouteResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "routeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/routes/{routeId}/enable": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_EnableRoute",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1EnableRouteResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "routeId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/user": {
|
||||
"get": {
|
||||
"operationId": "HeadscaleService_ListUsers",
|
||||
@@ -829,19 +907,7 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
}
|
||||
},
|
||||
"HeadscaleServiceSetApprovedRoutesBody": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"routes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -940,8 +1006,7 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
"type": "string"
|
||||
},
|
||||
"reusable": {
|
||||
"type": "boolean"
|
||||
@@ -1028,9 +1093,18 @@
|
||||
"v1DeleteNodeResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1DeleteRouteResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1DeleteUserResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1DisableRouteResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1EnableRouteResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1ExpireApiKeyRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -1054,8 +1128,7 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
"type": "string"
|
||||
},
|
||||
"key": {
|
||||
"type": "string"
|
||||
@@ -1073,6 +1146,18 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1GetNodeRoutesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"routes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"$ref": "#/definitions/v1Route"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1GetPolicyResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -1085,6 +1170,18 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1GetRoutesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"routes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"$ref": "#/definitions/v1Route"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1ListApiKeysResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -1210,24 +1307,6 @@
|
||||
},
|
||||
"online": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"approvedRoutes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"availableRoutes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"subnetRoutes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -1235,11 +1314,10 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"user": {
|
||||
"$ref": "#/definitions/v1User"
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
"type": "string"
|
||||
},
|
||||
"key": {
|
||||
"type": "string"
|
||||
@@ -1303,11 +1381,39 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1SetApprovedRoutesResponse": {
|
||||
"v1Route": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
"node": {
|
||||
"$ref": "#/definitions/v1Node"
|
||||
},
|
||||
"prefix": {
|
||||
"type": "string"
|
||||
},
|
||||
"advertised": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"isPrimary": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"createdAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"updatedAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"deletedAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
44
gen/openapiv2/headscale/v1/routes.swagger.json
Normal file
@@ -0,0 +1,44 @@
{
  "swagger": "2.0",
  "info": {
    "title": "headscale/v1/routes.proto",
    "version": "version not set"
  },
  "consumes": [
    "application/json"
  ],
  "produces": [
    "application/json"
  ],
  "paths": {},
  "definitions": {
    "protobufAny": {
      "type": "object",
      "properties": {
        "@type": {
          "type": "string"
        }
      },
      "additionalProperties": {}
    },
    "rpcStatus": {
      "type": "object",
      "properties": {
        "code": {
          "type": "integer",
          "format": "int32"
        },
        "message": {
          "type": "string"
        },
        "details": {
          "type": "array",
          "items": {
            "type": "object",
            "$ref": "#/definitions/protobufAny"
          }
        }
      }
    }
  }
}
206
go.mod
@@ -1,62 +1,56 @@
module github.com/juanfont/headscale

go 1.24.0

toolchain go1.24.2
go 1.23.1

require (
github.com/AlecAivazis/survey/v2 v2.3.7
|
||||
github.com/arl/statsviz v0.6.0
|
||||
github.com/cenkalti/backoff/v5 v5.0.2
|
||||
github.com/cenkalti/backoff/v4 v4.3.0
|
||||
github.com/chasefleming/elem-go v0.30.0
|
||||
github.com/coder/websocket v1.8.13
|
||||
github.com/coreos/go-oidc/v3 v3.14.1
|
||||
github.com/creachadair/command v0.1.22
|
||||
github.com/creachadair/flax v0.0.5
|
||||
github.com/coder/websocket v1.8.12
|
||||
github.com/coreos/go-oidc/v3 v3.11.0
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
||||
github.com/docker/docker v28.2.2+incompatible
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/fsnotify/fsnotify v1.8.0
|
||||
github.com/glebarez/sqlite v1.11.0
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.4
|
||||
github.com/gofrs/uuid/v5 v5.3.2
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.3
|
||||
github.com/gofrs/uuid/v5 v5.3.0
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0
|
||||
github.com/jagottsicher/termcolor v1.0.2
|
||||
github.com/klauspost/compress v1.18.0
|
||||
github.com/klauspost/compress v1.17.11
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25
|
||||
github.com/ory/dockertest/v3 v3.12.0
|
||||
github.com/ory/dockertest/v3 v3.11.0
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1
|
||||
github.com/pkg/profile v1.7.0
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/common v0.65.0
|
||||
github.com/pterm/pterm v0.12.81
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0
|
||||
github.com/rs/zerolog v1.34.0
|
||||
github.com/samber/lo v1.51.0
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/prometheus/common v0.61.0
|
||||
github.com/pterm/pterm v0.12.80
|
||||
github.com/puzpuzpuz/xsync/v3 v3.4.0
|
||||
github.com/rs/zerolog v1.33.0
|
||||
github.com/samber/lo v1.47.0
|
||||
github.com/sasha-s/go-deadlock v0.3.5
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/viper v1.20.1
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/viper v1.20.0-alpha.6
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97
|
||||
github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b
|
||||
github.com/tailscale/tailsql v0.0.0-20241211062219-bf96884c6a49
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
|
||||
golang.org/x/crypto v0.39.0
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
|
||||
golang.org/x/net v0.41.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
golang.org/x/sync v0.15.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822
|
||||
google.golang.org/grpc v1.73.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
golang.org/x/crypto v0.32.0
|
||||
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
|
||||
golang.org/x/net v0.34.0
|
||||
golang.org/x/oauth2 v0.25.0
|
||||
golang.org/x/sync v0.10.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484
|
||||
google.golang.org/grpc v1.69.0
|
||||
google.golang.org/protobuf v1.36.0
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gorm.io/driver/postgres v1.6.0
|
||||
gorm.io/gorm v1.30.0
|
||||
tailscale.com v1.84.2
|
||||
zgo.at/zcache/v2 v2.2.0
|
||||
gorm.io/driver/postgres v1.5.11
|
||||
gorm.io/gorm v1.25.12
|
||||
tailscale.com v1.80.0
|
||||
zgo.at/zcache/v2 v2.1.0
|
||||
zombiezen.com/go/postgrestest v1.0.1
|
||||
)
|
||||
|
||||
@@ -78,10 +72,10 @@ require (
|
||||
// together, e.g:
|
||||
// go get modernc.org/libc@v1.55.3 modernc.org/sqlite@v1.33.1
|
||||
require (
|
||||
modernc.org/libc v1.62.1 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.10.0 // indirect
|
||||
modernc.org/sqlite v1.37.0
|
||||
modernc.org/libc v1.55.3 // indirect
|
||||
modernc.org/mathutil v1.6.0 // indirect
|
||||
modernc.org/memory v1.8.0 // indirect
|
||||
modernc.org/sqlite v1.34.5 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -90,87 +84,83 @@ require (
|
||||
atomicgo.dev/schedule v0.1.0 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
||||
github.com/akutz/memconn v0.1.0 // indirect
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 // indirect
|
||||
github.com/aws/smithy-go v1.22.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect
|
||||
github.com/aws/smithy-go v1.20.2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.13.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/containerd/console v1.0.5 // indirect
|
||||
github.com/containerd/console v1.0.4 // indirect
|
||||
github.com/containerd/continuity v0.4.5 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
|
||||
github.com/creachadair/mds v0.24.3 // indirect
|
||||
github.com/creachadair/mds v0.20.0 // indirect
|
||||
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v28.1.1+incompatible // indirect
|
||||
github.com/docker/cli v27.4.1+incompatible // indirect
|
||||
github.com/docker/docker v27.4.1+incompatible // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/gaissmai/bart v0.18.0 // indirect
|
||||
github.com/gaissmai/bart v0.11.1 // indirect
|
||||
github.com/glebarez/go-sqlite v1.22.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.0 // indirect
|
||||
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.2 // indirect
|
||||
github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.1.2 // indirect
|
||||
github.com/google/go-github v17.0.0+incompatible // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
|
||||
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect
|
||||
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gookit/color v1.5.4 // indirect
|
||||
github.com/gorilla/csrf v1.7.3 // indirect
|
||||
github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 // indirect
|
||||
github.com/gorilla/securecookie v1.1.2 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/hashicorp/go-version v1.7.0 // indirect
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
|
||||
github.com/illarion/gonotify/v3 v3.0.2 // indirect
|
||||
github.com/illarion/gonotify/v2 v2.0.3 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jackc/pgx/v5 v5.7.4 // indirect
|
||||
github.com/jackc/pgx/v5 v5.7.1 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.4.1 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/lithammer/fuzzysearch v1.1.8 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/mdlayher/genetlink v1.3.2 // indirect
|
||||
@@ -181,64 +171,60 @@ require (
|
||||
github.com/miekg/dns v1.1.58 // indirect
|
||||
github.com/mitchellh/go-ps v1.0.0 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/sys/atomicwriter v0.1.0 // indirect
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/moby/sys/user v0.3.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/opencontainers/runc v1.3.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/opencontainers/runc v1.2.3 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.21 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus-community/pro-bing v0.4.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
github.com/rogpeppe/go-internal v1.13.1 // indirect
|
||||
github.com/safchain/ethtool v0.3.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.9.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.6.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.14.0 // indirect
|
||||
github.com/spf13/cast v1.8.0 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/cast v1.7.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
|
||||
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
|
||||
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 // indirect
|
||||
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect
|
||||
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
|
||||
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc // indirect
|
||||
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d // indirect
|
||||
github.com/tailscale/setec v0.0.0-20240930150730-e6eb93658ed3 // indirect
|
||||
github.com/tailscale/squibble v0.0.0-20240909231413-32a80b9743f7 // indirect
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 // indirect
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 // indirect
|
||||
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 // indirect
|
||||
github.com/vishvananda/netns v0.0.4 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect
|
||||
golang.org/x/mod v0.25.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/time v0.10.0 // indirect
|
||||
golang.org/x/tools v0.33.0 // indirect
|
||||
golang.org/x/mod v0.22.0 // indirect
|
||||
golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab // indirect
|
||||
golang.org/x/term v0.28.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.29.0 // indirect
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 // indirect
|
||||
)
|
||||
|
||||
541
go.sum
@@ -1,5 +1,3 @@
|
||||
9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f h1:1C7nZuxUMNz7eiQALRfiqNOm04+m3edWlRff/BYHf0Q=
|
||||
9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f/go.mod h1:hHyrZRryGqVdqrknjq5OWDLGCTJ2NeEvtrpR96mjraM=
|
||||
atomicgo.dev/assert v0.0.2 h1:FiKeMiZSgRrZsPo9qn/7vmr7mCsh5SZyXY4YGYiYwrg=
|
||||
atomicgo.dev/assert v0.0.2/go.mod h1:ut4NcI3QDdJtlmAxQULOmA13Gz6e2DWbSAS8RUOmNYQ=
|
||||
atomicgo.dev/cursor v0.2.0 h1:H6XN5alUJ52FZZUkI7AlJbUc1aW38GWZalpYRPpoPOw=
|
||||
@@ -8,6 +6,7 @@ atomicgo.dev/keyboard v0.2.9 h1:tOsIid3nlPLZ3lwgG8KZMp/SFmr7P0ssEN5JUsm78K8=
|
||||
atomicgo.dev/keyboard v0.2.9/go.mod h1:BC4w9g00XkxH/f1HXhW2sXmJFOCWbKn9xrOunSFtExQ=
|
||||
atomicgo.dev/schedule v0.1.0 h1:nTthAbhZS5YZmgYbb2+DH8uQIZcTlIrd4eYr3UQxEjs=
|
||||
atomicgo.dev/schedule v0.1.0/go.mod h1:xeUa3oAkiuHYh8bKiQBRojqAMq3PXXbJujjb0hw8pEU=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
@@ -16,8 +15,9 @@ filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc=
|
||||
filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/MarvinJWendt/testza v0.1.0/go.mod h1:7AxNvlfeHP7Z/hDQ5JtE3OKYT3XFUeLCDE2DQninSqs=
|
||||
@@ -41,53 +41,53 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/arl/statsviz v0.6.0 h1:jbW1QJkEYQkufd//4NDYRSNBpwJNrdzPahF7ZmoGdyE=
|
||||
github.com/arl/statsviz v0.6.0/go.mod h1:0toboo+YGSUXDaS4g1D5TVS4dXs7S7YYT5J/qnW2h8s=
|
||||
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 h1:JBod0SnNqcWQ0+uAyzeRFG1zCHotW8DukumYYyNy0zo=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 h1:IOdss+igJDFdic9w3WKwxGCmHqUxydvIhJOm9LJ32Dk=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w=
|
||||
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
|
||||
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
|
||||
github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
|
||||
github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
|
||||
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chasefleming/elem-go v0.30.0 h1:BlhV1ekv1RbFiM8XZUQeln1Ikb4D+bu2eDO4agREvok=
|
||||
@@ -101,35 +101,25 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
||||
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
|
||||
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
|
||||
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
|
||||
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||
github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok=
|
||||
github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo=
github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=
github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc/v3 v3.14.1 h1:9ePWwfdwC4QKRlCXsJGou56adA/owXczOzwKdOumLqk=
github.com/coreos/go-oidc/v3 v3.14.1/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creachadair/command v0.1.22 h1:WmdrURwZdmPD1jm13SjKooaMoqo7mW1qI2BPCShs154=
github.com/creachadair/command v0.1.22/go.mod h1:YFc+OMGucqTpxwQg/iJnNg8BMNmRPDK60rYy8ckgKwE=
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
github.com/creachadair/mds v0.24.3 h1:X7cM2ymZSyl4IVWnfyXLxRXMJ6awhbcWvtLPhfnTaqI=
github.com/creachadair/mds v0.24.3/go.mod h1:0oeHt9QWu8VfnmskOL4zi2CumjEvB29ScmtOmdrhFeU=
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creachadair/mds v0.20.0 h1:bXQO154c2TDgCY+rRmdIfUqjeqGYejmZ/QayeTNwbp8=
github.com/creachadair/mds v0.20.0/go.mod h1:4b//mUiL8YldH6TImXjmW45myzTLNS1LLjOmrk888eg=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0=
@@ -142,14 +132,12 @@ github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 h1:vrC07UZcgPzu/Oj
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw=
github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI=
github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4=
github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -158,34 +146,37 @@ github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI=
github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gaissmai/bart v0.18.0 h1:jQLBT/RduJu0pv/tLwXE+xKPgtWJejbxuXAR+wLJafo=
github.com/gaissmai/bart v0.18.0/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY=
github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc=
github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg=
github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I=
github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo=
github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
github.com/go-gormigrate/gormigrate/v2 v2.1.4 h1:KOPEt27qy1cNzHfMZbp9YTmEuzkY4F4wrdsJW9WFk1U=
github.com/go-gormigrate/gormigrate/v2 v2.1.4/go.mod h1:y/6gPAH6QGAgP1UfHMiXcqGeJ88/GRQbfCReE1JJD5Y=
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY=
github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw=
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY=
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-gormigrate/gormigrate/v2 v2.1.3 h1:ei3Vq/rpPI/jCJY9mRHJAKg5vU+EhZyWhBAkaAomQuw=
github.com/go-gormigrate/gormigrate/v2 v2.1.3/go.mod h1:VJ9FIOBAur+NmQ8c4tDVwOuiJcgupTG105FexPFrXzA=
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84=
github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
@@ -194,46 +185,49 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo=
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0=
github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
github.com/gofrs/uuid/v5 v5.3.0 h1:m0mUMr+oVYUdxpMLgSYCZiXe7PuVPnI94+OMeVBNedk=
github.com/gofrs/uuid/v5 v5.3.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I=
github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI=
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -242,16 +236,16 @@ github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQ
github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0=
github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w=
github.com/gorilla/csrf v1.7.3 h1:BHWt6FTLZAb2HtWT5KDBf6qgpZzvtbp9QWDRKZMXJC0=
github.com/gorilla/csrf v1.7.3/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk=
github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M=
github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 h1:+epNPbD5EqgpEMm5wrl4Hqts3jZt8+kYaqUisuuIGTk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
@@ -260,8 +254,8 @@ github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk=
github.com/illarion/gonotify/v3 v3.0.2/go.mod h1:HWGPdPe817GfvY3w7cx6zkbzNZfi3QjcBm/wgVvEL1U=
github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A=
github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914 h1:kD8PseueGeYiid/Mmcv17Q0Qqicc4F46jcX22L/e/Hs=
@@ -270,8 +264,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg=
github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs=
github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=
@@ -293,13 +287,14 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ=
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
@@ -322,9 +317,8 @@ github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8
github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@@ -350,16 +344,10 @@ github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
@@ -370,23 +358,25 @@ github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 h1:9bCMuD3Tc
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25/go.mod h1:eDjgYHYDJbPLBLsyZ6qRaugP0mX8vePOhZ5id1fdzJw=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI=
github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/runc v1.2.3 h1:fxE7amCzfZflJO2lHXf4y/y8M1BoAqp+FVmG19oYB80=
github.com/opencontainers/runc v1.2.3/go.mod h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw=
github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/ory/dockertest/v3 v3.11.0 h1:OiHcxKAvSDUwsEVh2BjxQQc/5EHz9n0va9awCtNGuyA=
github.com/ory/dockertest/v3 v3.11.0/go.mod h1:VIPxS1gwT9NpPOrfD3rACs8Y9Z7yhzO4SB194iUDnUI=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a h1:S+AGcmAESQ0pXCUNnRH7V+bOUIgkSX5qVt2cNKCrm0Q=
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43 h1:ah1dvbqPMN5+ocrg/ZSgZ6k8bOk+kcZQ7fnyx6UvOm4=
github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
@@ -398,12 +388,13 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
@@ -413,51 +404,54 @@ github.com/pterm/pterm v0.12.31/go.mod h1:32ZAWZVXD7ZfG0s8qqHXePte42kdz8ECtRyEej
github.com/pterm/pterm v0.12.33/go.mod h1:x+h2uL+n7CP/rel9+bImHD5lF3nM9vJj80k9ybiiTTE=
github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8=
github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
github.com/pterm/pterm v0.12.81 h1:ju+j5I2++FO1jBKMmscgh5h5DPFDFMB7epEjSoKehKA=
github.com/pterm/pterm v0.12.81/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/pterm/pterm v0.12.80 h1:mM55B+GnKUnLMUSqhdINe4s6tOuVQIetQ3my8JGyAIg=
github.com/pterm/pterm v0.12.80/go.mod h1:c6DeF9bSnOSeFPZlfs4ZRAFcf5SCoTwvwQ5xaKGQlHo=
github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4=
github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI=
github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU=
github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk=
github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.0-alpha.6 h1:f65Cr/+2qk4GfHC0xqT/isoupQppwN5+VLRztUGTDbY=
github.com/spf13/viper v1.20.0-alpha.6/go.mod h1:CGBZzv0c9fOUASm6rfus4wdeIjR/04NOLq1P4KRhX3k=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -472,28 +466,28 @@ github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4=
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4=
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg=
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM=
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw=
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio=
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8=
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33 h1:idh63uw+gsG05HwjZsAENCG4KZfyvjK03bpjxa5qRRk=
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo=
github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b h1:MNaGusDfB1qxEsl6iVb33Gbe777IKzPP5PDta0xGC8M=
github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc=
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d h1:mnqtPWYyvNiPU9l9tzO2YbHXU/xV664XthZYA26lOiE=
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d/go.mod h1:9BzmlFc3OLqLzLTF/5AY+BMs+clxMqyhSGzgXIm8mNI=
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694 h1:95eIP97c88cqAFU/8nURjgI9xxPbD+Ci6mY/a79BI/w=
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694/go.mod h1:veguaG8tVg1H/JG5RfpoUW41I+O8ClPElo/fTYr8mMk=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97 h1:JJkDnrAhHvOCttk8z9xeZzcDlzzkRA7+Duxj9cwOyxk=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97/go.mod h1:9jS8HxwsP2fU4ESZ7DZL+fpH/U66EVlVMzdgznH12RM=
github.com/tailscale/setec v0.0.0-20240930150730-e6eb93658ed3 h1:Zk341hE1rcVUcDwA9XKmed2acHGGlbeFQzje6gvkuFo=
github.com/tailscale/setec v0.0.0-20240930150730-e6eb93658ed3/go.mod h1:nexjfRM8veJVJ5PTbqYI2YrUj/jbk3deffEHO3DH9Q4=
github.com/tailscale/squibble v0.0.0-20240909231413-32a80b9743f7 h1:nfklwaP8uNz2IbUygSKOQ1aDzzRRRLaIbPpnQWUUMGc=
github.com/tailscale/squibble v0.0.0-20240909231413-32a80b9743f7/go.mod h1:YH/J7n7jNZOq10nTxxPANv2ha/Eg47/6J5b7NnOYAhQ=
github.com/tailscale/tailsql v0.0.0-20241211062219-bf96884c6a49 h1:QFXXdoiYFiUS7a6DH7zE6Uacz3wMzH/1/VvWLnR9To4=
github.com/tailscale/tailsql v0.0.0-20241211062219-bf96884c6a49/go.mod h1:IX3F8T6iILmg94hZGkkOf6rmjIHJCXNVqxOpiSUwHQQ=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 h1:h/41LFTrwMxB9Xvvug0kRdQCU5TlV1+pAMQw0ZtDE3U=
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw=
github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek=
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg=
github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=
@@ -502,8 +496,8 @@ github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM=
github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok=
github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI=
github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg=
github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE=
github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs=
github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
@@ -526,26 +520,22 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek=
go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
@@ -555,20 +545,29 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ=
golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8=
golang.org/x/image v0.23.0 h1:HseQ7c2OpPKTPVzNjG5fwJsOTCiiwS4QdsYi5XU6H68=
golang.org/x/image v0.23.0/go.mod h1:wJJBTdLfCCf3tiHa1fNxpZmUI4mmoZvwMCPP0ddoNKY=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -577,21 +576,26 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -602,6 +606,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -615,8 +620,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4=
|
||||
golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -624,8 +629,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
|
||||
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
@@ -633,18 +638,23 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
|
||||
golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
||||
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
||||
golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
|
||||
golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -653,15 +663,26 @@ golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeu
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
|
||||
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
|
||||
google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
|
||||
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
@@ -674,47 +695,49 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
|
||||
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
|
||||
gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
|
||||
gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
|
||||
gorm.io/driver/postgres v1.5.11 h1:ubBVAfbKEUld/twyKZ0IYn9rSQh448EdelLYk9Mv314=
|
||||
gorm.io/driver/postgres v1.5.11/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
|
||||
gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
|
||||
gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM=
|
||||
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
|
||||
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
|
||||
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8=
|
||||
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I=
|
||||
honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs=
|
||||
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
|
||||
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||
modernc.org/cc/v4 v4.25.2 h1:T2oH7sZdGvTaie0BRNFbIYsabzCxUQg8nLqCdQ2i0ic=
|
||||
modernc.org/cc/v4 v4.25.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.25.1 h1:TFSzPrAGmDsdnhT9X2UrcPMI3N/mJ9/X9ykKXwLhDsU=
|
||||
modernc.org/ccgo/v4 v4.25.1/go.mod h1:njjuAYiPflywOOrm3B7kCB444ONP5pAVr8PIEoE0uDw=
|
||||
modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ=
|
||||
modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
|
||||
modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y=
|
||||
modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s=
|
||||
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
|
||||
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
|
||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||
modernc.org/libc v1.62.1 h1:s0+fv5E3FymN8eJVmnk0llBe6rOxCu/DEU+XygRbS8s=
|
||||
modernc.org/libc v1.62.1/go.mod h1:iXhATfJQLjG3NWy56a6WVU73lWOcdYVxsvwCgoPljuo=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.10.0 h1:fzumd51yQ1DxcOxSO+S6X7+QTuVU+n8/Aj7swYjFfC4=
|
||||
modernc.org/memory v1.10.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.37.0 h1:s1TMe7T3Q3ovQiK2Ouz4Jwh7dw4ZDqbebSDTlSJdfjI=
|
||||
modernc.org/sqlite v1.37.0/go.mod h1:5YiWv+YviqGMuGw4V+PNplcyaJ5v+vQd7TQOgkACoJM=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
|
||||
modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
|
||||
modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U=
|
||||
modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w=
|
||||
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
|
||||
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
|
||||
modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
|
||||
modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
|
||||
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
|
||||
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
||||
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
|
||||
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
|
||||
modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g=
|
||||
modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE=
|
||||
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
|
||||
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
|
||||
tailscale.com v1.84.2 h1:v6aM4RWUgYiV52LRAx6ET+dlGnvO/5lnqPXb7/pMnR0=
|
||||
tailscale.com v1.84.2/go.mod h1:6/S63NMAhmncYT/1zIPDJkvCuZwMw+JnUuOfSPNazpo=
|
||||
zgo.at/zcache/v2 v2.2.0 h1:K29/IPjMniZfveYE+IRXfrl11tMzHkIPuyGrfVZ2fGo=
|
||||
zgo.at/zcache/v2 v2.2.0/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
|
||||
tailscale.com v1.80.0 h1:7joWtDtdHEHJvGmOag10RNITKp1I4Ts7Hrn6pU33/1I=
|
||||
tailscale.com v1.80.0/go.mod h1:4tasV1xjJAMHuX2xWMWAnXEmlrAA6M3w1xnc32DlpMk=
|
||||
zgo.at/zcache/v2 v2.1.0 h1:USo+ubK+R4vtjw4viGzTe/zjXyPw6R7SK/RL3epBBxs=
|
||||
zgo.at/zcache/v2 v2.1.0/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
|
||||
zombiezen.com/go/postgrestest v1.0.1 h1:aXoADQAJmZDU3+xilYVut0pHhgc0sF8ZspPW9gFNwP4=
|
||||
zombiezen.com/go/postgrestest v1.0.1/go.mod h1:marlZezr+k2oSJrvXHnZUs1olHqpE9czlz8ZYkVxliQ=
|
||||
|
||||
363 hscontrol/app.go
@@ -5,6 +5,7 @@ import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof" // nolint
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/gorilla/mux"
|
||||
grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
grpcRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
@@ -29,11 +31,12 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/dns"
|
||||
"github.com/juanfont/headscale/hscontrol/mapper"
|
||||
"github.com/juanfont/headscale/hscontrol/notifier"
|
||||
"github.com/juanfont/headscale/hscontrol/state"
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
zerolog "github.com/philip-bui/grpc-zerolog"
|
||||
"github.com/pkg/profile"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
zl "github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"golang.org/x/crypto/acme"
|
||||
@@ -47,11 +50,13 @@ import (
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/reflection"
|
||||
"google.golang.org/grpc/status"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/util/dnsname"
|
||||
zcache "zgo.at/zcache/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -69,22 +74,32 @@ const (
|
||||
updateInterval = 5 * time.Second
|
||||
privateKeyFileMode = 0o600
|
||||
headscaleDirPerm = 0o700
|
||||
|
||||
registerCacheExpiration = time.Minute * 15
|
||||
registerCacheCleanup = time.Minute * 20
|
||||
)
|
||||
|
||||
// Headscale represents the base app of the service.
|
||||
type Headscale struct {
|
||||
cfg *types.Config
|
||||
state *state.State
|
||||
db *db.HSDatabase
|
||||
ipAlloc *db.IPAllocator
|
||||
noisePrivateKey *key.MachinePrivate
|
||||
ephemeralGC *db.EphemeralGarbageCollector
|
||||
|
||||
DERPMap *tailcfg.DERPMap
|
||||
DERPServer *derpServer.DERPServer
|
||||
|
||||
// Things that generate changes
|
||||
polManOnce sync.Once
|
||||
polMan policy.PolicyManager
|
||||
extraRecordMan *dns.ExtraRecordsMan
|
||||
mapper *mapper.Mapper
|
||||
nodeNotifier *notifier.Notifier
|
||||
authProvider AuthProvider
|
||||
|
||||
mapper *mapper.Mapper
|
||||
nodeNotifier *notifier.Notifier
|
||||
|
||||
registrationCache *zcache.Cache[types.RegistrationID, types.RegisterNode]
|
||||
|
||||
authProvider AuthProvider
|
||||
|
||||
pollNetMapStreamWG sync.WaitGroup
|
||||
}
|
||||
@@ -109,42 +124,42 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
|
||||
}
|
||||
|
||||
s, err := state.NewState(cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("init state: %w", err)
|
||||
}
|
||||
registrationCache := zcache.New[types.RegistrationID, types.RegisterNode](
|
||||
registerCacheExpiration,
|
||||
registerCacheCleanup,
|
||||
)
|
||||
|
||||
app := Headscale{
|
||||
cfg: cfg,
|
||||
noisePrivateKey: noisePrivateKey,
|
||||
registrationCache: registrationCache,
|
||||
pollNetMapStreamWG: sync.WaitGroup{},
|
||||
nodeNotifier: notifier.NewNotifier(cfg),
|
||||
state: s,
|
||||
}
|
||||
|
||||
// Initialize ephemeral garbage collector
|
||||
ephemeralGC := db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
|
||||
node, err := app.state.GetNodeByID(ni)
|
||||
if err != nil {
|
||||
log.Err(err).Uint64("node.id", ni.Uint64()).Msgf("failed to get ephemeral node for deletion")
|
||||
return
|
||||
}
|
||||
app.db, err = db.NewHeadscaleDatabase(
|
||||
cfg.Database,
|
||||
cfg.BaseDomain,
|
||||
registrationCache,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
policyChanged, err := app.state.DeleteNode(node)
|
||||
if err != nil {
|
||||
app.ipAlloc, err = db.NewIPAllocator(app.db, cfg.PrefixV4, cfg.PrefixV6, cfg.IPAllocation)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
app.ephemeralGC = db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
|
||||
if err := app.db.DeleteEphemeralNode(ni); err != nil {
|
||||
log.Err(err).Uint64("node.id", ni.Uint64()).Msgf("failed to delete ephemeral node")
|
||||
return
|
||||
}
|
||||
|
||||
// Send policy update notifications if needed
|
||||
if policyChanged {
|
||||
ctx := types.NotifyCtx(context.Background(), "ephemeral-gc-policy", node.Hostname)
|
||||
app.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
|
||||
}
|
||||
|
||||
log.Debug().Uint64("node.id", ni.Uint64()).Msgf("deleted ephemeral node")
|
||||
})
|
||||
app.ephemeralGC = ephemeralGC
|
||||
|
||||
if err = app.loadPolicyManager(); err != nil {
|
||||
return nil, fmt.Errorf("failed to load ACL policy: %w", err)
|
||||
}
|
||||
|
||||
var authProvider AuthProvider
|
||||
authProvider = NewAuthProviderWeb(cfg.ServerURL)
|
||||
@@ -155,8 +170,10 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
ctx,
|
||||
cfg.ServerURL,
|
||||
&cfg.OIDC,
|
||||
app.state,
|
||||
app.db,
|
||||
app.nodeNotifier,
|
||||
app.ipAlloc,
|
||||
app.polMan,
|
||||
)
|
||||
if err != nil {
|
||||
if cfg.OIDC.OnlyStartIfOIDCIsAvailable {
|
||||
@@ -175,14 +192,10 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
|
||||
var magicDNSDomains []dnsname.FQDN
|
||||
if cfg.PrefixV4 != nil {
|
||||
magicDNSDomains = append(
|
||||
magicDNSDomains,
|
||||
util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
|
||||
magicDNSDomains = append(magicDNSDomains, util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
|
||||
}
|
||||
if cfg.PrefixV6 != nil {
|
||||
magicDNSDomains = append(
|
||||
magicDNSDomains,
|
||||
util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...)
|
||||
magicDNSDomains = append(magicDNSDomains, util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...)
|
||||
}
|
||||
|
||||
// we might have routes already from Split DNS
|
||||
@@ -207,14 +220,6 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
)
|
||||
}
|
||||
|
||||
if cfg.DERP.ServerVerifyClients {
|
||||
t := http.DefaultTransport.(*http.Transport) //nolint:forcetypeassert
|
||||
t.RegisterProtocol(
|
||||
derpServer.DerpVerifyScheme,
|
||||
derpServer.NewDERPVerifyTransport(app.handleVerifyRequest),
|
||||
)
|
||||
}
|
||||
|
||||
embeddedDERPServer, err := derpServer.NewDERPServer(
|
||||
cfg.ServerURL,
|
||||
key.NodePrivate(*derpServerKey),
|
||||
@@ -265,7 +270,14 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
var update types.StateUpdate
|
||||
var changed bool
|
||||
|
||||
lastExpiryCheck, update, changed = h.state.ExpireExpiredNodes(lastExpiryCheck)
|
||||
if err := h.db.Write(func(tx *gorm.DB) error {
|
||||
lastExpiryCheck, update, changed = db.ExpireExpiredNodes(tx, lastExpiryCheck)
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Error().Err(err).Msg("database error while expiring nodes")
|
||||
continue
|
||||
}
|
||||
|
||||
if changed {
|
||||
log.Trace().Interface("nodes", update.ChangePatches).Msgf("expiring nodes")
|
||||
@@ -276,16 +288,16 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
|
||||
case <-derpTickerChan:
|
||||
log.Info().Msg("Fetching DERPMap updates")
|
||||
derpMap := derp.GetDERPMap(h.cfg.DERP)
|
||||
h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
|
||||
if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
|
||||
region, _ := h.DERPServer.GenerateRegion()
|
||||
derpMap.Regions[region.RegionID] = ®ion
|
||||
h.DERPMap.Regions[region.RegionID] = ®ion
|
||||
}
|
||||
|
||||
ctx := types.NotifyCtx(context.Background(), "derpmap-update", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StateDERPUpdated,
|
||||
DERPMap: derpMap,
|
||||
DERPMap: h.DERPMap,
|
||||
})
|
||||
|
||||
case records, ok := <-extraRecordsUpdate:
|
||||
@@ -295,9 +307,11 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
h.cfg.TailcfgDNSConfig.ExtraRecords = records
|
||||
|
||||
ctx := types.NotifyCtx(context.Background(), "dns-extrarecord", "all")
|
||||
// TODO(kradalby): We can probably do better than sending a full update here,
|
||||
// but for now this will ensure that all of the nodes get the new records.
|
||||
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
// TODO(kradalby): We can probably do better than sending a full update here,
|
||||
// but for now this will ensure that all of the nodes get the new records.
|
||||
Type: types.StateFullUpdate,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -344,7 +358,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
)
|
||||
}
|
||||
|
||||
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
|
||||
valid, err := h.db.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
|
||||
if err != nil {
|
||||
return ctx, status.Error(codes.Internal, "failed to validate token")
|
||||
}
|
||||
@@ -389,7 +403,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
return
|
||||
}
|
||||
|
||||
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
|
||||
valid, err := h.db.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
@@ -445,13 +459,11 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
router := mux.NewRouter()
|
||||
router.Use(prometheusMiddleware)
|
||||
|
||||
router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).
|
||||
Methods(http.MethodPost, http.MethodGet)
|
||||
router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).Methods(http.MethodPost, http.MethodGet)
|
||||
|
||||
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).
|
||||
Methods(http.MethodGet)
|
||||
router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).Methods(http.MethodGet)
|
||||
|
||||
if provider, ok := h.authProvider.(*AuthProviderOIDC); ok {
|
||||
router.HandleFunc("/oidc/callback", provider.OIDCCallbackHandler).Methods(http.MethodGet)
|
||||
@@ -472,7 +484,7 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
router.HandleFunc("/derp", h.DERPServer.DERPHandler)
|
||||
router.HandleFunc("/derp/probe", derpServer.DERPProbeHandler)
|
||||
router.HandleFunc("/derp/latency-check", derpServer.DERPProbeHandler)
|
||||
router.HandleFunc("/bootstrap-dns", derpServer.DERPBootstrapDNSHandler(h.state.DERPMap()))
|
||||
router.HandleFunc("/bootstrap-dns", derpServer.DERPBootstrapDNSHandler(h.DERPMap))
|
||||
}
|
||||
|
||||
apiRouter := router.PathPrefix("/api").Subrouter()
|
||||
@@ -484,57 +496,54 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
return router
|
||||
}
|
||||
|
||||
// // TODO(kradalby): Do a variant of this, and polman which only updates the node that has changed.
|
||||
// // Maybe we should attempt a new in memory state and not go via the DB?
|
||||
// // Maybe this should be implemented as an event bus?
|
||||
// // A bool is returned indicating if a full update was sent to all nodes
|
||||
// func usersChangedHook(db *db.HSDatabase, polMan policy.PolicyManager, notif *notifier.Notifier) error {
|
||||
// users, err := db.ListUsers()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// TODO(kradalby): Do a variant of this, and polman which only updates the node that has changed.
|
||||
// Maybe we should attempt a new in memory state and not go via the DB?
|
||||
func usersChangedHook(db *db.HSDatabase, polMan policy.PolicyManager, notif *notifier.Notifier) error {
|
||||
users, err := db.ListUsers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// changed, err := polMan.SetUsers(users)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
changed, err := polMan.SetUsers(users)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if changed {
|
||||
// ctx := types.NotifyCtx(context.Background(), "acl-users-change", "all")
|
||||
// notif.NotifyAll(ctx, types.UpdateFull())
|
||||
// }
|
||||
if changed {
|
||||
ctx := types.NotifyCtx(context.Background(), "acl-users-change", "all")
|
||||
notif.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StateFullUpdate,
|
||||
})
|
||||
}
|
||||
|
||||
// return nil
|
||||
// }
|
||||
return nil
|
||||
}
|
||||
|
||||
// // TODO(kradalby): Do a variant of this, and polman which only updates the node that has changed.
|
||||
// // Maybe we should attempt a new in memory state and not go via the DB?
|
||||
// // Maybe this should be implemented as an event bus?
|
||||
// // A bool is returned indicating if a full update was sent to all nodes
|
||||
// func nodesChangedHook(
|
||||
// db *db.HSDatabase,
|
||||
// polMan policy.PolicyManager,
|
||||
// notif *notifier.Notifier,
|
||||
// ) (bool, error) {
|
||||
// nodes, err := db.ListNodes()
|
||||
// if err != nil {
|
||||
// return false, err
|
||||
// }
|
||||
// TODO(kradalby): Do a variant of this, and polman which only updates the node that has changed.
|
||||
// Maybe we should attempt a new in memory state and not go via the DB?
|
||||
// A bool is returned indicating if a full update was sent to all nodes
|
||||
func nodesChangedHook(db *db.HSDatabase, polMan policy.PolicyManager, notif *notifier.Notifier) (bool, error) {
|
||||
nodes, err := db.ListNodes()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// filterChanged, err := polMan.SetNodes(nodes)
|
||||
// if err != nil {
|
||||
// return false, err
|
||||
// }
|
||||
filterChanged, err := polMan.SetNodes(nodes)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// if filterChanged {
|
||||
// ctx := types.NotifyCtx(context.Background(), "acl-nodes-change", "all")
|
||||
// notif.NotifyAll(ctx, types.UpdateFull())
|
||||
if filterChanged {
|
||||
ctx := types.NotifyCtx(context.Background(), "acl-nodes-change", "all")
|
||||
notif.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StateFullUpdate,
|
||||
})
|
||||
|
||||
// return true, nil
|
||||
// }
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// return false, nil
|
||||
// }
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Serve launches the HTTP and gRPC server service Headscale and the API.
|
||||
func (h *Headscale) Serve() error {
|
||||
@@ -557,15 +566,15 @@ func (h *Headscale) Serve() error {
|
||||
spew.Dump(h.cfg)
|
||||
}
|
||||
|
||||
log.Info().Str("version", types.Version).Str("commit", types.GitCommitHash).Msg("Starting Headscale")
|
||||
log.Info().
|
||||
Caller().
|
||||
Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).
|
||||
Msg("Clients with a lower minimum version will be rejected")
|
||||
|
||||
// Fetch an initial DERP Map before we start serving
|
||||
h.mapper = mapper.NewMapper(h.state, h.cfg, h.nodeNotifier)
|
||||
h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
|
||||
h.mapper = mapper.NewMapper(h.db, h.cfg, h.DERPMap, h.nodeNotifier, h.polMan)
|
||||
|
||||
// TODO(kradalby): fix state part.
|
||||
if h.cfg.DERP.ServerEnabled {
|
||||
// When embedded DERP is enabled we always need a STUN server
|
||||
if h.cfg.DERP.STUNAddr == "" {
|
||||
@@ -578,13 +587,13 @@ func (h *Headscale) Serve() error {
|
||||
}
|
||||
|
||||
if h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
|
||||
h.state.DERPMap().Regions[region.RegionID] = ®ion
|
||||
h.DERPMap.Regions[region.RegionID] = ®ion
|
||||
}
|
||||
|
||||
go h.DERPServer.ServeSTUN()
|
||||
}
|
||||
|
||||
if len(h.state.DERPMap().Regions) == 0 {
|
||||
if len(h.DERPMap.Regions) == 0 {
|
||||
return errEmptyInitialDERPMap
|
||||
}
|
||||
|
||||
@@ -593,7 +602,7 @@ func (h *Headscale) Serve() error {
|
||||
// around between restarts, they will reconnect and the GC will
|
||||
// be cancelled.
|
||||
go h.ephemeralGC.Start()
|
||||
ephmNodes, err := h.state.ListEphemeralNodes()
|
||||
ephmNodes, err := h.db.ListEphemeralNodes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list ephemeral nodes: %w", err)
|
||||
}
|
||||
@@ -716,10 +725,12 @@ func (h *Headscale) Serve() error {
|
||||
log.Info().Msgf("Enabling remote gRPC at %s", h.cfg.GRPCAddr)
|
||||
|
||||
grpcOptions := []grpc.ServerOption{
|
||||
grpc.ChainUnaryInterceptor(
|
||||
h.grpcAuthenticationInterceptor,
|
||||
// Uncomment to debug grpc communication.
|
||||
// zerolog.NewUnaryServerInterceptor(),
|
||||
grpc.UnaryInterceptor(
|
||||
grpcMiddleware.ChainUnaryServer(
|
||||
h.grpcAuthenticationInterceptor,
|
||||
// Uncomment to debug grpc communication.
|
||||
// zerolog.NewUnaryServerInterceptor(),
|
||||
),
|
||||
),
|
||||
}
|
||||
|
||||
@@ -781,12 +792,26 @@ func (h *Headscale) Serve() error {
|
||||
log.Info().
|
||||
Msgf("listening and serving HTTP on: %s", h.cfg.Addr)
|
||||
|
||||
debugMux := http.NewServeMux()
|
||||
debugMux.Handle("/debug/pprof/", http.DefaultServeMux)
|
||||
debugMux.HandleFunc("/debug/notifier", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(h.nodeNotifier.String()))
|
||||
})
|
||||
debugMux.Handle("/metrics", promhttp.Handler())
|
||||
|
||||
debugHTTPServer := &http.Server{
|
||||
Addr: h.cfg.MetricsAddr,
|
||||
Handler: debugMux,
|
||||
ReadTimeout: types.HTTPTimeout,
|
||||
WriteTimeout: 0,
|
||||
}
|
||||
|
||||
debugHTTPListener, err := net.Listen("tcp", h.cfg.MetricsAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
}
|
||||
|
||||
debugHTTPServer := h.debugHTTPServer()
|
||||
errorGroup.Go(func() error { return debugHTTPServer.Serve(debugHTTPListener) })
|
||||
|
||||
log.Info().
|
||||
@@ -822,16 +847,24 @@ func (h *Headscale) Serve() error {
|
||||
case syscall.SIGHUP:
|
||||
log.Info().
|
||||
Str("signal", sig.String()).
|
||||
Msg("Received SIGHUP, reloading ACL policy")
|
||||
Msg("Received SIGHUP, reloading ACL and Config")
|
||||
|
||||
if h.cfg.Policy.IsEmpty() {
|
||||
continue
|
||||
}
|
||||
|
||||
changed, err := h.state.ReloadPolicy()
|
||||
if err := h.loadPolicyManager(); err != nil {
|
||||
log.Error().Err(err).Msg("failed to reload Policy")
|
||||
}
|
||||
|
||||
pol, err := h.policyBytes()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("reloading policy")
|
||||
continue
|
||||
log.Error().Err(err).Msg("failed to get policy blob")
|
||||
}
|
||||
|
||||
changed, err := h.polMan.SetPolicy(pol)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to set new policy")
|
||||
}
|
||||
|
||||
if changed {
|
||||
@@ -839,7 +872,9 @@ func (h *Headscale) Serve() error {
|
||||
Msg("ACL policy successfully reloaded, notifying nodes of change")
|
||||
|
||||
ctx := types.NotifyCtx(context.Background(), "acl-sighup", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StateFullUpdate,
|
||||
})
|
||||
}
|
||||
default:
|
||||
info := func(msg string) { log.Info().Msg(msg) }
|
||||
@@ -896,7 +931,7 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
// Close db connections
|
||||
info("closing database connection")
|
||||
err = h.state.Close()
|
||||
err = h.db.Close()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to close db")
|
||||
}
|
||||
@@ -996,10 +1031,13 @@ func notFoundHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
body, _ := io.ReadAll(req.Body)
|
||||
|
||||
log.Trace().
|
||||
Interface("header", req.Header).
|
||||
Interface("proto", req.Proto).
|
||||
Interface("url", req.URL).
|
||||
Bytes("body", body).
|
||||
Msg("Request did not match")
|
||||
writer.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
@@ -1047,3 +1085,90 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
|
||||
|
||||
return &machineKey, nil
|
||||
}
|
||||
|
||||
// policyBytes returns the appropriate policy for the
|
||||
// current configuration as a []byte array.
|
||||
func (h *Headscale) policyBytes() ([]byte, error) {
|
||||
switch h.cfg.Policy.Mode {
|
||||
case types.PolicyModeFile:
|
||||
path := h.cfg.Policy.Path
|
||||
|
||||
// It is fine to start headscale without a policy file.
|
||||
if len(path) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
absPath := util.AbsolutePathFromConfigPath(path)
|
||||
policyFile, err := os.Open(absPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer policyFile.Close()
|
||||
|
||||
return io.ReadAll(policyFile)
|
||||
|
||||
case types.PolicyModeDB:
|
||||
p, err := h.db.GetPolicy()
|
||||
if err != nil {
|
||||
if errors.Is(err, types.ErrPolicyNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if p.Data == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return []byte(p.Data), err
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unsupported policy mode: %s", h.cfg.Policy.Mode)
|
||||
}
|
||||
|
||||
func (h *Headscale) loadPolicyManager() error {
|
||||
var errOut error
|
||||
h.polManOnce.Do(func() {
|
||||
// Validate and reject configuration that would error when applied
|
||||
// when creating a map response. This requires nodes, so there is still
|
||||
// a scenario where they might be allowed if the server has no nodes
|
||||
// yet, but it should help for the general case and for hot reloading
|
||||
// configurations.
|
||||
// Note that this check is only done for file-based policies in this function
|
||||
// as the database-based policies are checked in the gRPC API where it is not
|
||||
// allowed to be written to the database.
|
||||
nodes, err := h.db.ListNodes()
|
||||
if err != nil {
|
||||
errOut = fmt.Errorf("loading nodes from database to validate policy: %w", err)
|
||||
return
|
||||
}
|
||||
users, err := h.db.ListUsers()
|
||||
if err != nil {
|
||||
errOut = fmt.Errorf("loading users from database to validate policy: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
pol, err := h.policyBytes()
|
||||
if err != nil {
|
||||
errOut = fmt.Errorf("loading policy bytes: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
h.polMan, err = policy.NewPolicyManager(pol, users, nodes)
|
||||
if err != nil {
|
||||
errOut = fmt.Errorf("creating policy manager: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(nodes) > 0 {
|
||||
_, err = h.polMan.SSHPolicy(nodes[0])
|
||||
if err != nil {
|
||||
errOut = fmt.Errorf("verifying SSH rules: %w", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
return errOut
|
||||
}
|
||||
|
||||
@@ -9,7 +9,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/db"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
@@ -26,7 +28,7 @@ func (h *Headscale) handleRegister(
|
||||
regReq tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
node, err := h.state.GetNodeByNodeKey(regReq.NodeKey)
|
||||
node, err := h.db.GetNodeByNodeKey(regReq.NodeKey)
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, fmt.Errorf("looking up node in database: %w", err)
|
||||
}
|
||||
@@ -41,7 +43,10 @@ func (h *Headscale) handleRegister(
|
||||
}
|
||||
|
||||
if regReq.Followup != "" {
|
||||
return h.waitForFollowup(ctx, regReq)
|
||||
// TODO(kradalby): Does this need to return an error of some sort?
|
||||
// Maybe if the registration fails down the line it can be sent
|
||||
// on the channel and returned here?
|
||||
h.waitForFollowup(ctx, regReq)
|
||||
}
|
||||
|
||||
if regReq.Auth != nil && regReq.Auth.AuthKey != "" {
|
||||
@@ -82,143 +87,176 @@ func (h *Headscale) handleExistingNode(
|
||||
// If the request expiry is in the past, we consider it a logout.
|
||||
if requestExpiry.Before(time.Now()) {
|
||||
if node.IsEphemeral() {
|
||||
policyChanged, err := h.state.DeleteNode(node)
|
||||
changedNodes, err := h.db.DeleteNode(node, h.nodeNotifier.LikelyConnectedMap())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("deleting ephemeral node: %w", err)
|
||||
}
|
||||
|
||||
// Send policy update notifications if needed
|
||||
if policyChanged {
|
||||
ctx := types.NotifyCtx(context.Background(), "auth-logout-ephemeral-policy", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
|
||||
} else {
|
||||
ctx := types.NotifyCtx(context.Background(), "logout-ephemeral", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.UpdatePeerRemoved(node.ID))
|
||||
ctx := types.NotifyCtx(context.Background(), "logout-ephemeral", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerRemoved,
|
||||
Removed: []types.NodeID{node.ID},
|
||||
})
|
||||
if changedNodes != nil {
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: changedNodes,
|
||||
})
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
expired = true
|
||||
}
|
||||
|
||||
n, policyChanged, err := h.state.SetNodeExpiry(node.ID, requestExpiry)
|
||||
err := h.db.NodeSetExpiry(node.ID, requestExpiry)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting node expiry: %w", err)
|
||||
}
|
||||
|
||||
// Send policy update notifications if needed
|
||||
if policyChanged {
|
||||
ctx := types.NotifyCtx(context.Background(), "auth-expiry-policy", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
|
||||
} else {
|
||||
ctx := types.NotifyCtx(context.Background(), "logout-expiry", "na")
|
||||
h.nodeNotifier.NotifyWithIgnore(ctx, types.UpdateExpire(node.ID, requestExpiry), node.ID)
|
||||
}
|
||||
|
||||
return nodeToRegisterResponse(n), nil
|
||||
ctx := types.NotifyCtx(context.Background(), "logout-expiry", "na")
|
||||
h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, requestExpiry), node.ID)
|
||||
}
|
||||
|
||||
return nodeToRegisterResponse(node), nil
|
||||
}
|
||||
|
||||
func nodeToRegisterResponse(node *types.Node) *tailcfg.RegisterResponse {
|
||||
return &tailcfg.RegisterResponse{
|
||||
// TODO(kradalby): Only send for user-owned nodes
|
||||
// and not tagged nodes when tags is working.
|
||||
User: *node.User.TailscaleUser(),
|
||||
Login: *node.User.TailscaleLogin(),
|
||||
NodeKeyExpired: node.IsExpired(),
|
||||
NodeKeyExpired: expired,
|
||||
|
||||
// Headscale does not implement the concept of machine authorization
|
||||
// so we always return true here.
|
||||
// Revisit this if #2176 gets implemented.
|
||||
MachineAuthorized: true,
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) waitForFollowup(
|
||||
ctx context.Context,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
) {
|
||||
fu, err := url.Parse(regReq.Followup)
|
||||
if err != nil {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "invalid followup URL", err)
|
||||
return
|
||||
}
|
||||
|
||||
followupReg, err := types.RegistrationIDFromString(strings.ReplaceAll(fu.Path, "/register/", ""))
|
||||
if err != nil {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "invalid registration ID", err)
|
||||
return
|
||||
}
|
||||
|
||||
if reg, ok := h.state.GetRegistrationCacheEntry(followupReg); ok {
|
||||
if reg, ok := h.registrationCache.Get(followupReg); ok {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "registration timed out", err)
|
||||
case node := <-reg.Registered:
|
||||
if node == nil {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "node not found", nil)
|
||||
}
|
||||
return nodeToRegisterResponse(node), nil
|
||||
return
|
||||
case <-reg.Registered:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, NewHTTPError(http.StatusNotFound, "followup registration not found", nil)
|
||||
// canUsePreAuthKey checks if a pre auth key can be used.
|
||||
func canUsePreAuthKey(pak *types.PreAuthKey) error {
|
||||
if pak == nil {
|
||||
return NewHTTPError(http.StatusUnauthorized, "invalid authkey", nil)
|
||||
}
|
||||
if pak.Expiration != nil && pak.Expiration.Before(time.Now()) {
|
||||
return NewHTTPError(http.StatusUnauthorized, "authkey expired", nil)
|
||||
}
|
||||
|
||||
// we don't need to check if has been used before
|
||||
if pak.Reusable {
|
||||
return nil
|
||||
}
|
||||
|
||||
if pak.Used {
|
||||
return NewHTTPError(http.StatusUnauthorized, "authkey already used", nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Headscale) handleRegisterWithAuthKey(
|
||||
regReq tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
node, changed, err := h.state.HandleNodeFromPreAuthKey(
|
||||
regReq,
|
||||
machineKey,
|
||||
)
|
||||
pak, err := h.db.GetPreAuthKey(regReq.Auth.AuthKey)
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "invalid pre auth key", nil)
|
||||
}
|
||||
var perr types.PAKError
|
||||
if errors.As(err, &perr) {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, perr.Error(), nil)
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// This is a bit of a back and forth, but we have a bit of a chicken and egg
|
||||
// dependency here.
|
||||
// Because the way the policy manager works, we need to have the node
|
||||
// in the database, then add it to the policy manager and then we can
|
||||
// approve the route. This means we get this dance where the node is
|
||||
// first added to the database, then we add it to the policy manager via
|
||||
// nodesChangedHook and then we can auto approve the routes.
|
||||
// As that only approves the struct object, we need to save it again and
|
||||
// ensure we send an update.
|
||||
// This works, but might be another good candidate for doing some sort of
|
||||
// eventbus.
|
||||
routesChanged := h.state.AutoApproveRoutes(node)
|
||||
if _, _, err := h.state.SaveNode(node); err != nil {
|
||||
return nil, fmt.Errorf("saving auto approved routes to node: %w", err)
|
||||
err = canUsePreAuthKey(pak)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if routesChanged {
|
||||
nodeToRegister := types.Node{
|
||||
Hostname: regReq.Hostinfo.Hostname,
|
||||
UserID: pak.User.ID,
|
||||
User: pak.User,
|
||||
MachineKey: machineKey,
|
||||
NodeKey: regReq.NodeKey,
|
||||
Hostinfo: regReq.Hostinfo,
|
||||
LastSeen: ptr.To(time.Now()),
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
|
||||
// TODO(kradalby): This should not be set on the node,
|
||||
// they should be looked up through the key, which is
|
||||
// attached to the node.
|
||||
ForcedTags: pak.Proto().GetAclTags(),
|
||||
AuthKey: pak,
|
||||
AuthKeyID: &pak.ID,
|
||||
}
|
||||
|
||||
if !regReq.Expiry.IsZero() {
|
||||
nodeToRegister.Expiry = ®Req.Expiry
|
||||
}
|
||||
|
||||
ipv4, ipv6, err := h.ipAlloc.Next()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("allocating IPs: %w", err)
|
||||
}
|
||||
|
||||
node, err := db.Write(h.db.DB, func(tx *gorm.DB) (*types.Node, error) {
|
||||
node, err := db.RegisterNode(tx,
|
||||
nodeToRegister,
|
||||
ipv4, ipv6,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("registering node: %w", err)
|
||||
}
|
||||
|
||||
if !pak.Reusable {
|
||||
err = db.UsePreAuthKey(tx, pak)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("using pre auth key: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return node, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
updateSent, err := nodesChangedHook(h.db, h.polMan, h.nodeNotifier)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("nodes changed hook: %w", err)
|
||||
}
|
||||
|
||||
if !updateSent {
|
||||
ctx := types.NotifyCtx(context.Background(), "node updated", node.Hostname)
|
||||
h.nodeNotifier.NotifyAll(ctx, types.UpdatePeerChanged(node.ID))
|
||||
} else if changed {
|
||||
ctx := types.NotifyCtx(context.Background(), "node created", node.Hostname)
|
||||
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
|
||||
} else {
|
||||
// Existing node re-registering without route changes
|
||||
// Still need to notify peers about the node being active again
|
||||
// Use UpdateFull to ensure all peers get complete peer maps
|
||||
ctx := types.NotifyCtx(context.Background(), "node re-registered", node.Hostname)
|
||||
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdatePeerAdded(node.ID))
|
||||
}
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
MachineAuthorized: true,
|
||||
NodeKeyExpired: node.IsExpired(),
|
||||
User: *node.User.TailscaleUser(),
|
||||
Login: *node.User.TailscaleLogin(),
|
||||
User: *pak.User.TailscaleUser(),
|
||||
Login: *pak.User.TailscaleLogin(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -231,7 +269,7 @@ func (h *Headscale) handleRegisterInteractive(
|
||||
return nil, fmt.Errorf("generating registration ID: %w", err)
|
||||
}
|
||||
|
||||
nodeToRegister := types.RegisterNode{
|
||||
newNode := types.RegisterNode{
|
||||
Node: types.Node{
|
||||
Hostname: regReq.Hostinfo.Hostname,
|
||||
MachineKey: machineKey,
|
||||
@@ -239,16 +277,16 @@ func (h *Headscale) handleRegisterInteractive(
|
||||
Hostinfo: regReq.Hostinfo,
|
||||
LastSeen: ptr.To(time.Now()),
|
||||
},
|
||||
Registered: make(chan *types.Node),
|
||||
Registered: make(chan struct{}),
|
||||
}
|
||||
|
||||
if !regReq.Expiry.IsZero() {
|
||||
nodeToRegister.Node.Expiry = ®Req.Expiry
|
||||
newNode.Node.Expiry = ®Req.Expiry
|
||||
}
|
||||
|
||||
h.state.SetRegistrationCacheEntry(
|
||||
h.registrationCache.Set(
|
||||
registrationId,
|
||||
nodeToRegister,
|
||||
newNode,
|
||||
)
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
|
||||
@@ -1,11 +1,12 @@
package types
package hscontrol

import (
	"errors"
	"net/http"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/juanfont/headscale/hscontrol/types"
)

func TestCanUsePreAuthKey(t *testing.T) {
@@ -15,13 +16,13 @@ func TestCanUsePreAuthKey(t *testing.T) {

	tests := []struct {
		name string
		pak *PreAuthKey
		pak *types.PreAuthKey
		wantErr bool
		err PAKError
		err HTTPError
	}{
		{
			name: "valid reusable key",
			pak: &PreAuthKey{
			pak: &types.PreAuthKey{
				Reusable: true,
				Used: false,
				Expiration: &future,
@@ -30,7 +31,7 @@ func TestCanUsePreAuthKey(t *testing.T) {
		},
		{
			name: "valid non-reusable key",
			pak: &PreAuthKey{
			pak: &types.PreAuthKey{
				Reusable: false,
				Used: false,
				Expiration: &future,
@@ -39,27 +40,27 @@ func TestCanUsePreAuthKey(t *testing.T) {
		},
		{
			name: "expired key",
			pak: &PreAuthKey{
			pak: &types.PreAuthKey{
				Reusable: false,
				Used: false,
				Expiration: &past,
			},
			wantErr: true,
			err: PAKError("authkey expired"),
			err: NewHTTPError(http.StatusUnauthorized, "authkey expired", nil),
		},
		{
			name: "used non-reusable key",
			pak: &PreAuthKey{
			pak: &types.PreAuthKey{
				Reusable: false,
				Used: true,
				Expiration: &future,
			},
			wantErr: true,
			err: PAKError("authkey already used"),
			err: NewHTTPError(http.StatusUnauthorized, "authkey already used", nil),
		},
		{
			name: "used reusable key",
			pak: &PreAuthKey{
			pak: &types.PreAuthKey{
				Reusable: true,
				Used: true,
				Expiration: &future,
@@ -68,7 +69,7 @@ func TestCanUsePreAuthKey(t *testing.T) {
		},
		{
			name: "no expiration date",
			pak: &PreAuthKey{
			pak: &types.PreAuthKey{
				Reusable: false,
				Used: false,
				Expiration: nil,
@@ -79,39 +80,38 @@ func TestCanUsePreAuthKey(t *testing.T) {
			name: "nil preauth key",
			pak: nil,
			wantErr: true,
			err: PAKError("invalid authkey"),
			err: NewHTTPError(http.StatusUnauthorized, "invalid authkey", nil),
		},
		{
			name: "expired and used key",
			pak: &PreAuthKey{
			pak: &types.PreAuthKey{
				Reusable: false,
				Used: true,
				Expiration: &past,
			},
			wantErr: true,
			err: PAKError("authkey expired"),
			err: NewHTTPError(http.StatusUnauthorized, "authkey expired", nil),
		},
		{
			name: "no expiration and used key",
			pak: &PreAuthKey{
			pak: &types.PreAuthKey{
				Reusable: false,
				Used: true,
				Expiration: nil,
			},
			wantErr: true,
			err: PAKError("authkey already used"),
			err: NewHTTPError(http.StatusUnauthorized, "authkey already used", nil),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.pak.Validate()
			err := canUsePreAuthKey(tt.pak)
			if tt.wantErr {
				if err == nil {
					t.Errorf("expected error but got none")
				} else {
					var httpErr PAKError
					ok := errors.As(err, &httpErr)
					httpErr, ok := err.(HTTPError)
					if !ok {
						t.Errorf("expected HTTPError but got %T", err)
					} else {
@@ -1,7 +1,6 @@
package capver

import (
	"slices"
	"sort"
	"strings"

@@ -32,7 +31,9 @@ func tailscaleVersSorted() []string {

func capVersSorted() []tailcfg.CapabilityVersion {
	capVers := xmaps.Keys(capVerToTailscaleVer)
	slices.Sort(capVers)
	sort.Slice(capVers, func(i, j int) bool {
		return capVers[i] < capVers[j]
	})
	return capVers
}

@@ -1,10 +1,15 @@
package capver

// Generated DO NOT EDIT
//Generated DO NOT EDIT

import "tailscale.com/tailcfg"

var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{
	"v1.44.3": 63,
	"v1.56.1": 82,
	"v1.58.0": 85,
	"v1.58.1": 85,
	"v1.58.2": 85,
	"v1.60.0": 87,
	"v1.60.1": 87,
	"v1.62.0": 88,
@@ -31,23 +36,21 @@ var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{
	"v1.78.0": 109,
	"v1.78.1": 109,
	"v1.80.0": 113,
	"v1.80.1": 113,
	"v1.80.2": 113,
	"v1.80.3": 113,
	"v1.82.0": 115,
	"v1.82.5": 115,
}

var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{
	87: "v1.60.0",
	88: "v1.62.0",
	90: "v1.64.0",
	95: "v1.66.0",
	97: "v1.68.0",
	102: "v1.70.0",
	104: "v1.72.0",
	106: "v1.74.0",
	109: "v1.78.0",
	113: "v1.80.0",
	115: "v1.82.0",
	63: "v1.44.3",
	82: "v1.56.1",
	85: "v1.58.0",
	87: "v1.60.0",
	88: "v1.62.0",
	90: "v1.64.0",
	95: "v1.66.0",
	97: "v1.68.0",
	102: "v1.70.0",
	104: "v1.72.0",
	106: "v1.74.0",
	109: "v1.78.0",
	113: "v1.80.0",
}
Some files were not shown because too many files have changed in this diff.