Compare commits


1 Commit

Author: Juan Font
SHA1: 4b4200e8a5
Message: Add ko-fi sponsor button
Date: 2022-12-22 17:14:06 +01:00
54 changed files with 129 additions and 2156 deletions


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestAuthKeyLogoutAndRelogin
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestAuthKeyLogoutAndRelogin$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestAuthWebFlowAuthenticationPingAll
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestAuthWebFlowAuthenticationPingAll$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestAuthWebFlowLogoutAndRelogin
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestAuthWebFlowLogoutAndRelogin$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestCreateTailscale
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestCreateTailscale$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestEnablingRoutes
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestEnablingRoutes$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestHeadscale
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestHeadscale$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNamespaceCommand
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNamespaceCommand$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestOIDCAuthenticationPingAll
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestOIDCAuthenticationPingAll$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestOIDCExpireNodes
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestOIDCExpireNodes$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPingAllByHostname
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPingAllByHostname$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPingAllByIP
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPingAllByIP$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPreAuthKeyCommand
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPreAuthKeyCommand$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPreAuthKeyCommandReusableEphemeral
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPreAuthKeyCommandReusableEphemeral$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPreAuthKeyCommandWithoutExpiry
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPreAuthKeyCommandWithoutExpiry$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestResolveMagicDNS
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestResolveMagicDNS$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHIsBlockedInACL
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHIsBlockedInACL$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHMultipleNamespacesAllToAll
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHMultipleNamespacesAllToAll$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHNoSSHConfigured
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHNoSSHConfigured$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHOneNamespaceAllToAll
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHOneNamespaceAllToAll$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSNamespaceOnlyIsolation
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSNamespaceOnlyIsolation$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestTaildrop
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestTaildrop$"


@@ -1,47 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestTailscaleNodesJoiningHeadcale
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestTailscaleNodesJoiningHeadcale$"


@@ -0,0 +1,35 @@
name: Integration Test v2 - kradalby
on: [pull_request]
jobs:
integration-test-v2-kradalby:
runs-on: [self-hosted, linux, x64, nixos, docker]
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
# We currently only run one job at a time, so make sure
# we wipe all containers and all networks.
- name: Clean up Docker
if: steps.changed-files.outputs.any_changed == 'true'
run: |
docker kill $(docker ps -q) || true
docker network prune -f || true
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_v2_general


@@ -1 +0,0 @@
.github/workflows/test-integration-v2*


@@ -9,10 +9,6 @@
- Set `db_ssl` to false by default [#1052](https://github.com/juanfont/headscale/pull/1052)
- Fix duplicate nodes due to incorrect implementation of the protocol [#1058](https://github.com/juanfont/headscale/pull/1058)
- Report if a machine is online in CLI more accurately [#1062](https://github.com/juanfont/headscale/pull/1062)
- Added config option for custom DNS records [#1035](https://github.com/juanfont/headscale/pull/1035)
- Expire nodes based on OIDC token expiry [#1067](https://github.com/juanfont/headscale/pull/1067)
- Remove ephemeral nodes on logout [#1098](https://github.com/juanfont/headscale/pull/1098)
- Performance improvements in ACLs [#1129](https://github.com/juanfont/headscale/pull/1129)
## 0.17.1 (2022-12-05)

app.go (61 changes)

@@ -162,7 +162,6 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
aclRules: tailcfg.FilterAllowAll, // default allowall
registrationCache: registrationCache,
pollNetMapStreamWG: sync.WaitGroup{},
lastStateChange: xsync.NewMapOf[time.Time](),
}
err = app.initDB()
@@ -218,15 +217,6 @@ func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
}
}
// expireExpiredMachines expires machines that have an explicit expiry set
// after that expiry time has passed.
func (h *Headscale) expireExpiredMachines(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
for range ticker.C {
h.expireExpiredMachinesWorker()
}
}
func (h *Headscale) failoverSubnetRoutes(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
for range ticker.C {
@@ -258,7 +248,8 @@ func (h *Headscale) expireEphemeralNodesWorker() {
expiredFound := false
for _, machine := range machines {
if machine.isEphemeral() && machine.LastSeen != nil &&
if machine.AuthKey != nil && machine.LastSeen != nil &&
machine.AuthKey.Ephemeral &&
time.Now().
After(machine.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
expiredFound = true
@@ -282,53 +273,6 @@ func (h *Headscale) expireEphemeralNodesWorker() {
}
}
func (h *Headscale) expireExpiredMachinesWorker() {
namespaces, err := h.ListNamespaces()
if err != nil {
log.Error().Err(err).Msg("Error listing namespaces")
return
}
for _, namespace := range namespaces {
machines, err := h.ListMachinesInNamespace(namespace.Name)
if err != nil {
log.Error().
Err(err).
Str("namespace", namespace.Name).
Msg("Error listing machines in namespace")
return
}
expiredFound := false
for index, machine := range machines {
if machine.isExpired() &&
machine.Expiry.After(h.getLastStateChange(namespace)) {
expiredFound = true
err := h.ExpireMachine(&machines[index])
if err != nil {
log.Error().
Err(err).
Str("machine", machine.Hostname).
Str("name", machine.GivenName).
Msg("🤮 Cannot expire machine")
} else {
log.Info().
Str("machine", machine.Hostname).
Str("name", machine.GivenName).
Msg("Machine successfully expired")
}
}
}
if expiredFound {
h.setLastStateChangeToNow()
}
}
}
func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
req interface{},
info *grpc.UnaryServerInfo,
@@ -550,7 +494,6 @@ func (h *Headscale) Serve() error {
}
go h.expireEphemeralNodes(updateInterval)
go h.expireExpiredMachines(updateInterval)
go h.failoverSubnetRoutes(updateInterval)


@@ -1,114 +0,0 @@
package main
//go:generate go run ./main.go
import (
"bytes"
"fmt"
"log"
"os"
"text/template"
)
var (
jobFileNameTemplate = `test-integration-v2-%s.yaml`
jobTemplate = template.Must(template.New("jobTemplate").Parse(`
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - {{.Name}}
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^{{.Name}}$"
`))
)
const workflowFilePerm = 0o600
func main() {
type testConfig struct {
Name string
}
// TODO(kradalby): automatically fetch tests at runtime
tests := []string{
"TestAuthKeyLogoutAndRelogin",
"TestAuthWebFlowAuthenticationPingAll",
"TestAuthWebFlowLogoutAndRelogin",
"TestCreateTailscale",
"TestEnablingRoutes",
"TestHeadscale",
"TestNamespaceCommand",
"TestOIDCAuthenticationPingAll",
"TestOIDCExpireNodes",
"TestPingAllByHostname",
"TestPingAllByIP",
"TestPreAuthKeyCommand",
"TestPreAuthKeyCommandReusableEphemeral",
"TestPreAuthKeyCommandWithoutExpiry",
"TestResolveMagicDNS",
"TestSSHIsBlockedInACL",
"TestSSHMultipleNamespacesAllToAll",
"TestSSHNoSSHConfigured",
"TestSSHOneNamespaceAllToAll",
"TestSSNamespaceOnlyIsolation",
"TestTaildrop",
"TestTailscaleNodesJoiningHeadcale",
}
for _, test := range tests {
var content bytes.Buffer
if err := jobTemplate.Execute(&content, testConfig{
Name: test,
}); err != nil {
log.Fatalf("failed to render template: %s", err)
}
path := "../../.github/workflows/" + fmt.Sprintf(jobFileNameTemplate, test)
err := os.WriteFile(path, content.Bytes(), workflowFilePerm)
if err != nil {
log.Fatalf("failed to write github job: %s", err)
}
}
}


@@ -16,11 +16,10 @@ const (
errMockOidcClientIDNotDefined = Error("MOCKOIDC_CLIENT_ID not defined")
errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
errMockOidcPortNotDefined = Error("MOCKOIDC_PORT not defined")
accessTTL = 10 * time.Minute
refreshTTL = 60 * time.Minute
)
var accessTTL = 2 * time.Minute
func init() {
rootCmd.AddCommand(mockOidcCmd)
}
@@ -55,16 +54,6 @@ func mockOIDC() error {
if portStr == "" {
return errMockOidcPortNotDefined
}
accessTTLOverride := os.Getenv("MOCKOIDC_ACCESS_TTL")
if accessTTLOverride != "" {
newTTL, err := time.ParseDuration(accessTTLOverride)
if err != nil {
return err
}
accessTTL = newTTL
}
log.Info().Msgf("Access token TTL: %s", accessTTL)
port, err := strconv.Atoi(portStr)
if err != nil {


@@ -475,7 +475,6 @@ func nodesToPtables(
"IP addresses",
"Ephemeral",
"Last seen",
"Expiration",
"Online",
"Expired",
}
@@ -502,12 +501,8 @@ func nodesToPtables(
}
var expiry time.Time
var expiryTime string
if machine.Expiry != nil {
expiry = machine.Expiry.AsTime()
expiryTime = expiry.Format("2006-01-02 15:04:05")
} else {
expiryTime = "N/A"
}
var machineKey key.MachinePublic
@@ -588,7 +583,6 @@ func nodesToPtables(
strings.Join([]string{IPV4Address, IPV6Address}, ", "),
strconv.FormatBool(ephemeral),
lastSeenTime,
expiryTime,
online,
expired,
}


@@ -63,11 +63,6 @@ noise:
# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
# and the associated prefix length, delimited by a slash.
# While this looks like it can take arbitrary values, it
# needs to be within IP ranges supported by the Tailscale
# client.
# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
@@ -240,17 +235,6 @@ dns_config:
# Search domains to inject.
domains: []
# Extra DNS records
# so far only A-records are supported (on the tailscale side)
# See https://github.com/juanfont/headscale/blob/main/docs/dns-records.md#Limitations
# extra_records:
# - name: "grafana.myvpn.example.com"
# type: "A"
# value: "100.64.0.3"
#
# # you can also put it in one line
# - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
# Only works if there is at least a nameserver defined.
magic_dns: true


@@ -408,7 +408,7 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
}
if viper.IsSet("dns_config.restricted_nameservers") {
if len(dnsConfig.Resolvers) > 0 {
if len(dnsConfig.Nameservers) > 0 {
dnsConfig.Routes = make(map[string][]*dnstype.Resolver)
restrictedDNS := viper.GetStringMapStringSlice(
"dns_config.restricted_nameservers",
@@ -440,7 +440,7 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
if viper.IsSet("dns_config.domains") {
domains := viper.GetStringSlice("dns_config.domains")
if len(dnsConfig.Resolvers) > 0 {
if len(dnsConfig.Nameservers) > 0 {
dnsConfig.Domains = domains
} else if domains != nil {
log.Warn().
@@ -448,20 +448,6 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
}
}
if viper.IsSet("dns_config.extra_records") {
var extraRecords []tailcfg.DNSRecord
err := viper.UnmarshalKey("dns_config.extra_records", &extraRecords)
if err != nil {
log.Error().
Str("func", "getDNSConfig").
Err(err).
Msgf("Could not parse dns_config.extra_records")
}
dnsConfig.ExtraRecords = extraRecords
}
if viper.IsSet("dns_config.magic_dns") {
dnsConfig.Proxied = viper.GetBool("dns_config.magic_dns")
}


@@ -12,7 +12,6 @@ please ask on [Discord](https://discord.gg/c84AZQhmpx) instead of opening an Iss
- [Running headscale on Linux](running-headscale-linux.md)
- [Control headscale remotely](remote-cli.md)
- [Using a Windows client with headscale](windows-client.md)
- [Configuring OIDC](oidc.md)
### References
@@ -30,7 +29,6 @@ written by community members. It is _not_ verified by `headscale` developers.
- [Running headscale in a container](running-headscale-container.md)
- [Running headscale on OpenBSD](running-headscale-openbsd.md)
- [Running headscale behind a reverse proxy](reverse-proxy.md)
- [Set Custom DNS records](dns-records.md)
## Misc


@@ -1,83 +0,0 @@
# Setting custom DNS records
## Goal
This page shows how to set custom DNS records with `headscale`'s MagicDNS.
An example use case is serving several apps on the same host behind a reverse proxy like NGINX, in this case a Prometheus monitoring stack. This makes it possible to reach the service at "http://grafana.myvpn.example.com" instead of the hostname-and-port combination "http://hostname-in-magic-dns.myvpn.example.com:3000".
## Setup
### 1. Change the configuration
1. Change the `config.yaml` to contain the desired records like so:
```yaml
dns_config:
...
extra_records:
- name: "prometheus.myvpn.example.com"
type: "A"
value: "100.64.0.3"
- name: "grafana.myvpn.example.com"
type: "A"
value: "100.64.0.3"
...
```
2. Restart your headscale instance.
Beware of the limitations listed later on!
### 2. Verify that the records are set
Use a DNS querying tool of your choice on one of your hosts to verify that the newly set records are available via MagicDNS. Here we use [`dig`](https://man.archlinux.org/man/dig.1.en):
```
$ dig grafana.myvpn.example.com
; <<>> DiG 9.18.10 <<>> grafana.myvpn.example.com
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 44054
;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 65494
;; QUESTION SECTION:
;grafana.myvpn.example.com. IN A
;; ANSWER SECTION:
grafana.myvpn.example.com. 593 IN A 100.64.0.3
;; Query time: 0 msec
;; SERVER: 127.0.0.53#53(127.0.0.53) (UDP)
;; WHEN: Sat Dec 31 11:46:55 CET 2022
;; MSG SIZE rcvd: 66
```
### 3. Optional: Set up the reverse proxy
The motivating example here was to be able to access internal monitoring services on the same host without specifying a port:
```
server {
listen 80;
listen [::]:80;
server_name grafana.myvpn.example.com;
location / {
proxy_pass http://localhost:3000;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
```
## Limitations
[Not all record types are supported](https://github.com/tailscale/tailscale/blob/main/ipn/ipnlocal/local.go#L2891-L2909); in particular, CNAME records are not supported.
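For reference, each `extra_records` entry maps onto a `tailcfg.DNSRecord`, the type visible in the `GetDNSConfig` diff above. A minimal Go sketch of that mapping, assuming only the three fields shown in the YAML:
```go
package main

import (
	"fmt"

	"tailscale.com/tailcfg"
)

func main() {
	// Each extra_records entry becomes a tailcfg.DNSRecord. Only A records
	// are handled on the Tailscale side, per the limitation noted above.
	dnsConfig := tailcfg.DNSConfig{
		ExtraRecords: []tailcfg.DNSRecord{
			{Name: "prometheus.myvpn.example.com", Type: "A", Value: "100.64.0.3"},
			{Name: "grafana.myvpn.example.com", Type: "A", Value: "100.64.0.3"},
		},
	}

	fmt.Printf("%+v\n", dnsConfig.ExtraRecords)
}
```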


@@ -1,137 +0,0 @@
# Configuring Headscale to use OIDC authentication
To authenticate users through a centralized identity solution, enable the OIDC integration.
Known limitations:
- No dynamic ACL support
- OIDC groups cannot be used in ACLs
## Basic configuration
In your `config.yaml`, customize this to your liking:
```yaml
oidc:
# Block further startup until the OIDC provider is healthy and available
only_start_if_oidc_is_available: true
# Specified by your OIDC provider
issuer: "https://your-oidc.issuer.com/path"
# Specified/generated by your OIDC provider
client_id: "your-oidc-client-id"
client_secret: "your-oidc-client-secret"
# Customize the scopes used in the OIDC flow. They default to "openid", "profile" and "email".
scope: ["openid", "profile", "email", "custom"]
# Optional: Passed on to the browser login request used to tweak behaviour for the OIDC provider
extra_params:
domain_hint: example.com
# Optional: List allowed principal domains and/or users. If an authenticated user's domain is not in this list,
# the authentication request will be rejected.
allowed_domains:
- example.com
# Optional. Note that groups from Keycloak have a leading '/'.
allowed_groups:
- /headscale
# Optional.
allowed_users:
- alice@example.com
# If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
# This will transform `first-name.last-name@example.com` to the namespace `first-name.last-name`
# If `strip_email_domain` is set to `false`, the domain part will NOT be removed, resulting in the
# namespace `first-name.last-name.example.com`.
strip_email_domain: true
```
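As a rough illustration of the `strip_email_domain` behaviour described above, here is a hypothetical helper (a sketch, not headscale's actual implementation):
```go
package main

import (
	"fmt"
	"strings"
)

// namespaceFromEmail sketches how strip_email_domain derives a namespace
// name from an authenticated user's email address (hypothetical helper).
func namespaceFromEmail(email string, stripDomain bool) string {
	user, domain, found := strings.Cut(email, "@")
	if !found {
		return email
	}
	if stripDomain {
		return user // first-name.last-name@example.com -> first-name.last-name
	}
	return user + "." + domain // -> first-name.last-name.example.com
}

func main() {
	fmt.Println(namespaceFromEmail("first-name.last-name@example.com", true))  // first-name.last-name
	fmt.Println(namespaceFromEmail("first-name.last-name@example.com", false)) // first-name.last-name.example.com
}
```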
## Azure AD example
To integrate Headscale with Azure Active Directory, provision an App Registration with the correct scopes and redirect URI. Here is an example with Terraform:
```hcl
resource "azuread_application" "headscale" {
display_name = "Headscale"
sign_in_audience = "AzureADMyOrg"
fallback_public_client_enabled = false
required_resource_access {
// Microsoft Graph
resource_app_id = "00000003-0000-0000-c000-000000000000"
resource_access {
// scope: profile
id = "14dad69e-099b-42c9-810b-d002981feec1"
type = "Scope"
}
resource_access {
// scope: openid
id = "37f7f235-527c-4136-accd-4a02d197296e"
type = "Scope"
}
resource_access {
// scope: email
id = "64a6cdd6-aab1-4aaf-94b8-3cc8405e90d0"
type = "Scope"
}
}
web {
# Points at your running Headscale instance
redirect_uris = ["https://headscale.example.com/oidc/callback"]
implicit_grant {
access_token_issuance_enabled = false
id_token_issuance_enabled = true
}
}
group_membership_claims = ["SecurityGroup"]
optional_claims {
# Expose group memberships
id_token {
name = "groups"
}
}
}
resource "azuread_application_password" "headscale-application-secret" {
display_name = "Headscale Server"
application_object_id = azuread_application.headscale.object_id
}
resource "azuread_service_principal" "headscale" {
application_id = azuread_application.headscale.application_id
}
resource "azuread_service_principal_password" "headscale" {
service_principal_id = azuread_service_principal.headscale.id
end_date_relative = "44640h"
}
output "headscale_client_id" {
value = azuread_application.headscale.application_id
}
output "headscale_client_secret" {
value = azuread_application_password.headscale-application-secret.value
}
```
And in your Headscale `config.yaml`:
```yaml
oidc:
issuer: "https://login.microsoftonline.com/<tenant-UUID>/v2.0"
client_id: "<client-id-from-terraform>"
client_secret: "<client-secret-from-terraform>"
# Optional: add "groups"
scope: ["openid", "profile", "email"]
extra_params:
# Use your own domain, associated with Azure AD
domain_hint: example.com
# Optional: Force the Azure AD account picker
prompt: select_account
```


@@ -98,17 +98,3 @@ spec:
upgrade_configs:
- upgrade_type: tailscale-control-protocol
```
## Caddy
The following Caddyfile is all that is necessary to use Caddy as a reverse proxy for headscale, in combination with the `config.yaml` specifications above to disable headscale's built-in TLS. Replace values as necessary: `<YOUR_SERVER_NAME>` should be the FQDN at which headscale will be served, and `<IP:PORT>` should be the IP address and port where headscale is running. In most cases, this will be `localhost:8080`.
```
<YOUR_SERVER_NAME> {
reverse_proxy <IP:PORT>
}
```
Caddy v2 will [automatically](https://caddyserver.com/docs/automatic-https) provision a certificate for your domain/subdomain, force HTTPS, and proxy websockets; no further configuration is necessary.
For a slightly more complex configuration which utilizes Docker containers to manage Caddy, Headscale, and Headscale-UI, [Guru Computing's guide](https://blog.gurucomputing.com.au/smart-vpns-with-headscale/) is an excellent reference.


@@ -176,7 +176,6 @@ func (api headscaleV1APIServer) RegisterMachine(
machine, err := api.h.RegisterMachineFromAuthCallback(
request.GetKey(),
request.GetNamespace(),
nil,
RegisterMethodCLI,
)
if err != nil {


@@ -1,16 +0,0 @@
# Integration testing
Headscale relies on integration testing to ensure we remain compatible with Tailscale.
This is typically performed by starting a Headscale server and running a test "scenario"
with an array of Tailscale clients and versions.
Headscale's test framework and the current set of scenarios are defined in this directory.
Tests are located in files ending with `_test.go`; the framework is located in the remaining files.
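A condensed sketch of such a scenario test, modelled on the tests visible elsewhere in this diff (`TestSketchPingAll` is a hypothetical name; it assumes this package's `NewScenario`, `hsic`, and `tsic` helpers):
```go
func TestSketchPingAll(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario()
	if err != nil {
		t.Errorf("failed to create scenario: %s", err)
	}

	// One namespace with a client per supported Tailscale version.
	spec := map[string]int{
		"namespace1": len(TailscaleVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("sketch"))
	if err != nil {
		t.Errorf("failed to create headscale environment: %s", err)
	}

	allClients, err := scenario.ListTailscaleClients()
	if err != nil {
		t.Errorf("failed to get clients: %s", err)
	}

	allIps, err := scenario.ListTailscaleClientsIPs()
	if err != nil {
		t.Errorf("failed to get clients: %s", err)
	}

	err = scenario.WaitForTailscaleSync()
	if err != nil {
		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
	}

	// Every client pings every assigned IP.
	success := 0
	for _, client := range allClients {
		for _, ip := range allIps {
			if err := client.Ping(ip.String()); err != nil {
				t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
			} else {
				success++
			}
		}
	}
	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

	if err := scenario.Shutdown(); err != nil {
		t.Errorf("failed to tear down scenario: %s", err)
	}
}
```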
## Running integration tests on GitHub Actions
Each test currently runs as a separate workflow in GitHub Actions. To add a new test, add it
to the list in `../cmd/gh-action-integration-generator/main.go`, run `go generate` inside
`../cmd/gh-action-integration-generator/`, and commit the result.


@@ -9,10 +9,8 @@ import (
"log"
"net"
"net/http"
"net/netip"
"strconv"
"testing"
"time"
"github.com/juanfont/headscale"
"github.com/juanfont/headscale/integration/dockertestutil"
@@ -24,7 +22,7 @@ import (
const (
dockerContextPath = "../."
hsicOIDCMockHashLength = 6
defaultAccessTTL = 10 * time.Minute
oidcServerPort = 10000
)
var errStatusCodeNotOK = errors.New("status code not OK")
@@ -52,7 +50,7 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
"namespace1": len(TailscaleVersions),
}
oidcConfig, err := scenario.runMockOIDC(defaultAccessTTL)
oidcConfig, err := scenario.runMockOIDC()
if err != nil {
t.Errorf("failed to run mock OIDC server: %s", err)
}
@@ -89,7 +87,19 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
success := pingAll(t, allClients, allIps)
success := 0
for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
err = scenario.Shutdown()
@@ -98,74 +108,6 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
}
}
func TestOIDCExpireNodes(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
shortAccessTTL := 5 * time.Minute
baseScenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
scenario := AuthOIDCScenario{
Scenario: baseScenario,
}
spec := map[string]int{
"namespace1": len(TailscaleVersions),
}
oidcConfig, err := scenario.runMockOIDC(shortAccessTTL)
if err != nil {
t.Fatalf("failed to run mock OIDC server: %s", err)
}
oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": oidcConfig.Issuer,
"HEADSCALE_OIDC_CLIENT_ID": oidcConfig.ClientID,
"HEADSCALE_OIDC_CLIENT_SECRET": oidcConfig.ClientSecret,
"HEADSCALE_OIDC_STRIP_EMAIL_DOMAIN": fmt.Sprintf("%t", oidcConfig.StripEmaildomain),
}
err = scenario.CreateHeadscaleEnv(
spec,
hsic.WithTestName("oidcexpirenodes"),
hsic.WithConfigEnv(oidcMap),
hsic.WithHostnameAsServerURL(),
)
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
success := pingAll(t, allClients, allIps)
t.Logf("%d successful pings out of %d (before expiry)", success, len(allClients)*len(allIps))
// await all nodes being logged out after OIDC token expiry
scenario.WaitForTailscaleLogout()
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func (s *AuthOIDCScenario) CreateHeadscaleEnv(
namespaces map[string]int,
opts ...hsic.Option,
@@ -201,13 +143,7 @@ func (s *AuthOIDCScenario) CreateHeadscaleEnv(
return nil
}
func (s *AuthOIDCScenario) runMockOIDC(accessTTL time.Duration) (*headscale.OIDCConfig, error) {
port, err := dockertestutil.RandomFreeHostPort()
if err != nil {
log.Fatalf("could not find an open port: %s", err)
}
portNotation := fmt.Sprintf("%d/tcp", port)
func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
hash, _ := headscale.GenerateRandomStringDNSSafe(hsicOIDCMockHashLength)
hostname := fmt.Sprintf("hs-oidcmock-%s", hash)
@@ -215,17 +151,16 @@ func (s *AuthOIDCScenario) runMockOIDC(accessTTL time.Duration) (*headscale.OIDC
mockOidcOptions := &dockertest.RunOptions{
Name: hostname,
Cmd: []string{"headscale", "mockoidc"},
ExposedPorts: []string{portNotation},
ExposedPorts: []string{"10000/tcp"},
PortBindings: map[docker.Port][]docker.PortBinding{
docker.Port(portNotation): {{HostPort: strconv.Itoa(port)}},
"10000/tcp": {{HostPort: "10000"}},
},
Networks: []*dockertest.Network{s.Scenario.network},
Env: []string{
fmt.Sprintf("MOCKOIDC_ADDR=%s", hostname),
fmt.Sprintf("MOCKOIDC_PORT=%d", port),
"MOCKOIDC_PORT=10000",
"MOCKOIDC_CLIENT_ID=superclient",
"MOCKOIDC_CLIENT_SECRET=supersecret",
fmt.Sprintf("MOCKOIDC_ACCESS_TTL=%s", accessTTL.String()),
},
}
@@ -234,7 +169,7 @@ func (s *AuthOIDCScenario) runMockOIDC(accessTTL time.Duration) (*headscale.OIDC
ContextDir: dockerContextPath,
}
err = s.pool.RemoveContainerByName(hostname)
err := s.pool.RemoveContainerByName(hostname)
if err != nil {
return nil, err
}
@@ -249,7 +184,11 @@ func (s *AuthOIDCScenario) runMockOIDC(accessTTL time.Duration) (*headscale.OIDC
}
log.Println("Waiting for headscale mock oidc to be ready for tests")
hostEndpoint := fmt.Sprintf("%s:%d", s.mockOIDC.GetIPInNetwork(s.network), port)
hostEndpoint := fmt.Sprintf(
"%s:%s",
s.mockOIDC.GetIPInNetwork(s.network),
s.mockOIDC.GetPort(fmt.Sprintf("%d/tcp", oidcServerPort)),
)
if err := s.pool.Retry(func() error {
oidcConfigURL := fmt.Sprintf("http://%s/oidc/.well-known/openid-configuration", hostEndpoint)
@@ -276,11 +215,11 @@ func (s *AuthOIDCScenario) runMockOIDC(accessTTL time.Duration) (*headscale.OIDC
log.Printf("headscale mock oidc is ready for tests at %s", hostEndpoint)
return &headscale.OIDCConfig{
Issuer: fmt.Sprintf("http://%s/oidc", net.JoinHostPort(s.mockOIDC.GetIPInNetwork(s.network), strconv.Itoa(port))),
ClientID: "superclient",
ClientSecret: "supersecret",
StripEmaildomain: true,
OnlyStartIfOIDCIsAvailable: true,
Issuer: fmt.Sprintf("http://%s/oidc",
net.JoinHostPort(s.mockOIDC.GetIPInNetwork(s.network), strconv.Itoa(oidcServerPort))),
ClientID: "superclient",
ClientSecret: "supersecret",
StripEmaildomain: true,
}, nil
}
@@ -353,24 +292,6 @@ func (s *AuthOIDCScenario) runTailscaleUp(
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
}
func pingAll(t *testing.T, clients []TailscaleClient, ips []netip.Addr) int {
t.Helper()
success := 0
for _, client := range clients {
for _, ip := range ips {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}
return success
}
func (s *AuthOIDCScenario) Shutdown() error {
err := s.pool.Purge(s.mockOIDC)
if err != nil {


@@ -147,7 +147,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
}
}
scenario.WaitForTailscaleLogout()
scenario.waitForTailscaleLogout()
t.Logf("all clients logged out")
@@ -259,6 +259,22 @@ func (s *AuthWebFlowScenario) CreateHeadscaleEnv(
return nil
}
func (s *AuthWebFlowScenario) waitForTailscaleLogout() {
for _, namespace := range s.namespaces {
for _, client := range namespace.Clients {
namespace.syncWaitGroup.Add(1)
go func(c TailscaleClient) {
defer namespace.syncWaitGroup.Done()
// TODO(kradalby): error handle this
_ = c.WaitForLogout()
}(client)
}
namespace.syncWaitGroup.Wait()
}
}
func (s *AuthWebFlowScenario) runTailscaleUp(
namespaceStr, loginServer string,
) error {


@@ -11,7 +11,7 @@ type ControlServer interface {
GetEndpoint() string
WaitForReady() error
CreateNamespace(namespace string) error
CreateAuthKey(namespace string, reusable bool, ephemeral bool) (*v1.PreAuthKey, error)
CreateAuthKey(namespace string) (*v1.PreAuthKey, error)
ListMachinesInNamespace(namespace string) ([]*v1.Machine, error)
GetCert() []byte
GetHostname() string


@@ -2,7 +2,6 @@ package dockertestutil
import (
"errors"
"net"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
@@ -61,20 +60,3 @@ func AddContainerToNetwork(
return nil
}
// RandomFreeHostPort asks the kernel for a free open port that is ready to use.
// (from https://github.com/phayes/freeport)
func RandomFreeHostPort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
return 0, err
}
listener, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer listener.Close()
//nolint:forcetypeassert
return listener.Addr().(*net.TCPAddr).Port, nil
}


@@ -2,7 +2,6 @@ package integration
import (
"fmt"
"net/netip"
"strings"
"testing"
"time"
@@ -67,239 +66,6 @@ func TestPingAllByIP(t *testing.T) {
}
}
func TestAuthKeyLogoutAndRelogin(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
"namespace1": len(TailscaleVersions),
"namespace2": len(TailscaleVersions),
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"))
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
clientIPs := make(map[TailscaleClient][]netip.Addr)
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
clientIPs[client] = ips
}
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
}
}
scenario.WaitForTailscaleLogout()
t.Logf("all clients logged out")
headscale, err := scenario.Headscale()
if err != nil {
t.Errorf("failed to get headscale server: %s", err)
}
for namespaceName := range spec {
key, err := scenario.CreatePreAuthKey(namespaceName, true, false)
if err != nil {
t.Errorf("failed to create pre-auth key for namespace %s: %s", namespaceName, err)
}
err = scenario.RunTailscaleUp(namespaceName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Errorf("failed to run tailscale up for namespace %s: %s", namespaceName, err)
}
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
allClients, err = scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
success := 0
for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
// let's check if the IPs are the same
if len(ips) != len(clientIPs[client]) {
t.Errorf("IPs changed for client %s", client.Hostname())
}
for _, ip := range ips {
found := false
for _, oldIP := range clientIPs[client] {
if ip == oldIP {
found = true
break
}
}
if !found {
t.Errorf("IPs changed for client %s. Used to be %v now %v", client.Hostname(), clientIPs[client], ips)
}
}
}
t.Logf("all clients IPs are the same")
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestEphemeral(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
"namespace1": len(TailscaleVersions),
"namespace2": len(TailscaleVersions),
}
headscale, err := scenario.Headscale(hsic.WithTestName("ephemeral"))
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
for namespaceName, clientCount := range spec {
err = scenario.CreateNamespace(namespaceName)
if err != nil {
t.Errorf("failed to create namespace %s: %s", namespaceName, err)
}
err = scenario.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount, []tsic.Option{}...)
if err != nil {
t.Errorf("failed to create tailscale nodes in namespace %s: %s", namespaceName, err)
}
key, err := scenario.CreatePreAuthKey(namespaceName, true, true)
if err != nil {
t.Errorf("failed to create pre-auth key for namespace %s: %s", namespaceName, err)
}
err = scenario.RunTailscaleUp(namespaceName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Errorf("failed to run tailscale up for namespace %s: %s", namespaceName, err)
}
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
success := 0
for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
}
}
scenario.WaitForTailscaleLogout()
t.Logf("all clients logged out")
for namespaceName := range spec {
machines, err := headscale.ListMachinesInNamespace(namespaceName)
if err != nil {
log.Error().
Err(err).
Str("namespace", namespaceName).
Msg("Error listing machines in namespace")
return
}
if len(machines) != 0 {
t.Errorf("expected no machines, got %d in namespace %s", len(machines), namespaceName)
}
}
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestPingAllByHostname(t *testing.T) {
IntegrationSkip(t)
t.Parallel()


@@ -60,8 +60,6 @@ package hsic
// }
// TODO: Reuse the actual configuration object above.
// Deprecated: use env function instead as it is easier to
// override.
func DefaultConfigYAML() string {
yaml := `
log:
@@ -97,35 +95,3 @@ derp:
return yaml
}
func MinimumConfigYAML() string {
return `
private_key_path: /tmp/private.key
noise:
private_key_path: /tmp/noise_private.key
`
}
func DefaultConfigEnv() map[string]string {
return map[string]string{
"HEADSCALE_LOG_LEVEL": "trace",
"HEADSCALE_ACL_POLICY_PATH": "",
"HEADSCALE_DB_TYPE": "sqlite3",
"HEADSCALE_DB_PATH": "/tmp/integration_test_db.sqlite3",
"HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT": "30m",
"HEADSCALE_NODE_UPDATE_CHECK_INTERVAL": "10s",
"HEADSCALE_IP_PREFIXES": "fd7a:115c:a1e0::/48 100.64.0.0/10",
"HEADSCALE_DNS_CONFIG_BASE_DOMAIN": "headscale.net",
"HEADSCALE_DNS_CONFIG_MAGIC_DNS": "true",
"HEADSCALE_DNS_CONFIG_DOMAINS": "",
"HEADSCALE_DNS_CONFIG_NAMESERVERS": "127.0.0.11 1.1.1.1",
"HEADSCALE_PRIVATE_KEY_PATH": "/tmp/private.key",
"HEADSCALE_NOISE_PRIVATE_KEY_PATH": "/tmp/noise_private.key",
"HEADSCALE_LISTEN_ADDR": "0.0.0.0:8080",
"HEADSCALE_METRICS_LISTEN_ADDR": "127.0.0.1:9090",
"HEADSCALE_SERVER_URL": "http://headscale:8080",
"HEADSCALE_DERP_URLS": "https://controlplane.tailscale.com/derpmap/default",
"HEADSCALE_DERP_AUTO_UPDATE_ENABLED": "false",
"HEADSCALE_DERP_UPDATE_FREQUENCY": "1m",
}
}


@@ -17,7 +17,6 @@ import (
"net/http"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/integration/dockertestutil"
@@ -46,7 +45,7 @@ type HeadscaleInContainer struct {
// optional config
port int
aclPolicy *headscale.ACLPolicy
env map[string]string
env []string
tlsCert []byte
tlsKey []byte
}
@@ -56,7 +55,7 @@ type Option = func(c *HeadscaleInContainer)
func WithACLPolicy(acl *headscale.ACLPolicy) Option {
return func(hsic *HeadscaleInContainer) {
// TODO(kradalby): Move somewhere appropriate
hsic.env["HEADSCALE_ACL_POLICY_PATH"] = aclPolicyPath
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_ACL_POLICY_PATH=%s", aclPolicyPath))
hsic.aclPolicy = acl
}
@@ -70,8 +69,8 @@ func WithTLS() Option {
}
// TODO(kradalby): Move somewhere appropriate
hsic.env["HEADSCALE_TLS_CERT_PATH"] = tlsCertPath
hsic.env["HEADSCALE_TLS_KEY_PATH"] = tlsKeyPath
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_TLS_CERT_PATH=%s", tlsCertPath))
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_TLS_KEY_PATH=%s", tlsKeyPath))
hsic.tlsCert = cert
hsic.tlsKey = key
@@ -81,7 +80,7 @@ func WithTLS() Option {
func WithConfigEnv(configEnv map[string]string) Option {
return func(hsic *HeadscaleInContainer) {
for key, value := range configEnv {
hsic.env[key] = value
hsic.env = append(hsic.env, fmt.Sprintf("%s=%s", key, value))
}
}
}
@@ -103,10 +102,12 @@ func WithTestName(testName string) Option {
func WithHostnameAsServerURL() Option {
return func(hsic *HeadscaleInContainer) {
hsic.env["HEADSCALE_SERVER_URL"] = fmt.Sprintf("http://%s",
net.JoinHostPort(hsic.GetHostname(),
fmt.Sprintf("%d", hsic.port)),
)
hsic.env = append(
hsic.env,
fmt.Sprintf("HEADSCALE_SERVER_URL=http://%s:%d",
hsic.GetHostname(),
hsic.port,
))
}
}
@@ -128,8 +129,6 @@ func New(
pool: pool,
network: network,
env: DefaultConfigEnv(),
}
for _, opt := range opts {
@@ -145,13 +144,6 @@ func New(
ContextDir: dockerContextPath,
}
env := []string{}
for key, value := range hsic.env {
env = append(env, fmt.Sprintf("%s=%s", key, value))
}
log.Printf("ENV: \n%s", spew.Sdump(hsic.env))
runOptions := &dockertest.RunOptions{
Name: hsic.hostname,
ExposedPorts: []string{portProto},
@@ -160,7 +152,7 @@ func New(
// TODO(kradalby): Get rid of this hack; we currently need to give ourselves some time
// to inject the headscale configuration further down.
Entrypoint: []string{"/bin/bash", "-c", "/bin/sleep 3 ; headscale serve"},
Env: env,
Env: hsic.env,
}
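Storing env as a []string of KEY=VALUE pairs matches what dockertest's RunOptions.Env field expects, which is why the conversion loop deleted above becomes unnecessary. A minimal sketch of the conversion the map-based version had to perform (note that Go map iteration order is not deterministic):

package main

import "fmt"

func main() {
    // Map form: convenient for last-write-wins overrides.
    envMap := map[string]string{
        "HEADSCALE_LOG_LEVEL": "trace",
        "HEADSCALE_DB_TYPE":   "sqlite3",
    }

    // Slice form: the KEY=VALUE shape the container runtime expects.
    env := make([]string, 0, len(envMap))
    for key, value := range envMap {
        env = append(env, fmt.Sprintf("%s=%s", key, value))
    }

    fmt.Println(env)
}

The trade-off is that appending to a slice no longer de-duplicates keys: setting the same variable twice produces two entries, leaving it to the container runtime to resolve the duplicate.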
// dockertest isn't very good at handling containers that have already
@@ -185,7 +177,7 @@ func New(
hsic.container = container
err = hsic.WriteFile("/etc/headscale/config.yaml", []byte(MinimumConfigYAML()))
err = hsic.WriteFile("/etc/headscale/config.yaml", []byte(DefaultConfigYAML()))
if err != nil {
return nil, fmt.Errorf("failed to write headscale config to container: %w", err)
}
@@ -324,8 +316,6 @@ func (t *HeadscaleInContainer) CreateNamespace(
func (t *HeadscaleInContainer) CreateAuthKey(
namespace string,
reusable bool,
ephemeral bool,
) (*v1.PreAuthKey, error) {
command := []string{
"headscale",
@@ -333,20 +323,13 @@ func (t *HeadscaleInContainer) CreateAuthKey(
namespace,
"preauthkeys",
"create",
"--reusable",
"--expiration",
"24h",
"--output",
"json",
}
if reusable {
command = append(command, "--reusable")
}
if ephemeral {
command = append(command, "--ephemeral")
}
result, _, err := dockertestutil.ExecuteCommand(
t.container,
command,
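Because the command passes --output json, its stdout can be decoded straight into a pre-auth-key value. A minimal sketch of that decode, using an illustrative struct in place of the generated v1.PreAuthKey and assumed JSON field names:

package main

import (
    "encoding/json"
    "fmt"
)

// preAuthKey mirrors the fields a test might read from the CLI output;
// this is an illustrative subset, not the generated v1.PreAuthKey type.
type preAuthKey struct {
    Key      string `json:"key"`
    Reusable bool   `json:"reusable"`
}

func main() {
    // Hypothetical stdout of: headscale --namespace test preauthkeys create --reusable --output json
    result := `{"key":"abc123","reusable":true}`

    var key preAuthKey
    if err := json.Unmarshal([]byte(result), &key); err != nil {
        panic(err)
    }
    fmt.Println(key.Key, key.Reusable) // abc123 true
}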

View File

@@ -43,11 +43,11 @@ var (
"1.24.2",
"1.22.2",
"1.20.4",
"1.18.2",
}
// tailscaleVersionsUnavailable = []string{
// // These versions seem to fail when fetching from apt.
// "1.18.2",
// "1.16.2",
// "1.14.6",
// "1.12.4",
@@ -194,9 +194,9 @@ func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
return headscale, nil
}
func (s *Scenario) CreatePreAuthKey(namespace string, reusable bool, ephemeral bool) (*v1.PreAuthKey, error) {
func (s *Scenario) CreatePreAuthKey(namespace string) (*v1.PreAuthKey, error) {
if headscale, err := s.Headscale(); err == nil {
key, err := headscale.CreateAuthKey(namespace, reusable, ephemeral)
key, err := headscale.CreateAuthKey(namespace)
if err != nil {
return nil, fmt.Errorf("failed to create namespace: %w", err)
}
@@ -368,7 +368,7 @@ func (s *Scenario) CreateHeadscaleEnv(
return err
}
key, err := s.CreatePreAuthKey(namespaceName, true, false)
key, err := s.CreatePreAuthKey(namespaceName)
if err != nil {
return err
}
@@ -469,19 +469,3 @@ func (s *Scenario) ListTailscaleClientsFQDNs(namespaces ...string) ([]string, er
return allFQDNs, nil
}
func (s *Scenario) WaitForTailscaleLogout() {
for _, namespace := range s.namespaces {
for _, client := range namespace.Clients {
namespace.syncWaitGroup.Add(1)
go func(c TailscaleClient) {
defer namespace.syncWaitGroup.Done()
// TODO(kradalby): error handle this
_ = c.WaitForLogout()
}(client)
}
namespace.syncWaitGroup.Wait()
}
}
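The TODO above notes that WaitForLogout errors are discarded. A minimal sketch of one way to surface them, assuming a buffered error channel drained after the WaitGroup finishes (helper names are illustrative):

package main

import (
    "fmt"
    "sync"
)

// waitAll fans out one goroutine per task, collects any errors on a
// buffered channel, and returns them once every goroutine has finished.
func waitAll(tasks []func() error) []error {
    var wg sync.WaitGroup
    errCh := make(chan error, len(tasks)) // buffered so senders never block

    for _, task := range tasks {
        wg.Add(1)
        go func(run func() error) {
            defer wg.Done()
            if err := run(); err != nil {
                errCh <- err
            }
        }(task)
    }

    wg.Wait()
    close(errCh)

    var errs []error
    for err := range errCh {
        errs = append(errs, err)
    }
    return errs
}

func main() {
    errs := waitAll([]func() error{
        func() error { return nil },
        func() error { return fmt.Errorf("logout timed out") },
    })
    fmt.Println(len(errs)) // 1
}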

View File

@@ -62,7 +62,7 @@ func TestHeadscale(t *testing.T) {
})
t.Run("create-auth-key", func(t *testing.T) {
_, err := scenario.CreatePreAuthKey(namespace, true, false)
_, err := scenario.CreatePreAuthKey(namespace)
if err != nil {
t.Errorf("failed to create preauthkey: %s", err)
}
@@ -166,7 +166,7 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
})
t.Run("join-headscale", func(t *testing.T) {
key, err := scenario.CreatePreAuthKey(namespace, true, false)
key, err := scenario.CreatePreAuthKey(namespace)
if err != nil {
t.Errorf("failed to create preauthkey: %s", err)
}

View File

@@ -153,15 +153,9 @@ func (machine *Machine) isOnline() bool {
return machine.LastSeen.After(time.Now().Add(-keepAliveInterval))
}
// isEphemeral reports whether the machine is registered as an Ephemeral node.
// https://tailscale.com/kb/1111/ephemeral-nodes/
func (machine *Machine) isEphemeral() bool {
return machine.AuthKey != nil && machine.AuthKey.Ephemeral
}
func containsAddresses(inputs []string, addrs []string) bool {
for _, addr := range addrs {
if containsStr(inputs, addr) {
if contains(inputs, addr) {
return true
}
}
@@ -194,7 +188,6 @@ func getFilteredByACLPeers(
peers := make(map[uint64]Machine)
// ACL-filter peers here. We iterate through machines in all namespaces and search the computed aclRules
// for a match between rule SrcIPs and DstPorts. If a rule matches, we allow the machine to be viewable.
machineIPs := machine.IPAddresses.ToStringSlice()
for _, peer := range machines {
if peer.ID == machine.ID {
continue
@@ -204,23 +197,22 @@ func getFilteredByACLPeers(
for _, d := range rule.DstPorts {
dst = append(dst, d.IP)
}
peerIPs := peer.IPAddresses.ToStringSlice()
if matchSourceAndDestinationWithRule(
rule.SrcIPs,
dst,
machineIPs,
peerIPs,
machine.IPAddresses.ToStringSlice(),
peer.IPAddresses.ToStringSlice(),
) || // match source and destination
matchSourceAndDestinationWithRule(
rule.SrcIPs,
dst,
peerIPs,
machineIPs,
peer.IPAddresses.ToStringSlice(),
machine.IPAddresses.ToStringSlice(),
) || // match return path
matchSourceAndDestinationWithRule(
rule.SrcIPs,
dst,
machineIPs,
machine.IPAddresses.ToStringSlice(),
[]string{"*"},
) || // match source and all destinations
matchSourceAndDestinationWithRule(
@@ -233,13 +225,13 @@ func getFilteredByACLPeers(
rule.SrcIPs,
dst,
[]string{"*"},
peerIPs,
peer.IPAddresses.ToStringSlice(),
) || // match all sources and destination
matchSourceAndDestinationWithRule(
rule.SrcIPs,
dst,
[]string{"*"},
machineIPs,
machine.IPAddresses.ToStringSlice(),
) { // match all sources and machine (return path)
peers[peer.ID] = peer
}
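Each branch above feeds a different source/destination combination into matchSourceAndDestinationWithRule, with []string{"*"} standing in for any address. A minimal reconstruction of what such a matcher might look like, assuming it succeeds when the rule's sources overlap the given sources and its destinations overlap the given destinations (illustrative only, not the actual implementation):

package main

import "fmt"

// matchAddrs reports whether any wanted entry overlaps have,
// treating "*" on either side as a wildcard (illustrative only).
func matchAddrs(wanted, have []string) bool {
    for _, w := range wanted {
        for _, h := range have {
            if w == "*" || h == "*" || w == h {
                return true
            }
        }
    }
    return false
}

// matchSourceAndDestination is a hypothetical stand-in: a rule matches
// when its sources cover the sources and its destinations cover the dests.
func matchSourceAndDestination(ruleSources, ruleDests, sources, dests []string) bool {
    return matchAddrs(ruleSources, sources) && matchAddrs(ruleDests, dests)
}

func main() {
    fmt.Println(matchSourceAndDestination(
        []string{"100.64.0.1"}, []string{"*"},
        []string{"100.64.0.1"}, []string{"100.64.0.2"},
    )) // true: the source matches and the wildcard covers any destination
}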
@@ -394,7 +386,7 @@ func (h *Headscale) GetMachineByGivenName(namespace string, givenName string) (*
// GetMachineByID finds a Machine by ID and returns the Machine struct.
func (h *Headscale) GetMachineByID(id uint64) (*Machine, error) {
m := Machine{}
if result := h.db.Preload("AuthKey").Preload("Namespace").Find(&Machine{ID: id}).First(&m); result.Error != nil {
if result := h.db.Preload("Namespace").Find(&Machine{ID: id}).First(&m); result.Error != nil {
return nil, result.Error
}
@@ -406,7 +398,7 @@ func (h *Headscale) GetMachineByMachineKey(
machineKey key.MachinePublic,
) (*Machine, error) {
m := Machine{}
if result := h.db.Preload("AuthKey").Preload("Namespace").First(&m, "machine_key = ?", MachinePublicKeyStripPrefix(machineKey)); result.Error != nil {
if result := h.db.Preload("Namespace").First(&m, "machine_key = ?", MachinePublicKeyStripPrefix(machineKey)); result.Error != nil {
return nil, result.Error
}
@@ -418,7 +410,7 @@ func (h *Headscale) GetMachineByNodeKey(
nodeKey key.NodePublic,
) (*Machine, error) {
machine := Machine{}
if result := h.db.Preload("AuthKey").Preload("Namespace").First(&machine, "node_key = ?",
if result := h.db.Preload("Namespace").First(&machine, "node_key = ?",
NodePublicKeyStripPrefix(nodeKey)); result.Error != nil {
return nil, result.Error
}
@@ -431,7 +423,7 @@ func (h *Headscale) GetMachineByAnyKey(
machineKey key.MachinePublic, nodeKey key.NodePublic, oldNodeKey key.NodePublic,
) (*Machine, error) {
machine := Machine{}
if result := h.db.Preload("AuthKey").Preload("Namespace").First(&machine, "machine_key = ? OR node_key = ? OR node_key = ?",
if result := h.db.Preload("Namespace").First(&machine, "machine_key = ? OR node_key = ? OR node_key = ?",
MachinePublicKeyStripPrefix(machineKey),
NodePublicKeyStripPrefix(nodeKey),
NodePublicKeyStripPrefix(oldNodeKey)); result.Error != nil {
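These lookups eager-load the Namespace association (and, before this change, AuthKey) so callers can use machine.Namespace without issuing a second query. A minimal self-contained sketch of GORM's Preload with an in-memory SQLite database and illustrative types:

package main

import (
    "fmt"

    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
)

type Namespace struct {
    ID   uint
    Name string
}

type Machine struct {
    ID          uint64
    Hostname    string
    NamespaceID uint
    Namespace   Namespace // filled in by Preload, zero value otherwise
}

func main() {
    db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
    if err != nil {
        panic(err)
    }
    if err := db.AutoMigrate(&Namespace{}, &Machine{}); err != nil {
        panic(err)
    }

    ns := Namespace{Name: "test"}
    db.Create(&ns)
    db.Create(&Machine{Hostname: "node1", NamespaceID: ns.ID})

    var m Machine
    if result := db.Preload("Namespace").First(&m, "hostname = ?", "node1"); result.Error != nil {
        panic(result.Error)
    }
    fmt.Println(m.Namespace.Name) // test
}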
@@ -691,15 +683,7 @@ func (h *Headscale) toNode(
}
primaryPrefixes := Routes(primaryRoutes).toPrefixes()
machineRoutes, err := h.GetMachineRoutes(&machine)
if err != nil {
return nil, err
}
for _, route := range machineRoutes {
if route.Enabled && (route.IsPrimary || route.isExitRoute()) {
allowedIPs = append(allowedIPs, netip.Prefix(route.Prefix))
}
}
allowedIPs = append(allowedIPs, primaryPrefixes...)
var derp string
if machine.HostInfo.NetInfo != nil {
@@ -860,7 +844,6 @@ func getTags(
func (h *Headscale) RegisterMachineFromAuthCallback(
nodeKeyStr string,
namespaceName string,
machineExpiry *time.Time,
registrationMethod string,
) (*Machine, error) {
nodeKey := key.NodePublic{}
@@ -873,7 +856,6 @@ func (h *Headscale) RegisterMachineFromAuthCallback(
Str("nodeKey", nodeKey.ShortString()).
Str("namespaceName", namespaceName).
Str("registrationMethod", registrationMethod).
Str("expiresAt", fmt.Sprintf("%v", machineExpiry)).
Msg("Registering machine from API/CLI or auth callback")
if machineInterface, ok := h.registrationCache.Get(NodePublicKeyStripPrefix(nodeKey)); ok {
@@ -895,10 +877,6 @@ func (h *Headscale) RegisterMachineFromAuthCallback(
registrationMachine.NamespaceID = namespace.ID
registrationMachine.RegisterMethod = registrationMethod
if machineExpiry != nil {
registrationMachine.Expiry = machineExpiry
}
machine, err := h.RegisterMachine(
registrationMachine,
)
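Values travel through h.registrationCache as interface values, so reads need a checked type assertion before the Machine can be used. A minimal sketch of that pattern, with an ordinary map standing in for the cache (types and keys here are illustrative):

package main

import "fmt"

type Machine struct{ Hostname string }

// registrationCache stands in for the real cache's Get method,
// which returns (interface{}, bool).
var registrationCache = map[string]interface{}{
    "nodekey:abc": Machine{Hostname: "node1"},
}

func main() {
    value, ok := registrationCache["nodekey:abc"]
    if !ok {
        fmt.Println("machine not found in registration cache")
        return
    }

    // Checked assertion: a bare value.(Machine) would panic on a mismatch.
    machine, ok := value.(Machine)
    if !ok {
        fmt.Println("cached value is not a Machine")
        return
    }
    fmt.Println(machine.Hostname) // node1
}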

oidc.go
View File

@@ -218,7 +218,7 @@ func (h *Headscale) OIDCCallback(
return
}
nodeKey, machineExists, err := h.validateMachineForOIDCCallback(writer, state, claims, idToken.Expiry)
nodeKey, machineExists, err := h.validateMachineForOIDCCallback(writer, state, claims)
if err != nil || machineExists {
return
}
@@ -236,7 +236,7 @@ func (h *Headscale) OIDCCallback(
return
}
if err := h.registerMachineForOIDCCallback(writer, namespace, nodeKey, idToken.Expiry); err != nil {
if err := h.registerMachineForOIDCCallback(writer, namespace, nodeKey); err != nil {
return
}
@@ -476,7 +476,6 @@ func (h *Headscale) validateMachineForOIDCCallback(
writer http.ResponseWriter,
state string,
claims *IDTokenClaims,
expiry time.Time,
) (*key.NodePublic, bool, error) {
// retrieve machinekey from state cache
nodeKeyIf, nodeKeyFound := h.registrationCache.Get(state)
@@ -547,7 +546,7 @@ func (h *Headscale) validateMachineForOIDCCallback(
Str("machine", machine.Hostname).
Msg("machine already registered, reauthenticating")
err := h.RefreshMachine(machine, expiry)
err := h.RefreshMachine(machine, time.Time{})
if err != nil {
log.Error().
Caller().
@@ -561,10 +560,6 @@ func (h *Headscale) validateMachineForOIDCCallback(
return nil, true, err
}
log.Debug().
Str("machine", machine.Hostname).
Str("expiresAt", fmt.Sprintf("%v", expiry)).
Msg("successfully refreshed machine")
var content bytes.Buffer
if err := oidcCallbackTemplate.Execute(&content, oidcCallbackTemplateConfig{
@@ -684,12 +679,10 @@ func (h *Headscale) registerMachineForOIDCCallback(
writer http.ResponseWriter,
namespace *Namespace,
nodeKey *key.NodePublic,
expiry time.Time,
) error {
if _, err := h.RegisterMachineFromAuthCallback(
nodeKey.String(),
namespace.Name,
&expiry,
RegisterMethodOIDC,
); err != nil {
log.Error().

View File

@@ -255,7 +255,7 @@ func (h *Headscale) ApplePlatformConfig(
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusBadRequest)
_, err := writer.Write(
[]byte("Invalid platform. Only ios, macos-app-store and macos-standalone are supported"),
[]byte("Invalid platform, only ios and macos is supported"),
)
if err != nil {
log.Error().

View File

@@ -622,20 +622,6 @@ func (h *Headscale) handleMachineLogOutCommon(
Caller().
Err(err).
Msg("Failed to write response")
return
}
if machine.isEphemeral() {
err = h.HardDeleteMachine(&machine)
if err != nil {
log.Error().
Err(err).
Str("machine", machine.Hostname).
Msg("Cannot delete ephemeral machine from the database")
}
return
}
log.Info().

View File

@@ -664,11 +664,7 @@ func (h *Headscale) scheduledPollWorker(
Str("machine", machine.Hostname).
Bool("noise", isNoise).
Msg("Sending keepalive")
select {
case keepAliveChan <- data:
case <-ctx.Done():
return
}
keepAliveChan <- data
case <-updateCheckerTicker.C:
log.Debug().
@@ -678,11 +674,7 @@ func (h *Headscale) scheduledPollWorker(
Msg("Sending update request")
updateRequestsFromNode.WithLabelValues(machine.Namespace.Name, machine.Hostname, "scheduled-update").
Inc()
select {
case updateChan <- struct{}{}:
case <-ctx.Done():
return
}
updateChan <- struct{}{}
}
}
}
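The removed select blocks are what let a keepalive or update send abort once the poll context is cancelled; the bare channel sends that replace them block forever if the receiver has gone away. For reference, a minimal sketch of the cancellable-send pattern:

package main

import (
    "context"
    "fmt"
    "time"
)

// sendOrDone attempts a channel send but gives up when ctx is cancelled,
// so a worker goroutine cannot leak on a departed receiver.
func sendOrDone(ctx context.Context, ch chan<- []byte, data []byte) bool {
    select {
    case ch <- data:
        return true
    case <-ctx.Done():
        return false
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()

    ch := make(chan []byte) // no receiver: the send can never complete
    fmt.Println(sendOrDone(ctx, ch, []byte("keepalive"))) // false after the timeout
}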

View File

@@ -6,7 +6,6 @@ import (
"gopkg.in/check.v1"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
func (s *Suite) TestGetRoutes(c *check.C) {
@@ -353,102 +352,3 @@ func (s *Suite) TestSubnetFailover(c *check.C) {
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 2)
}
// TestAllowedIPRoutes tests that the AllowedIPs are correctly set for a node,
// including both the primary routes the node is responsible for, and the
// exit node routes if enabled.
func (s *Suite) TestAllowedIPRoutes(c *check.C) {
namespace, err := app.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("test", "test_enable_route_machine")
c.Assert(err, check.NotNil)
prefix, err := netip.ParsePrefix(
"10.0.0.0/24",
)
c.Assert(err, check.IsNil)
prefix2, err := netip.ParsePrefix(
"150.0.10.0/25",
)
c.Assert(err, check.IsNil)
prefixExitNodeV4, err := netip.ParsePrefix(
"0.0.0.0/0",
)
c.Assert(err, check.IsNil)
prefixExitNodeV6, err := netip.ParsePrefix(
"::/0",
)
c.Assert(err, check.IsNil)
hostInfo1 := tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{prefix, prefix2, prefixExitNodeV4, prefixExitNodeV6},
}
nodeKey := key.NewNode()
discoKey := key.NewDisco()
machineKey := key.NewMachine()
now := time.Now()
machine1 := Machine{
ID: 1,
MachineKey: MachinePublicKeyStripPrefix(machineKey.Public()),
NodeKey: NodePublicKeyStripPrefix(nodeKey.Public()),
DiscoKey: DiscoPublicKeyStripPrefix(discoKey.Public()),
Hostname: "test_enable_route_machine",
NamespaceID: namespace.ID,
RegisterMethod: RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: HostInfo(hostInfo1),
LastSeen: &now,
}
app.db.Save(&machine1)
err = app.processMachineRoutes(&machine1)
c.Assert(err, check.IsNil)
err = app.EnableRoutes(&machine1, prefix.String())
c.Assert(err, check.IsNil)
// We intentionally do not enable this route, to verify that it stays disabled.
// err = app.EnableRoutes(&machine1, prefix2.String())
// c.Assert(err, check.IsNil)
err = app.EnableRoutes(&machine1, prefixExitNodeV4.String())
c.Assert(err, check.IsNil)
err = app.EnableRoutes(&machine1, prefixExitNodeV6.String())
c.Assert(err, check.IsNil)
err = app.handlePrimarySubnetFailover()
c.Assert(err, check.IsNil)
enabledRoutes1, err := app.GetEnabledRoutes(&machine1)
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes1), check.Equals, 3)
peer, err := app.toNode(machine1, "headscale.net", nil)
c.Assert(err, check.IsNil)
c.Assert(len(peer.AllowedIPs), check.Equals, 3)
foundExitNodeV4 := false
foundExitNodeV6 := false
for _, allowedIP := range peer.AllowedIPs {
if allowedIP == prefixExitNodeV4 {
foundExitNodeV4 = true
}
if allowedIP == prefixExitNodeV6 {
foundExitNodeV6 = true
}
}
c.Assert(foundExitNodeV4, check.Equals, true)
c.Assert(foundExitNodeV6, check.Equals, true)
}
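The assertions above expect both 0.0.0.0/0 and ::/0 to survive into AllowedIPs once exit-node routes are enabled. A default route is exactly a prefix with zero bits, which is presumably what route.isExitRoute() tests; a minimal sketch with net/netip:

package main

import (
    "fmt"
    "net/netip"
)

// isExitRoute reports whether a prefix is a default route (0.0.0.0/0 or ::/0).
// This is an illustrative stand-in for the route.isExitRoute() seen above.
func isExitRoute(p netip.Prefix) bool {
    return p.Bits() == 0
}

func main() {
    fmt.Println(isExitRoute(netip.MustParsePrefix("0.0.0.0/0")))   // true
    fmt.Println(isExitRoute(netip.MustParsePrefix("::/0")))        // true
    fmt.Println(isExitRoute(netip.MustParsePrefix("10.0.0.0/24"))) // false
}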

View File

@@ -24,7 +24,7 @@
<li>
ALT + Click the Tailscale icon in the menu and hover over the Debug menu
</li>
<li>Under "Custom Login Server", select "Add Account..."</li>
<li>Under "Custm Login Server", select "Add Account..."</li>
<li>
Enter "{{.URL}}" of the headscale instance and press "Add Account"
</li>
@@ -68,16 +68,7 @@
<!--
<pre><code>curl {{.URL}}/apple/ios</code></pre>
-->
<ul>
<li>
for app store client:
<code>curl {{.URL}}/apple/macos-app-store</code>
</li>
<li>
for standalone client:
<code>curl {{.URL}}/apple/macos-standalone</code>
</li>
</ul>
<pre><code>curl {{.URL}}/apple/macos</code></pre>
<h2>Profiles</h2>

View File

@@ -269,16 +269,6 @@ func stringToIPPrefix(prefixes []string) ([]netip.Prefix, error) {
return result, nil
}
func containsStr(ts []string, t string) bool {
for _, v := range ts {
if v == t {
return true
}
}
return false
}
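containsStr is subsumed by the generic contains below, whose union constraint covers both string and netip.Prefix. Both of those types are comparable, so plain == would also work; the reflect.DeepEqual form in the original is simply the more general choice. A minimal self-contained sketch of the same generic shape:

package main

import (
    "fmt"
    "net/netip"
)

// contains mirrors the generic helper: one implementation serving both
// string slices and netip.Prefix slices, using == since both types are comparable.
func contains[T string | netip.Prefix](ts []T, t T) bool {
    for _, v := range ts {
        if v == t {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(contains([]string{"10.0.0.1", "10.0.0.2"}, "10.0.0.2")) // true

    p := netip.MustParsePrefix("10.0.0.0/24")
    fmt.Println(contains([]netip.Prefix{p}, netip.MustParsePrefix("10.0.0.0/24"))) // true
}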
func contains[T string | netip.Prefix](ts []T, t T) bool {
for _, v := range ts {
if reflect.DeepEqual(v, t) {