Mirror of https://github.com/juanfont/headscale.git, synced 2026-02-14 11:47:43 +01:00.

Compare commits: v0.16.0-be...v0.16.0-be (11 commits)
Commits in this range:

- c50d3aa9bd
- 4ccff8bf28
- 5b5298b025
- 8e0939f403
- cf3fc85196
- e0b15c18ce
- 566b8c3df3
- 32a6151df9
- 3777de7133
- 8cae4f80d7
- 911c5bddce
.github/workflows/test-integration.yml (vendored, 7 changes)
```diff
@@ -27,4 +27,9 @@ jobs:
       - name: Run Integration tests
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: nix develop --command -- make test_integration
+        uses: nick-fields/retry@v2
+        with:
+          timeout_minutes: 240
+          max_attempts: 5
+          retry_on: error
+          command: nix develop --command -- make test_integration
```
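Instead of invoking make once, the step now delegates to nick-fields/retry, which re-runs the command on failure. As a rough illustration of the retry semantics (a hedged Go sketch, under the assumption that the action gives each attempt its own timeout; the real action is not implemented this way):

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	const maxAttempts = 5             // max_attempts: 5
	const timeout = 240 * time.Minute // timeout_minutes: 240

	for attempt := 1; attempt <= maxAttempts; attempt++ {
		// Each attempt runs the same command the workflow specifies.
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		cmd := exec.CommandContext(ctx,
			"nix", "develop", "--command", "--", "make", "test_integration")
		err := cmd.Run()
		cancel()
		if err == nil {
			return // success: stop retrying
		}
		fmt.Printf("attempt %d failed: %v\n", attempt, err)
	}
}
```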
CHANGELOG.md

```diff
@@ -30,11 +30,10 @@
 - Add -c option to specify config file from command line [#285](https://github.com/juanfont/headscale/issues/285) [#612](https://github.com/juanfont/headscale/pull/601)
 - Add configuration option to allow Tailscale clients to use a random WireGuard port. [kb/1181/firewalls](https://tailscale.com/kb/1181/firewalls) [#624](https://github.com/juanfont/headscale/pull/624)
 - Improve obtuse UX regarding missing configuration (`ephemeral_node_inactivity_timeout` not set) [#639](https://github.com/juanfont/headscale/pull/639)
-- Fix nodes being shown as 'offline' in `tailscale status` [648](https://github.com/juanfont/headscale/pull/648)
+- Fix nodes being shown as 'offline' in `tailscale status` [#648](https://github.com/juanfont/headscale/pull/648)
 - Improve shutdown behaviour [#651](https://github.com/juanfont/headscale/pull/651)
-- Drop Gin as web framework in Headscale [648](https://github.com/juanfont/headscale/pull/648)
 - Make tailnet node updates check interval configurable [#675](https://github.com/juanfont/headscale/pull/675)

 ## 0.15.0 (2022-03-20)
```
config-example.yaml

```diff
@@ -103,6 +103,12 @@ disable_check_updates: false
 # Time before an inactive ephemeral node is deleted?
 ephemeral_node_inactivity_timeout: 30m

+# Period to check for node updates in the tailnet. A value too low will severely affect
+# CPU consumption of Headscale. A value too high (over 60s) will cause problems
+# to the nodes, as they won't get updates or keep alive messages in time.
+# In case of doubt, do not touch the default 10s.
+node_update_check_interval: 10s
+
 # SQLite config
 db_type: sqlite3
 db_path: /var/lib/headscale/db.sqlite
```
config.go (16 changes)
```diff
@@ -26,6 +26,7 @@ type Config struct {
 	GRPCAddr                       string
 	GRPCAllowInsecure              bool
 	EphemeralNodeInactivityTimeout time.Duration
+	NodeUpdateCheckInterval        time.Duration
 	IPPrefixes                     []netaddr.IPPrefix
 	PrivateKeyPath                 string
 	BaseDomain                     string
@@ -162,6 +163,8 @@ func LoadConfig(path string, isFile bool) error {
 	viper.SetDefault("ephemeral_node_inactivity_timeout", "120s")

+	viper.SetDefault("node_update_check_interval", "10s")
+
 	if err := viper.ReadInConfig(); err != nil {
 		log.Warn().Err(err).Msg("Failed to read configuration from disk")
@@ -217,6 +220,15 @@ func LoadConfig(path string, isFile bool) error {
 		)
 	}

+	maxNodeUpdateCheckInterval, _ := time.ParseDuration("60s")
+	if viper.GetDuration("node_update_check_interval") > maxNodeUpdateCheckInterval {
+		errorText += fmt.Sprintf(
+			"Fatal config error: node_update_check_interval (%s) is set too high, must be less than %s",
+			viper.GetString("node_update_check_interval"),
+			maxNodeUpdateCheckInterval,
+		)
+	}
+
 	if errorText != "" {
 		//nolint
 		return errors.New(strings.TrimSuffix(errorText, "\n"))
@@ -478,6 +490,10 @@ func GetHeadscaleConfig() (*Config, error) {
 			"ephemeral_node_inactivity_timeout",
 		),

+		NodeUpdateCheckInterval: viper.GetDuration(
+			"node_update_check_interval",
+		),
+
 		DBtype: viper.GetString("db_type"),
 		DBpath: AbsolutePathFromConfigPath(viper.GetString("db_path")),
 		DBhost: viper.GetString("db_host"),
```
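Because `node_update_check_interval` is read through viper, the default, the parse, and the ceiling check can be seen in isolation; a minimal standalone sketch of that pattern (the program structure is illustrative, not headscale's):

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/viper"
)

func main() {
	// Default used when the key is absent from the loaded config.
	viper.SetDefault("node_update_check_interval", "10s")

	// GetDuration parses duration strings such as "10s" or "30m".
	interval := viper.GetDuration("node_update_check_interval")

	// Mirror config.go's ceiling: anything above 60s is rejected.
	maxInterval, _ := time.ParseDuration("60s")
	if interval > maxInterval {
		fmt.Printf("node_update_check_interval (%s) is set too high, must be less than %s\n",
			interval, maxInterval)
		return
	}

	fmt.Printf("using node update check interval: %s\n", interval)
}
```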
integration_cli_test.go

```diff
@@ -40,13 +40,13 @@ func (s *IntegrationCLITestSuite) SetupTest() {
 	if ppool, err := dockertest.NewPool(""); err == nil {
 		s.pool = *ppool
 	} else {
-		log.Fatalf("Could not connect to docker: %s", err)
+		s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
 	}

 	if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
 		s.network = *pnetwork
 	} else {
-		log.Fatalf("Could not create network: %s", err)
+		s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
 	}

 	headscaleBuildOptions := &dockertest.BuildOptions{
@@ -56,7 +56,7 @@ func (s *IntegrationCLITestSuite) SetupTest() {
 	currentPath, err := os.Getwd()
 	if err != nil {
-		log.Fatalf("Could not determine current path: %s", err)
+		s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
 	}

 	headscaleOptions := &dockertest.RunOptions{
@@ -68,11 +68,16 @@ func (s *IntegrationCLITestSuite) SetupTest() {
 		Cmd: []string{"headscale", "serve"},
 	}

+	err = s.pool.RemoveContainerByName(headscaleHostname)
+	if err != nil {
+		s.FailNow(fmt.Sprintf("Could not remove existing container before building test: %s", err), "")
+	}
+
 	fmt.Println("Creating headscale container")
 	if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
 		s.headscale = *pheadscale
 	} else {
-		log.Fatalf("Could not start headscale container: %s", err)
+		s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
 	}
 	fmt.Println("Created headscale container")
```
integration_common_test.go

```diff
@@ -6,7 +6,10 @@ package headscale
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
+	"os"
+	"strconv"
 	"strings"
 	"time"
@@ -16,9 +19,13 @@ import (
 	"inet.af/netaddr"
 )

-const DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
+const (
+	DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
+)

 var (
+	errEnvVarEmpty = errors.New("getenv: environment variable empty")
+
 	IpPrefix4 = netaddr.MustParseIPPrefix("100.64.0.0/10")
 	IpPrefix6 = netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48")
@@ -283,3 +290,25 @@ func getMagicFQDN(
 	return hostnames, nil
 }
+
+func GetEnvStr(key string) (string, error) {
+	v := os.Getenv(key)
+	if v == "" {
+		return v, errEnvVarEmpty
+	}
+
+	return v, nil
+}
+
+func GetEnvBool(key string) (bool, error) {
+	s, err := GetEnvStr(key)
+	if err != nil {
+		return false, err
+	}
+	v, err := strconv.ParseBool(s)
+	if err != nil {
+		return false, err
+	}
+
+	return v, nil
+}
```
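The suites use these helpers so that an unset or malformed `HEADSCALE_INTEGRATION_SAVE_LOG` simply means "do not save logs" rather than a test failure; a small example of that call pattern (`shouldSaveLogs` is a hypothetical wrapper, not part of the diff):

```go
package headscale

// shouldSaveLogs is a hypothetical wrapper around the call pattern the
// test suites use below: any error (unset variable, unparseable value)
// is treated as false instead of aborting the run.
func shouldSaveLogs() bool {
	saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
	if err != nil {
		return false
	}

	return saveLogs
}
```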
integration_embedded_derp_test.go

```diff
@@ -40,41 +40,50 @@ type IntegrationDERPTestSuite struct {
 	pool      dockertest.Pool
 	networks  map[int]dockertest.Network // so we keep the containers isolated
 	headscale dockertest.Resource
+	saveLogs  bool

 	tailscales    map[string]dockertest.Resource
 	joinWaitGroup sync.WaitGroup
 }

 func TestDERPIntegrationTestSuite(t *testing.T) {
+	saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
+	if err != nil {
+		saveLogs = false
+	}
+
 	s := new(IntegrationDERPTestSuite)

 	s.tailscales = make(map[string]dockertest.Resource)
 	s.networks = make(map[int]dockertest.Network)
+	s.saveLogs = saveLogs

 	suite.Run(t, s)

 	// HandleStats, which allows us to check if we passed and save logs
 	// is called after TearDown, so we cannot tear down containers before
 	// we have potentially saved the logs.
-	for _, tailscale := range s.tailscales {
-		if err := s.pool.Purge(&tailscale); err != nil {
-			log.Printf("Could not purge resource: %s\n", err)
+	if s.saveLogs {
+		for _, tailscale := range s.tailscales {
+			if err := s.pool.Purge(&tailscale); err != nil {
+				log.Printf("Could not purge resource: %s\n", err)
+			}
 		}
-	}

-	if !s.stats.Passed() {
-		err := s.saveLog(&s.headscale, "test_output")
-		if err != nil {
-			log.Printf("Could not save log: %s\n", err)
+		if !s.stats.Passed() {
+			err := s.saveLog(&s.headscale, "test_output")
+			if err != nil {
+				log.Printf("Could not save log: %s\n", err)
+			}
 		}
-	}

-	if err := s.pool.Purge(&s.headscale); err != nil {
-		log.Printf("Could not purge resource: %s\n", err)
-	}
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}

-	for _, network := range s.networks {
-		if err := network.Close(); err != nil {
-			log.Printf("Could not close network: %s\n", err)
+		for _, network := range s.networks {
+			if err := network.Close(); err != nil {
+				log.Printf("Could not close network: %s\n", err)
+			}
 		}
 	}
 }
```
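The comment about HandleStats reflects testify's suite lifecycle: a suite implementing `suite.WithStats` has `HandleStats` invoked after `TearDownSuite`, which is why cleanup moves after `suite.Run` when logs must survive. A minimal sketch of that ordering (all names here are hypothetical):

```go
package example

import (
	"log"
	"testing"

	"github.com/stretchr/testify/suite"
)

// LifecycleSuite is a hypothetical suite showing why cleanup is deferred:
// TearDownSuite runs before HandleStats, so resources needed for saving
// logs must outlive TearDownSuite.
type LifecycleSuite struct {
	suite.Suite
	stats    *suite.SuiteInformation
	saveLogs bool
}

func (s *LifecycleSuite) TearDownSuite() {
	// When saveLogs is set, do nothing here; HandleStats has not run yet.
	if !s.saveLogs {
		log.Println("teardown: purging resources now")
	}
}

// HandleStats satisfies suite.WithStats and runs after TearDownSuite.
func (s *LifecycleSuite) HandleStats(suiteName string, stats *suite.SuiteInformation) {
	s.stats = stats
}

func (s *LifecycleSuite) TestNoop() {}

func TestLifecycleSuite(t *testing.T) {
	s := &LifecycleSuite{saveLogs: true}
	suite.Run(t, s)

	// Only safe spot to purge when logs are kept: HandleStats has run,
	// so s.stats can be inspected and logs saved before cleanup.
	if s.saveLogs {
		log.Printf("suite passed: %v; purging resources", s.stats.Passed())
	}
}
```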
```diff
@@ -83,14 +92,14 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
 	if ppool, err := dockertest.NewPool(""); err == nil {
 		s.pool = *ppool
 	} else {
-		log.Fatalf("Could not connect to docker: %s", err)
+		s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
 	}

 	for i := 0; i < totalContainers; i++ {
 		if pnetwork, err := s.pool.CreateNetwork(fmt.Sprintf("headscale-derp-%d", i)); err == nil {
 			s.networks[i] = *pnetwork
 		} else {
-			log.Fatalf("Could not create network: %s", err)
+			s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
 		}
 	}
@@ -101,7 +110,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
 	currentPath, err := os.Getwd()
 	if err != nil {
-		log.Fatalf("Could not determine current path: %s", err)
+		s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
 	}

 	headscaleOptions := &dockertest.RunOptions{
@@ -120,11 +129,16 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
 		},
 	}

+	err = s.pool.RemoveContainerByName(headscaleHostname)
+	if err != nil {
+		s.FailNow(fmt.Sprintf("Could not remove existing container before building test: %s", err), "")
+	}
+
 	log.Println("Creating headscale container")
 	if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
 		s.headscale = *pheadscale
 	} else {
-		log.Fatalf("Could not start headscale container: %s", err)
+		s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
 	}
 	log.Println("Created headscale container to test DERP")
```
```diff
@@ -290,6 +304,23 @@ func (s *IntegrationDERPTestSuite) tailscaleContainer(
 }

 func (s *IntegrationDERPTestSuite) TearDownSuite() {
+	if !s.saveLogs {
+		for _, tailscale := range s.tailscales {
+			if err := s.pool.Purge(&tailscale); err != nil {
+				log.Printf("Could not purge resource: %s\n", err)
+			}
+		}
+
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}
+
+		for _, network := range s.networks {
+			if err := network.Close(); err != nil {
+				log.Printf("Could not close network: %s\n", err)
+			}
+		}
+	}
 }

 func (s *IntegrationDERPTestSuite) HandleStats(
```
integration_test.go

```diff
@@ -36,6 +36,7 @@ type IntegrationTestSuite struct {
 	pool      dockertest.Pool
 	network   dockertest.Network
 	headscale dockertest.Resource
+	saveLogs  bool

 	namespaces map[string]TestNamespace
@@ -43,6 +44,11 @@ type IntegrationTestSuite struct {
 }

 func TestIntegrationTestSuite(t *testing.T) {
+	saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
+	if err != nil {
+		saveLogs = false
+	}
+
 	s := new(IntegrationTestSuite)

 	s.namespaces = map[string]TestNamespace{
@@ -55,32 +61,35 @@ func TestIntegrationTestSuite(t *testing.T) {
 			tailscales: make(map[string]dockertest.Resource),
 		},
 	}
+	s.saveLogs = saveLogs

 	suite.Run(t, s)

 	// HandleStats, which allows us to check if we passed and save logs
 	// is called after TearDown, so we cannot tear down containers before
 	// we have potentially saved the logs.
-	for _, scales := range s.namespaces {
-		for _, tailscale := range scales.tailscales {
-			if err := s.pool.Purge(&tailscale); err != nil {
-				log.Printf("Could not purge resource: %s\n", err)
+	if s.saveLogs {
+		for _, scales := range s.namespaces {
+			for _, tailscale := range scales.tailscales {
+				if err := s.pool.Purge(&tailscale); err != nil {
+					log.Printf("Could not purge resource: %s\n", err)
+				}
 			}
 		}
-	}

-	if !s.stats.Passed() {
-		err := s.saveLog(&s.headscale, "test_output")
-		if err != nil {
-			log.Printf("Could not save log: %s\n", err)
+		if !s.stats.Passed() {
+			err := s.saveLog(&s.headscale, "test_output")
+			if err != nil {
+				log.Printf("Could not save log: %s\n", err)
+			}
 		}
-	}

-	if err := s.pool.Purge(&s.headscale); err != nil {
-		log.Printf("Could not purge resource: %s\n", err)
-	}
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}

-	if err := s.network.Close(); err != nil {
-		log.Printf("Could not close network: %s\n", err)
+		if err := s.network.Close(); err != nil {
+			log.Printf("Could not close network: %s\n", err)
+		}
 	}
 }
@@ -209,13 +218,13 @@ func (s *IntegrationTestSuite) SetupSuite() {
 	if ppool, err := dockertest.NewPool(""); err == nil {
 		s.pool = *ppool
 	} else {
-		log.Fatalf("Could not connect to docker: %s", err)
+		s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
 	}

 	if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
 		s.network = *pnetwork
 	} else {
-		log.Fatalf("Could not create network: %s", err)
+		s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
 	}

 	headscaleBuildOptions := &dockertest.BuildOptions{
@@ -225,7 +234,7 @@ func (s *IntegrationTestSuite) SetupSuite() {
 	currentPath, err := os.Getwd()
 	if err != nil {
-		log.Fatalf("Could not determine current path: %s", err)
+		s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
 	}

 	headscaleOptions := &dockertest.RunOptions{
@@ -237,11 +246,16 @@ func (s *IntegrationTestSuite) SetupSuite() {
 		Cmd: []string{"headscale", "serve"},
 	}

+	err = s.pool.RemoveContainerByName(headscaleHostname)
+	if err != nil {
+		s.FailNow(fmt.Sprintf("Could not remove existing container before building test: %s", err), "")
+	}
+
 	log.Println("Creating headscale container")
 	if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
 		s.headscale = *pheadscale
 	} else {
-		log.Fatalf("Could not start headscale container: %s", err)
+		s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
 	}
 	log.Println("Created headscale container")
@@ -338,6 +352,23 @@ func (s *IntegrationTestSuite) SetupSuite() {
 }

 func (s *IntegrationTestSuite) TearDownSuite() {
+	if !s.saveLogs {
+		for _, scales := range s.namespaces {
+			for _, tailscale := range scales.tailscales {
+				if err := s.pool.Purge(&tailscale); err != nil {
+					log.Printf("Could not purge resource: %s\n", err)
+				}
+			}
+		}
+
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}
+
+		if err := s.network.Close(); err != nil {
+			log.Printf("Could not close network: %s\n", err)
+		}
+	}
 }

 func (s *IntegrationTestSuite) HandleStats(
```
Integration test config fixtures: the same one-line addition lands in five fixture files, covered by two distinct hunks (the first appears in two files, the second in three).

```diff
@@ -20,6 +20,7 @@ dns_config:
   nameservers:
     - 1.1.1.1
 ephemeral_node_inactivity_timeout: 30m
+node_update_check_interval: 10s
 grpc_allow_insecure: false
 grpc_listen_addr: :50443
 ip_prefixes:
```

```diff
@@ -2,6 +2,7 @@ log_level: trace
 acl_policy_path: ""
 db_type: sqlite3
 ephemeral_node_inactivity_timeout: 30m
+node_update_check_interval: 10s
 ip_prefixes:
   - fd7a:115c:a1e0::/48
   - 100.64.0.0/10
```
poll.go (5 changes)
```diff
@@ -16,8 +16,7 @@ import (
 )

 const (
-	keepAliveInterval   = 60 * time.Second
-	updateCheckInterval = 10 * time.Second
+	keepAliveInterval = 60 * time.Second
 )

 type contextKey string
@@ -640,7 +639,7 @@ func (h *Headscale) scheduledPollWorker(
 	machine *Machine,
 ) {
 	keepAliveTicker := time.NewTicker(keepAliveInterval)
-	updateCheckerTicker := time.NewTicker(updateCheckInterval)
+	updateCheckerTicker := time.NewTicker(h.cfg.NodeUpdateCheckInterval)

 	defer closeChanWithLog(
 		updateChan,
```
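Pulling the interval from `h.cfg` lets each deployment tune its own ticker; a minimal sketch of the pattern with a stripped-down config (only `NodeUpdateCheckInterval` mirrors headscale's field, everything else is illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// Config is a stand-in for the relevant slice of headscale's Config.
type Config struct {
	NodeUpdateCheckInterval time.Duration
}

func main() {
	// 1s here just to keep the demo short; headscale defaults to 10s.
	cfg := Config{NodeUpdateCheckInterval: 1 * time.Second}

	// The worker derives its tick rate from configuration instead of
	// the removed updateCheckInterval constant.
	updateCheckerTicker := time.NewTicker(cfg.NodeUpdateCheckInterval)
	defer updateCheckerTicker.Stop()

	for i := 0; i < 3; i++ {
		<-updateCheckerTicker.C
		fmt.Println("checking for node updates in the tailnet")
	}
}
```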