Compare commits

11 Commits

Author SHA1 Message Date
Juan Font
c50d3aa9bd Merge pull request #675 from juanfont/configurable-update-interval
Make tailnet updates check interval configurable
2022-07-12 13:34:49 +02:00
Juan Font Alonso
4ccff8bf28 Added the new parameter to the integration test params 2022-07-12 13:13:04 +02:00
Juan Font Alonso
5b5298b025 Renamed config param for node update check interval 2022-07-12 12:52:03 +02:00
Juan Font Alonso
8e0939f403 Updated changelog 2022-07-12 12:33:42 +02:00
Juan Font Alonso
cf3fc85196 Make tailnet updates check configurable 2022-07-12 12:27:28 +02:00
Juan Font
e0b15c18ce Merge pull request #667 from kradalby/rerun-docker
Make integration tests retry on failure.
2022-06-27 17:04:39 +02:00
Kristoffer Dalby
566b8c3df3 Fix issue where dockertest fails to start because of container mismatch 2022-06-27 12:07:30 +00:00
Kristoffer Dalby
32a6151df9 Rerun integration tests 5 times if error 2022-06-27 12:02:29 +00:00
Kristoffer Dalby
3777de7133 Use FailNow for CLI tests as well 2022-06-27 12:00:21 +00:00
Kristoffer Dalby
8cae4f80d7 Fail tests instead of fatal
Currently we exit the program if the setup does not work; this can cause
us to leave containers and other resources behind, since we don't run
TearDown. This change just fails the test if we can't set up, which
should mean that TearDown runs as well (see the sketch after the commit list).
2022-06-27 11:58:16 +00:00
Kristoffer Dalby
911c5bddce Make saving logs from tests an option (default false)
We currently have a bit of flaky logic that prevents the docker plugin
from cleaning up the containers if the tests or the setup fatally fail or
crash; this is due to a limitation in the save/passed-stats handling.

This change makes it an environment variable which by default ditches
the logs and makes the containers clean up "correctly" in the teardown
method.
2022-06-27 11:56:37 +00:00
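
Several of the commits above replace log.Fatalf in test setup with the suite's FailNow. A minimal sketch of why that matters, assuming testify's suite package and ory's dockertest (the ExampleSuite name is hypothetical, not headscale's code): log.Fatalf ends the process via os.Exit, so deferred teardown never runs, while FailNow only fails the suite and lets TearDownSuite execute.

package integration

import (
	"fmt"
	"testing"

	"github.com/ory/dockertest/v3"
	"github.com/stretchr/testify/suite"
)

type ExampleSuite struct {
	suite.Suite
	pool dockertest.Pool
}

func (s *ExampleSuite) SetupSuite() {
	if ppool, err := dockertest.NewPool(""); err == nil {
		s.pool = *ppool
	} else {
		// log.Fatalf would call os.Exit and skip TearDownSuite entirely;
		// FailNow marks the suite as failed and unwinds through the testing
		// package, so the deferred TearDownSuite in suite.Run still runs.
		s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
	}
}

func (s *ExampleSuite) TearDownSuite() {
	// Cleanup (purging containers, closing networks) still happens
	// even when SetupSuite bails out with FailNow.
}

func (s *ExampleSuite) TestNoop() {}

func TestExampleSuite(t *testing.T) {
	suite.Run(t, new(ExampleSuite))
}
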
14 changed files with 176 additions and 50 deletions

View File

@@ -27,4 +27,9 @@ jobs:
- name: Run Integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration
uses: nick-fields/retry@v2
with:
timeout_minutes: 240
max_attempts: 5
retry_on: error
command: nix develop --command -- make test_integration

View File

@@ -30,11 +30,10 @@
- Add -c option to specify config file from command line [#285](https://github.com/juanfont/headscale/issues/285) [#612](https://github.com/juanfont/headscale/pull/601)
- Add configuration option to allow Tailscale clients to use a random WireGuard port. [kb/1181/firewalls](https://tailscale.com/kb/1181/firewalls) [#624](https://github.com/juanfont/headscale/pull/624)
- Improve obtuse UX regarding missing configuration (`ephemeral_node_inactivity_timeout` not set) [#639](https://github.com/juanfont/headscale/pull/639)
- Fix nodes being shown as 'offline' in `tailscale status` [648](https://github.com/juanfont/headscale/pull/648)
- Fix nodes being shown as 'offline' in `tailscale status` [#648](https://github.com/juanfont/headscale/pull/648)
- Improve shutdown behaviour [#651](https://github.com/juanfont/headscale/pull/651)
- Drop Gin as web framework in Headscale [648](https://github.com/juanfont/headscale/pull/648)
- Make tailnet node updates check interval configurable [#675](https://github.com/juanfont/headscale/pull/675)
## 0.15.0 (2022-03-20)

View File

@@ -103,6 +103,12 @@ disable_check_updates: false
# Time before an inactive ephemeral node is deleted?
ephemeral_node_inactivity_timeout: 30m
# Period to check for node updates in the tailnet. A value too low will severely affect
# CPU consumption of Headscale. A value too high (over 60s) will cause problems
# for the nodes, as they won't get updates or keep-alive messages in time.
# If in doubt, do not touch the default 10s.
node_update_check_interval: 10s
# SQLite config
db_type: sqlite3
db_path: /var/lib/headscale/db.sqlite

View File

@@ -26,6 +26,7 @@ type Config struct {
GRPCAddr string
GRPCAllowInsecure bool
EphemeralNodeInactivityTimeout time.Duration
NodeUpdateCheckInterval time.Duration
IPPrefixes []netaddr.IPPrefix
PrivateKeyPath string
BaseDomain string
@@ -162,6 +163,8 @@ func LoadConfig(path string, isFile bool) error {
viper.SetDefault("ephemeral_node_inactivity_timeout", "120s")
viper.SetDefault("node_update_check_interval", "10s")
if err := viper.ReadInConfig(); err != nil {
log.Warn().Err(err).Msg("Failed to read configuration from disk")
@@ -217,6 +220,15 @@ func LoadConfig(path string, isFile bool) error {
)
}
maxNodeUpdateCheckInterval, _ := time.ParseDuration("60s")
if viper.GetDuration("node_update_check_interval") > maxNodeUpdateCheckInterval {
errorText += fmt.Sprintf(
"Fatal config error: node_update_check_interval (%s) is set too high, must be less than %s",
viper.GetString("node_update_check_interval"),
maxNodeUpdateCheckInterval,
)
}
if errorText != "" {
//nolint
return errors.New(strings.TrimSuffix(errorText, "\n"))
@@ -478,6 +490,10 @@ func GetHeadscaleConfig() (*Config, error) {
"ephemeral_node_inactivity_timeout",
),
NodeUpdateCheckInterval: viper.GetDuration(
"node_update_check_interval",
),
DBtype: viper.GetString("db_type"),
DBpath: AbsolutePathFromConfigPath(viper.GetString("db_path")),
DBhost: viper.GetString("db_host"),
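
Purely as a standalone illustration of the validation added above (not part of the diff), this sketch applies the same 10s viper default and rejects an interval above the 60s ceiling:

package main

import (
	"fmt"
	"time"

	"github.com/spf13/viper"
)

func main() {
	// Same default as LoadConfig sets above.
	viper.SetDefault("node_update_check_interval", "10s")

	// Intervals above 60s are rejected: nodes would stop receiving
	// updates and keep-alive messages in time.
	maxNodeUpdateCheckInterval, _ := time.ParseDuration("60s")

	interval := viper.GetDuration("node_update_check_interval")
	if interval > maxNodeUpdateCheckInterval {
		fmt.Printf(
			"Fatal config error: node_update_check_interval (%s) is set too high, must be less than %s\n",
			interval,
			maxNodeUpdateCheckInterval,
		)

		return
	}

	fmt.Println("node_update_check_interval:", interval)
}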

View File

@@ -40,13 +40,13 @@ func (s *IntegrationCLITestSuite) SetupTest() {
if ppool, err := dockertest.NewPool(""); err == nil {
s.pool = *ppool
} else {
log.Fatalf("Could not connect to docker: %s", err)
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
}
if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
s.network = *pnetwork
} else {
log.Fatalf("Could not create network: %s", err)
s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
}
headscaleBuildOptions := &dockertest.BuildOptions{
@@ -56,7 +56,7 @@ func (s *IntegrationCLITestSuite) SetupTest() {
currentPath, err := os.Getwd()
if err != nil {
log.Fatalf("Could not determine current path: %s", err)
s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
}
headscaleOptions := &dockertest.RunOptions{
@@ -68,11 +68,16 @@ func (s *IntegrationCLITestSuite) SetupTest() {
Cmd: []string{"headscale", "serve"},
}
err = s.pool.RemoveContainerByName(headscaleHostname)
if err != nil {
s.FailNow(fmt.Sprintf("Could not remove existing container before building test: %s", err), "")
}
fmt.Println("Creating headscale container")
if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
s.headscale = *pheadscale
} else {
log.Fatalf("Could not start headscale container: %s", err)
s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
}
fmt.Println("Created headscale container")
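
The RemoveContainerByName call added above is the fix for the "container mismatch" failure from commit 566b8c3df3: apparently a leftover container from a crashed earlier run blocks dockertest from starting a new one under the same name. A rough sketch of the pattern, assuming ory's dockertest (the container name and build options here are hypothetical):

package main

import (
	"log"

	"github.com/ory/dockertest/v3"
)

func main() {
	pool, err := dockertest.NewPool("")
	if err != nil {
		log.Fatalf("Could not connect to docker: %s", err)
	}

	const headscaleHostname = "headscale-test-suite" // hypothetical name

	// Remove any container left behind by a previous, crashed run before
	// building and starting a fresh one; otherwise dockertest refuses to
	// start a container with a conflicting name.
	if err := pool.RemoveContainerByName(headscaleHostname); err != nil {
		log.Fatalf("Could not remove existing container before building test: %s", err)
	}

	resource, err := pool.BuildAndRunWithBuildOptions(
		&dockertest.BuildOptions{
			Dockerfile: "Dockerfile",
			ContextDir: ".",
		},
		&dockertest.RunOptions{
			Name: headscaleHostname,
			Cmd:  []string{"headscale", "serve"},
		},
	)
	if err != nil {
		log.Fatalf("Could not start headscale container: %s", err)
	}
	defer func() {
		_ = pool.Purge(resource)
	}()

	log.Println("Created headscale container")
}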

View File

@@ -6,7 +6,10 @@ package headscale
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"os"
"strconv"
"strings"
"time"
@@ -16,9 +19,13 @@ import (
"inet.af/netaddr"
)
const DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
const (
DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
)
var (
errEnvVarEmpty = errors.New("getenv: environment variable empty")
IpPrefix4 = netaddr.MustParseIPPrefix("100.64.0.0/10")
IpPrefix6 = netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48")
@@ -283,3 +290,25 @@ func getMagicFQDN(
return hostnames, nil
}
func GetEnvStr(key string) (string, error) {
v := os.Getenv(key)
if v == "" {
return v, errEnvVarEmpty
}
return v, nil
}
func GetEnvBool(key string) (bool, error) {
s, err := GetEnvStr(key)
if err != nil {
return false, err
}
v, err := strconv.ParseBool(s)
if err != nil {
return false, err
}
return v, nil
}
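
The helpers above are what the integration suites below use to read HEADSCALE_INTEGRATION_SAVE_LOG. This self-contained sketch (the helper bodies copied from the hunk, plus a small main that is not part of the diff) shows the intended default-to-false behaviour:

package main

import (
	"errors"
	"fmt"
	"os"
	"strconv"
)

var errEnvVarEmpty = errors.New("getenv: environment variable empty")

// GetEnvStr returns the value of an environment variable, or an error
// when it is unset or empty.
func GetEnvStr(key string) (string, error) {
	v := os.Getenv(key)
	if v == "" {
		return v, errEnvVarEmpty
	}

	return v, nil
}

// GetEnvBool parses the variable with strconv.ParseBool, so values such
// as "1", "true" and "false" are accepted.
func GetEnvBool(key string) (bool, error) {
	s, err := GetEnvStr(key)
	if err != nil {
		return false, err
	}

	return strconv.ParseBool(s)
}

func main() {
	// Mirrors the test suites below: default to false when the variable
	// is unset or cannot be parsed.
	saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
	if err != nil {
		saveLogs = false
	}

	fmt.Println("saveLogs:", saveLogs)
}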

View File

@@ -40,41 +40,50 @@ type IntegrationDERPTestSuite struct {
pool dockertest.Pool
networks map[int]dockertest.Network // so we keep the containers isolated
headscale dockertest.Resource
saveLogs bool
tailscales map[string]dockertest.Resource
joinWaitGroup sync.WaitGroup
}
func TestDERPIntegrationTestSuite(t *testing.T) {
saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
if err != nil {
saveLogs = false
}
s := new(IntegrationDERPTestSuite)
s.tailscales = make(map[string]dockertest.Resource)
s.networks = make(map[int]dockertest.Network)
s.saveLogs = saveLogs
suite.Run(t, s)
// HandleStats, which allows us to check if we passed and save logs
// is called after TearDown, so we cannot tear down containers before
// we have potentially saved the logs.
for _, tailscale := range s.tailscales {
if err := s.pool.Purge(&tailscale); err != nil {
if s.saveLogs {
for _, tailscale := range s.tailscales {
if err := s.pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}
if !s.stats.Passed() {
err := s.saveLog(&s.headscale, "test_output")
if err != nil {
log.Printf("Could not save log: %s\n", err)
}
}
if err := s.pool.Purge(&s.headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}
if !s.stats.Passed() {
err := s.saveLog(&s.headscale, "test_output")
if err != nil {
log.Printf("Could not save log: %s\n", err)
}
}
if err := s.pool.Purge(&s.headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
for _, network := range s.networks {
if err := network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
for _, network := range s.networks {
if err := network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
}
}
}
@@ -83,14 +92,14 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
if ppool, err := dockertest.NewPool(""); err == nil {
s.pool = *ppool
} else {
log.Fatalf("Could not connect to docker: %s", err)
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
}
for i := 0; i < totalContainers; i++ {
if pnetwork, err := s.pool.CreateNetwork(fmt.Sprintf("headscale-derp-%d", i)); err == nil {
s.networks[i] = *pnetwork
} else {
log.Fatalf("Could not create network: %s", err)
s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
}
}
@@ -101,7 +110,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
currentPath, err := os.Getwd()
if err != nil {
log.Fatalf("Could not determine current path: %s", err)
s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
}
headscaleOptions := &dockertest.RunOptions{
@@ -120,11 +129,16 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
},
}
err = s.pool.RemoveContainerByName(headscaleHostname)
if err != nil {
s.FailNow(fmt.Sprintf("Could not remove existing container before building test: %s", err), "")
}
log.Println("Creating headscale container")
if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
s.headscale = *pheadscale
} else {
log.Fatalf("Could not start headscale container: %s", err)
s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
}
log.Println("Created headscale container to test DERP")
@@ -290,6 +304,23 @@ func (s *IntegrationDERPTestSuite) tailscaleContainer(
}
func (s *IntegrationDERPTestSuite) TearDownSuite() {
if !s.saveLogs {
for _, tailscale := range s.tailscales {
if err := s.pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}
if err := s.pool.Purge(&s.headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
for _, network := range s.networks {
if err := network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
}
}
}
func (s *IntegrationDERPTestSuite) HandleStats(
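
The comment in the hunk above (HandleStats is only called after TearDown) is why cleanup is now split between TearDownSuite, when logs are not being saved, and the code after suite.Run, when they are. A minimal sketch of that ordering, assuming testify's suite package (suite and test names are hypothetical):

package ordering

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/suite"
)

type OrderingSuite struct {
	suite.Suite
	stats *suite.SuiteInformation
}

func (s *OrderingSuite) TestNoop() {}

func (s *OrderingSuite) TearDownSuite() {
	// Runs first: containers must not be purged here if their logs still
	// need to be saved based on the suite's pass/fail result.
	fmt.Println("1: TearDownSuite")
}

// Implementing suite.WithStats makes testify call HandleStats after
// TearDownSuite, with the pass/fail information for the whole suite.
func (s *OrderingSuite) HandleStats(suiteName string, stats *suite.SuiteInformation) {
	s.stats = stats
	fmt.Println("2: HandleStats, passed:", stats.Passed())
}

func TestOrderingSuite(t *testing.T) {
	s := new(OrderingSuite)
	suite.Run(t, s)

	// Runs last: logs can be saved based on s.stats, and only then are
	// containers purged (the saveLogs branch in the diff above).
	fmt.Println("3: after suite.Run")
}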

View File

@@ -36,6 +36,7 @@ type IntegrationTestSuite struct {
pool dockertest.Pool
network dockertest.Network
headscale dockertest.Resource
saveLogs bool
namespaces map[string]TestNamespace
@@ -43,6 +44,11 @@ type IntegrationTestSuite struct {
}
func TestIntegrationTestSuite(t *testing.T) {
saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
if err != nil {
saveLogs = false
}
s := new(IntegrationTestSuite)
s.namespaces = map[string]TestNamespace{
@@ -55,32 +61,35 @@ func TestIntegrationTestSuite(t *testing.T) {
tailscales: make(map[string]dockertest.Resource),
},
}
s.saveLogs = saveLogs
suite.Run(t, s)
// HandleStats, which allows us to check if we passed and save logs
// is called after TearDown, so we cannot tear down containers before
// we have potentially saved the logs.
for _, scales := range s.namespaces {
for _, tailscale := range scales.tailscales {
if err := s.pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
if s.saveLogs {
for _, scales := range s.namespaces {
for _, tailscale := range scales.tailscales {
if err := s.pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}
}
}
if !s.stats.Passed() {
err := s.saveLog(&s.headscale, "test_output")
if err != nil {
log.Printf("Could not save log: %s\n", err)
if !s.stats.Passed() {
err := s.saveLog(&s.headscale, "test_output")
if err != nil {
log.Printf("Could not save log: %s\n", err)
}
}
if err := s.pool.Purge(&s.headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}
if err := s.pool.Purge(&s.headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
if err := s.network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
if err := s.network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
}
}
@@ -209,13 +218,13 @@ func (s *IntegrationTestSuite) SetupSuite() {
if ppool, err := dockertest.NewPool(""); err == nil {
s.pool = *ppool
} else {
log.Fatalf("Could not connect to docker: %s", err)
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
}
if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
s.network = *pnetwork
} else {
log.Fatalf("Could not create network: %s", err)
s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
}
headscaleBuildOptions := &dockertest.BuildOptions{
@@ -225,7 +234,7 @@ func (s *IntegrationTestSuite) SetupSuite() {
currentPath, err := os.Getwd()
if err != nil {
log.Fatalf("Could not determine current path: %s", err)
s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
}
headscaleOptions := &dockertest.RunOptions{
@@ -237,11 +246,16 @@ func (s *IntegrationTestSuite) SetupSuite() {
Cmd: []string{"headscale", "serve"},
}
err = s.pool.RemoveContainerByName(headscaleHostname)
if err != nil {
s.FailNow(fmt.Sprintf("Could not remove existing container before building test: %s", err), "")
}
log.Println("Creating headscale container")
if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
s.headscale = *pheadscale
} else {
log.Fatalf("Could not start headscale container: %s", err)
s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
}
log.Println("Created headscale container")
@@ -338,6 +352,23 @@ func (s *IntegrationTestSuite) SetupSuite() {
}
func (s *IntegrationTestSuite) TearDownSuite() {
if !s.saveLogs {
for _, scales := range s.namespaces {
for _, tailscale := range scales.tailscales {
if err := s.pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}
}
if err := s.pool.Purge(&s.headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
if err := s.network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
}
}
func (s *IntegrationTestSuite) HandleStats(

View File

@@ -20,6 +20,7 @@ dns_config:
nameservers:
- 1.1.1.1
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
grpc_allow_insecure: false
grpc_listen_addr: :50443
ip_prefixes:

View File

@@ -2,6 +2,7 @@ log_level: trace
acl_policy_path: ""
db_type: sqlite3
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10

View File

@@ -20,6 +20,7 @@ dns_config:
nameservers:
- 1.1.1.1
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
grpc_allow_insecure: false
grpc_listen_addr: :50443
ip_prefixes:

View File

@@ -2,6 +2,7 @@ log_level: trace
acl_policy_path: ""
db_type: sqlite3
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10

View File

@@ -2,6 +2,7 @@ log_level: trace
acl_policy_path: ""
db_type: sqlite3
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10

View File

@@ -16,8 +16,7 @@ import (
)
const (
keepAliveInterval = 60 * time.Second
updateCheckInterval = 10 * time.Second
keepAliveInterval = 60 * time.Second
)
type contextKey string
@@ -640,7 +639,7 @@ func (h *Headscale) scheduledPollWorker(
machine *Machine,
) {
keepAliveTicker := time.NewTicker(keepAliveInterval)
updateCheckerTicker := time.NewTicker(updateCheckInterval)
updateCheckerTicker := time.NewTicker(h.cfg.NodeUpdateCheckInterval)
defer closeChanWithLog(
updateChan,
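
The hunk above is the core of the PR: the update-check ticker now runs at h.cfg.NodeUpdateCheckInterval instead of the removed hard-coded 10s constant, while the keep-alive cadence stays fixed. A stripped-down sketch of the pattern (the worker and channel names here are hypothetical, not headscale's actual poll handler):

package main

import (
	"fmt"
	"time"
)

// pollWorker mimics the scheduledPollWorker loop: the keep-alive cadence
// stays fixed, while the update check runs at a configurable interval.
func pollWorker(nodeUpdateCheckInterval time.Duration, done <-chan struct{}) {
	keepAliveTicker := time.NewTicker(60 * time.Second)
	updateCheckerTicker := time.NewTicker(nodeUpdateCheckInterval)
	defer keepAliveTicker.Stop()
	defer updateCheckerTicker.Stop()

	for {
		select {
		case <-keepAliveTicker.C:
			fmt.Println("send keep-alive")
		case <-updateCheckerTicker.C:
			fmt.Println("check tailnet for node updates")
		case <-done:
			return
		}
	}
}

func main() {
	done := make(chan struct{})

	// 100ms only so the example terminates quickly; headscale defaults
	// to 10s and rejects anything above 60s.
	go pollWorker(100*time.Millisecond, done)

	time.Sleep(350 * time.Millisecond)
	close(done)
}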