Wrap all 329 hardcoded EventuallyWithT timeouts across 12 test files with integrationutil.ScaledTimeout(), which applies a 2x multiplier on CI runners. This addresses the systemic issue where hardcoded timeouts that work locally are insufficient under CI resource contention. Variable-based timeouts (propagationTime, assertTimeout in route_test.go and totalWaitTime in auth_oidc_test.go) are wrapped at their definition site so all downstream usages benefit. The retry intervals (second duration parameter) are intentionally NOT scaled, as they control polling frequency, not total wait time. Updates #3125
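ScaledTimeout itself is defined in the integrationutil package rather than in this file. A minimal sketch of the behavior the commit message describes, assuming CI is detected via the standard CI environment variable — the detection mechanism and the constant name are assumptions, not the actual headscale implementation:

package integrationutil

import (
	"os"
	"time"
)

// ciTimeoutMultiplier is an assumption; the commit message only states
// that a 2x multiplier is applied on CI runners.
const ciTimeoutMultiplier = 2

// ScaledTimeout returns the given timeout unchanged for local runs and
// multiplied on CI runners. Detecting CI via the "CI" environment
// variable is an assumption for this sketch.
func ScaledTimeout(d time.Duration) time.Duration {
	if os.Getenv("CI") != "" {
		return d * ciTimeoutMultiplier
	}
	return d
}

Note that only the total wait (the first duration argument to EventuallyWithT) is wrapped below; the polling interval is left unscaled, as the commit message explains.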
package integration

import (
	"testing"
	"time"

	"github.com/juanfont/headscale/integration/hsic"
	"github.com/juanfont/headscale/integration/integrationutil"
	"github.com/juanfont/headscale/integration/tsic"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
)

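// ClientsSpec describes a mix of clients in a scenario: ones using a
// plain connection and ones using DERP-over-websocket.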
type ClientsSpec struct {
	Plain         int
	WebsocketDERP int
}

func TestDERPServerScenario(t *testing.T) {
	spec := ScenarioSpec{
		NodesPerUser: 1,
		Users:        []string{"user1", "user2", "user3"},
		Networks: map[string][]string{
			"usernet1": {"user1"},
			"usernet2": {"user2"},
			"usernet3": {"user3"},
		},
	}

	derpServerScenario(t, spec, "derp-tcp", false, func(scenario *Scenario) {
		allClients, err := scenario.ListTailscaleClients()
		requireNoErrListClients(t, err)
		t.Logf("checking %d clients for websocket connections", len(allClients))

		for _, client := range allClients {
			if didClientUseWebsocketForDERP(t, client) {
				t.Logf(
					"client %q used a websocket connection, but was not expected to",
					client.Hostname(),
				)
				t.Fail()
			}
		}

		hsServer, err := scenario.Headscale()
		requireNoErrGetHeadscale(t, err)

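		// Advertise the embedded DERP server as its own region so
		// DERPVerify can dial it directly.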
		derpRegion := tailcfg.DERPRegion{
			RegionCode: "test-derpverify",
			RegionName: "TestDerpVerify",
			Nodes: []*tailcfg.DERPNode{
				{
					Name:             "TestDerpVerify",
					RegionID:         900,
					HostName:         hsServer.GetHostname(),
					STUNPort:         3478,
					STUNOnly:         false,
					DERPPort:         443,
					InsecureForTests: true,
				},
			},
		}

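		// A freshly generated node key is not registered with headscale,
		// so with verify_clients enabled the DERP server should reject it.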
		fakeKey := key.NewNode()
		DERPVerify(t, fakeKey, derpRegion, false)
	})
}

func TestDERPServerWebsocketScenario(t *testing.T) {
	spec := ScenarioSpec{
		NodesPerUser: 1,
		Users:        []string{"user1", "user2", "user3"},
		Networks: map[string][]string{
			"usernet1": {"user1"},
			"usernet2": {"user2"},
			"usernet3": {"user3"},
		},
	}

	derpServerScenario(t, spec, "derp-ws", true, func(scenario *Scenario) {
		allClients, err := scenario.ListTailscaleClients()
		requireNoErrListClients(t, err)
		t.Logf("checking %d clients for websocket connections", len(allClients))

		for _, client := range allClients {
			if !didClientUseWebsocketForDERP(t, client) {
				t.Logf(
					"client %q does not seem to have used a websocket connection, even though it was expected to do so",
					client.Hostname(),
				)
				t.Fail()
			}
		}
	})
}

// derpServerScenario implements the common parts of a DERP scenario;
// we *want* it to show up in stacktraces,
// so marking it as a test helper would be counterproductive.
//
//nolint:thelper
func derpServerScenario(
	t *testing.T,
	spec ScenarioSpec,
	testName string,
	websocket bool,
	furtherAssertions ...func(*Scenario),
) {
	IntegrationSkip(t)

	scenario, err := NewScenario(spec)
	require.NoError(t, err)

	defer scenario.ShutdownAssertNoPanics(t)

	err = scenario.CreateHeadscaleEnv(
		[]tsic.Option{
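			// Force DERP over websocket for every client when requested.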
			tsic.WithWebsocketDERP(websocket),
		},
		hsic.WithTestName(testName),
		// Expose STUN port for DERP NAT traversal.
		hsic.WithExtraPorts([]string{"3478/udp"}),
		// DERP clients expect the server on the standard HTTPS port.
		hsic.WithPort(443),
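		// Enable the embedded DERP server's client verification and a
		// short DERPMap update interval so the updater runs during the test.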
		hsic.WithConfigEnv(map[string]string{
			"HEADSCALE_DERP_AUTO_UPDATE_ENABLED":   "true",
			"HEADSCALE_DERP_UPDATE_FREQUENCY":      "10s",
			"HEADSCALE_LISTEN_ADDR":                "0.0.0.0:443",
			"HEADSCALE_DERP_SERVER_VERIFY_CLIENTS": "true",
		}),
	)
	requireNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	requireNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
	requireNoErrSync(t, err)

	allHostnames, err := scenario.ListTailscaleClientsFQDNs()
	requireNoErrListFQDN(t, err)

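	// Wait (with a CI-scaled timeout) until every client reports a healthy
	// connection to the embedded DERP relay.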
	for _, client := range allClients {
		assert.EventuallyWithT(t, func(ct *assert.CollectT) {
			status, err := client.Status()
			assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())

			for _, health := range status.Health {
				assert.NotContains(ct, health, "could not connect to any relay server",
					"Client %s should be connected to DERP relay", client.Hostname())
				assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
					"Client %s should be connected to Headscale Embedded DERP", client.Hostname())
			}
		}, integrationutil.ScaledTimeout(30*time.Second), 2*time.Second)
	}

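	// Ping every hostname from every client over DERP; the helper returns
	// the number of successful pings, which must cover the full matrix.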
	success := pingDerpAllHelper(t, allClients, allHostnames)
	if len(allHostnames)*len(allClients) > success {
		t.FailNow()

		return
	}

	for _, client := range allClients {
		assert.EventuallyWithT(t, func(ct *assert.CollectT) {
			status, err := client.Status()
			assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())

			for _, health := range status.Health {
				assert.NotContains(ct, health, "could not connect to any relay server",
					"Client %s should be connected to DERP relay after first run", client.Hostname())
				assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
					"Client %s should be connected to Headscale Embedded DERP after first run", client.Hostname())
			}
		}, integrationutil.ScaledTimeout(30*time.Second), 2*time.Second)
	}

t.Logf("Run 1: %d successful pings out of %d", success, len(allClients)*len(allHostnames))
|
|
|
|
	// Let the DERP updater run a couple of times to ensure it does not
	// break the DERPMap. The updater is configured above to a 10s interval.
	//nolint:forbidigo // Intentional delay: must wait for DERP updater to run multiple times (interval-based)
	time.Sleep(30 * time.Second)

	success = pingDerpAllHelper(t, allClients, allHostnames)
	if len(allHostnames)*len(allClients) > success {
		t.Fail()
	}

	for _, client := range allClients {
		assert.EventuallyWithT(t, func(ct *assert.CollectT) {
			status, err := client.Status()
			assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())

			for _, health := range status.Health {
				assert.NotContains(ct, health, "could not connect to any relay server",
					"Client %s should be connected to DERP relay after second run", client.Hostname())
				assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
					"Client %s should be connected to Headscale Embedded DERP after second run", client.Hostname())
			}
		}, integrationutil.ScaledTimeout(30*time.Second), 2*time.Second)
	}

t.Logf("Run2: %d successful pings out of %d", success, len(allClients)*len(allHostnames))
|
|
|
|
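	// Run any scenario-specific assertions supplied by the caller.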
	for _, check := range furtherAssertions {
		check(scenario)
	}
}