mirror of https://github.com/juanfont/headscale.git
Wrap all 329 hardcoded EventuallyWithT timeouts across 12 test files with integrationutil.ScaledTimeout(), which applies a 2x multiplier on CI runners. This addresses the systemic issue where hardcoded timeouts that work locally are insufficient under CI resource contention. Variable-based timeouts (propagationTime, assertTimeout in route_test.go and totalWaitTime in auth_oidc_test.go) are wrapped at their definition site so all downstream usages benefit. The retry intervals (second duration parameter) are intentionally NOT scaled, as they control polling frequency, not total wait time. Updates #3125
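For reference, here is a minimal sketch of what integrationutil.ScaledTimeout could look like. Only the 2x multiplier on CI runners is stated above; detecting CI through the CI environment variable and the exact signature are assumptions, not the actual headscale implementation.

package integrationutil

import (
	"os"
	"time"
)

// ScaledTimeout returns the given timeout multiplied by 2 when running on a
// CI runner and unchanged otherwise. Callers pass retry intervals through
// untouched, since those control polling frequency, not total wait time.
func ScaledTimeout(d time.Duration) time.Duration {
	// Hypothetical CI detection: many CI systems export CI=true.
	if os.Getenv("CI") != "" {
		return 2 * d
	}
	return d
}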
225 lines · 6.4 KiB · Go
package integration

import (
	"encoding/json"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/juanfont/headscale/integration/hsic"
	"github.com/juanfont/headscale/integration/integrationutil"
	"github.com/juanfont/headscale/integration/tsic"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"tailscale.com/tailcfg"
)

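// TestResolveMagicDNS verifies that every client can resolve every peer's
// MagicDNS FQDN (<hostname>.headscale.net.) to all of that peer's Tailscale IPs.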
func TestResolveMagicDNS(t *testing.T) {
	IntegrationSkip(t)

	spec := ScenarioSpec{
		NodesPerUser: len(MustTestVersions),
		Users:        []string{"user1", "user2"},
	}

	scenario, err := NewScenario(spec)
	require.NoError(t, err)
	defer scenario.ShutdownAssertNoPanics(t)

	err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("magicdns"))
	requireNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	requireNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
	requireNoErrSync(t, err)

	// assertClientsState(t, allClients)

	// Poor man's cache: list FQDNs and IPs once so later lookups are cheap.
	_, err = scenario.ListTailscaleClientsFQDNs()
	requireNoErrListFQDN(t, err)

	_, err = scenario.ListTailscaleClientsIPs()
	requireNoErrListClientIPs(t, err)

	for _, client := range allClients {
		for _, peer := range allClients {
			// It is safe to ignore this error as it was handled when caching.
			peerFQDN, _ := peer.FQDN()

			assert.Equal(t, peer.Hostname()+".headscale.net.", peerFQDN)
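			// Resolution may lag behind sync, so retry. The total timeout is
			// wrapped in ScaledTimeout (2x on CI); the 2s retry interval is
			// intentionally not scaled, as it controls polling frequency
			// rather than total wait time.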
			assert.EventuallyWithT(t, func(ct *assert.CollectT) {
				command := []string{
					"tailscale",
					"ip", peerFQDN,
				}
				result, _, err := client.Execute(command)
				assert.NoError(ct, err, "Failed to execute resolve/ip command %s from %s", peerFQDN, client.Hostname())

				ips, err := peer.IPs()
				assert.NoError(ct, err, "Failed to get IPs for %s", peer.Hostname())

				for _, ip := range ips {
					assert.Contains(ct, result, ip.String(), "IP %s should be found in DNS resolution result from %s to %s", ip.String(), client.Hostname(), peer.Hostname())
				}
			}, integrationutil.ScaledTimeout(30*time.Second), 2*time.Second)
		}
	}
}

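// TestResolveMagicDNSExtraRecordsPath verifies that extra DNS records are
// served from a file on disk and that changes to the file are hot-reloaded,
// whether the file is rewritten via the docker API, moved or copied into
// place, written in place, or deleted and recreated.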
func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
	IntegrationSkip(t)

	spec := ScenarioSpec{
		NodesPerUser: 1,
		Users:        []string{"user1", "user2"},
	}

	scenario, err := NewScenario(spec)
	require.NoError(t, err)
	defer scenario.ShutdownAssertNoPanics(t)

	const erPath = "/tmp/extra_records.json"

	extraRecords := make([]tailcfg.DNSRecord, 0, 2)
	extraRecords = append(extraRecords, tailcfg.DNSRecord{
		Name:  "test.myvpn.example.com",
		Type:  "A",
		Value: "6.6.6.6",
	})
	b, _ := json.Marshal(extraRecords) //nolint:errchkjson

	err = scenario.CreateHeadscaleEnv([]tsic.Option{
		tsic.WithPackages("python3", "curl", "bind-tools"),
	},
		hsic.WithTestName("extrarecords"),
		hsic.WithConfigEnv(map[string]string{
			// Disable global nameservers to make the test run offline.
			"HEADSCALE_DNS_NAMESERVERS_GLOBAL": "",
			"HEADSCALE_DNS_EXTRA_RECORDS_PATH": erPath,
		}),
		hsic.WithFileInContainer(erPath, b),
	)
	requireNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	requireNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
	requireNoErrSync(t, err)

	// assertClientsState(t, allClients)

	// Poor man's cache: list FQDNs and IPs once so later lookups are cheap.
	_, err = scenario.ListTailscaleClientsFQDNs()
	requireNoErrListFQDN(t, err)

	_, err = scenario.ListTailscaleClientsIPs()
	requireNoErrListClientIPs(t, err)

	for _, client := range allClients {
		assertCommandOutputContains(t, client, []string{"dig", "test.myvpn.example.com"}, "6.6.6.6")
	}

	hs, err := scenario.Headscale()
	require.NoError(t, err)

	// Write the file directly into place from the docker API.
	b0, _ := json.Marshal([]tailcfg.DNSRecord{ //nolint:errchkjson
		{
			Name:  "docker.myvpn.example.com",
			Type:  "A",
			Value: "2.2.2.2",
		},
	})

	err = hs.WriteFile(erPath, b0)
	require.NoError(t, err)

	for _, client := range allClients {
		assertCommandOutputContains(t, client, []string{"dig", "docker.myvpn.example.com"}, "2.2.2.2")
	}

	// Write a new file and move it to the path to ensure the reload
	// works when a file is moved atomically into place.
	extraRecords = append(extraRecords, tailcfg.DNSRecord{
		Name:  "otherrecord.myvpn.example.com",
		Type:  "A",
		Value: "7.7.7.7",
	})
	b2, _ := json.Marshal(extraRecords) //nolint:errchkjson

	err = hs.WriteFile(erPath+"2", b2)
	require.NoError(t, err)
	_, err = hs.Execute([]string{"mv", erPath + "2", erPath})
	require.NoError(t, err)

	for _, client := range allClients {
		assertCommandOutputContains(t, client, []string{"dig", "test.myvpn.example.com"}, "6.6.6.6")
		assertCommandOutputContains(t, client, []string{"dig", "otherrecord.myvpn.example.com"}, "7.7.7.7")
	}

	// Write a new file and copy it to the path to ensure the reload
	// works when a file is copied into place.
	b3, _ := json.Marshal([]tailcfg.DNSRecord{ //nolint:errchkjson
		{
			Name:  "copy.myvpn.example.com",
			Type:  "A",
			Value: "8.8.8.8",
		},
	})

	err = hs.WriteFile(erPath+"3", b3)
	require.NoError(t, err)
	_, err = hs.Execute([]string{"cp", erPath + "3", erPath})
	require.NoError(t, err)

	for _, client := range allClients {
		assertCommandOutputContains(t, client, []string{"dig", "copy.myvpn.example.com"}, "8.8.8.8")
	}

	// Write in place to ensure pipe-like behaviour works.
	b4, _ := json.Marshal([]tailcfg.DNSRecord{ //nolint:errchkjson
		{
			Name:  "docker.myvpn.example.com",
			Type:  "A",
			Value: "9.9.9.9",
		},
	})
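	// Joined and run via "bash -c", ">" becomes shell redirection: the single
	// quotes keep the JSON (which contains double quotes) as a single echo
	// argument, and the redirect truncates and rewrites the existing file
	// in place instead of replacing it.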
	command := []string{"echo", fmt.Sprintf("'%s'", string(b4)), ">", erPath}
	_, err = hs.Execute([]string{"bash", "-c", strings.Join(command, " ")})
	require.NoError(t, err)

	for _, client := range allClients {
		assertCommandOutputContains(t, client, []string{"dig", "docker.myvpn.example.com"}, "9.9.9.9")
	}

	// Delete the file and create a new one to ensure it is picked up again.
	_, err = hs.Execute([]string{"rm", erPath})
	require.NoError(t, err)

	// The same records should still resolve, as they are not cleared on delete.
	assert.EventuallyWithT(t, func(ct *assert.CollectT) {
		for _, client := range allClients {
			result, _, err := client.Execute([]string{"dig", "docker.myvpn.example.com"})
			assert.NoError(ct, err)
			assert.Contains(ct, result, "9.9.9.9")
		}
	}, integrationutil.ScaledTimeout(10*time.Second), 1*time.Second)

	// Write a new file; the backoff mechanism should make the file watcher
	// pick it up again.
	err = hs.WriteFile(erPath, b3)
	require.NoError(t, err)

	for _, client := range allClients {
		assertCommandOutputContains(t, client, []string{"dig", "copy.myvpn.example.com"}, "8.8.8.8")
	}
}