Files
headscale/hscontrol/servertest/lifecycle_test.go
Kristoffer Dalby f87b08676d hscontrol/servertest: add policy, route, ephemeral, and content tests
Extend the servertest harness with:
- TestClient.Direct() accessor for advanced operations
- TestClient.WaitForPeerCount and WaitForCondition helpers
- TestHarness.ChangePolicy for ACL policy testing
- AssertDERPMapPresent and AssertSelfHasAddresses

New test suites:
- content_test.go: self node, DERP map, peer properties, user profiles,
  update history monotonicity, and endpoint update propagation
- policy_test.go: default allow-all, explicit policy, policy triggers
  updates on all nodes, multiple policy changes, multi-user mesh
- ephemeral_test.go: ephemeral connect, cleanup after disconnect,
  mixed ephemeral/regular, reconnect prevents cleanup
- routes_test.go: addresses in AllowedIPs, route advertise and approve,
  advertised routes via hostinfo, CGNAT range validation

Also fix node_departs test to use WaitForCondition instead of
assert.Eventually, and convert concurrent_join_and_leave to
interleaved_join_and_leave with grace-period-tolerant assertions.
2026-03-19 07:05:58 +01:00

102 lines
2.8 KiB
Go

package servertest_test
import (
"fmt"
"testing"
"time"
"github.com/juanfont/headscale/hscontrol/servertest"
"github.com/stretchr/testify/assert"
"tailscale.com/types/netmap"
)
// TestConnectionLifecycle exercises the core node lifecycle:
// connecting, seeing peers, joining mid-session, departing, and
// reconnecting.
func TestConnectionLifecycle(t *testing.T) {
	t.Parallel()

	t.Run("single_node", func(t *testing.T) {
		t.Parallel()
		h := servertest.NewHarness(t, 1)

		nm := h.Client(0).Netmap()
		// assert.NotNil records the failure but keeps executing, so a
		// nil netmap would make the nm.Peers dereference below panic
		// instead of failing cleanly. Branch on the returned bool and
		// bail out early.
		if !assert.NotNil(t, nm, "single node should receive a netmap") {
			return
		}
		assert.Empty(t, nm.Peers, "single node should have no peers")
	})

	t.Run("new_node_joins_mesh", func(t *testing.T) {
		t.Parallel()
		h := servertest.NewHarness(t, 3)

		// Add a 4th client mid-test.
		h.AddClient(t)

		h.WaitForMeshComplete(t, 10*time.Second)
		servertest.AssertMeshComplete(t, h.Clients())
		servertest.AssertSymmetricVisibility(t, h.Clients())
	})

	t.Run("node_departs_peers_update", func(t *testing.T) {
		t.Parallel()
		h := servertest.NewHarness(t, 3)

		departingName := h.Client(2).Name
		h.Client(2).Disconnect(t)

		// The remaining clients should eventually see the departed
		// node go offline or disappear. The grace period in poll.go
		// is 10 seconds, so we need a generous timeout.
		h.Client(0).WaitForCondition(t, "peer offline or gone", 60*time.Second,
			func(nm *netmap.NetworkMap) bool {
				for _, p := range nm.Peers {
					hi := p.Hostinfo()
					if hi.Valid() && hi.Hostname() == departingName {
						isOnline, known := p.Online().GetOk()
						// Peer still present but known-offline is acceptable.
						return known && !isOnline
					}
				}
				// Peer gone entirely is also acceptable.
				// NOTE(review): a peer whose hostinfo is not (yet) valid is
				// skipped above and therefore counted as "gone" — confirm
				// the harness always populates hostinfo before this runs.
				return true
			})
	})

	t.Run("reconnect_restores_mesh", func(t *testing.T) {
		t.Parallel()
		h := servertest.NewHarness(t, 2)

		// Disconnect and reconnect.
		h.Client(0).Disconnect(t)
		h.Client(0).Reconnect(t)

		// Mesh should recover.
		h.WaitForMeshComplete(t, 15*time.Second)
		servertest.AssertMeshComplete(t, h.Clients())
	})

	t.Run("session_replacement", func(t *testing.T) {
		t.Parallel()
		h := servertest.NewHarness(t, 2)

		// Reconnect without explicitly waiting for the old session to
		// fully drain. This tests that Headscale correctly replaces
		// the old map session for the same node.
		h.Client(0).Reconnect(t)

		h.WaitForMeshComplete(t, 15*time.Second)
		servertest.AssertMeshComplete(t, h.Clients())
	})

	t.Run("multiple_nodes_join_sequentially", func(t *testing.T) {
		t.Parallel()
		sizes := []int{2, 5, 10}
		for _, n := range sizes {
			// NOTE(review): if this module targets Go < 1.22, shadow the
			// loop variable (n := n) before capturing it in the closure.
			t.Run(fmt.Sprintf("%d_nodes", n), func(t *testing.T) {
				t.Parallel()
				h := servertest.NewHarness(t, n)
				servertest.AssertMeshComplete(t, h.Clients())
				servertest.AssertSymmetricVisibility(t, h.Clients())
			})
		}
	})
}