Mirror of https://github.com/juanfont/headscale.git, synced 2026-03-28 04:11:41 +01:00.
Split TestIssues into 7 focused test functions to stay under cyclomatic complexity limits while testing more aggressively. Issues surfaced (4 failing tests): 1. initial_map_should_include_peer_online_status: Initial MapResponse has Online=nil for peers. Online status only arrives later via PeersChangedPatch. 2. disco_key_should_propagate_to_peers: DiscoPublicKey set by client is not visible to peers. Peers see zero disco key. 3. approved_route_without_announcement_is_visible: Server-side route approval without client-side announcement silently produces empty SubnetRoutes (intersection of empty announced + approved = empty). 4. nodestore_correct_after_rapid_reconnect: After 5 rapid reconnect cycles, NodeStore reports node as offline despite having an active poll session. The connect/disconnect grace period interleaving leaves IsOnline in an incorrect state. Passing tests (20) verify: - IP uniqueness across 10 nodes - IP stability across reconnect - New peers have addresses immediately - Node rename propagates to peers - Node delete removes from all peer lists - Hostinfo changes (OS field) propagate - NodeStore/DB consistency after route mutations - Grace period timing (8-20s window) - Ephemeral node deletion (not just offline) - 10-node simultaneous connect convergence - Rapid sequential node additions - Reconnect produces complete map - Cross-user visibility with default policy - Same-user multiple nodes get distinct IDs - Same-hostname nodes get unique GivenNames - Policy change during connect still converges - DERP region references are valid - User profiles present for self and peers - Self-update arrives after route approval - Route advertisement stored as AnnouncedRoutes
141 lines · 4.5 KiB · Go
package servertest_test
|
|
|
|
import (
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/juanfont/headscale/hscontrol/servertest"
|
|
"github.com/stretchr/testify/assert"
|
|
"github.com/stretchr/testify/require"
|
|
"tailscale.com/types/netmap"
|
|
)
|
|
|
|
// TestEphemeralNodes tests the lifecycle of ephemeral nodes,
|
|
// which should be automatically cleaned up when they disconnect.
|
|
func TestEphemeralNodes(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
t.Run("ephemeral_connects_and_sees_peers", func(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
srv := servertest.NewServer(t,
|
|
servertest.WithEphemeralTimeout(5*time.Second))
|
|
user := srv.CreateUser(t, "eph-user")
|
|
|
|
regular := servertest.NewClient(t, srv, "eph-regular",
|
|
servertest.WithUser(user))
|
|
ephemeral := servertest.NewClient(t, srv, "eph-ephemeral",
|
|
servertest.WithUser(user), servertest.WithEphemeral())
|
|
|
|
// Both should see each other.
|
|
regular.WaitForPeers(t, 1, 10*time.Second)
|
|
ephemeral.WaitForPeers(t, 1, 10*time.Second)
|
|
|
|
_, found := regular.PeerByName("eph-ephemeral")
|
|
assert.True(t, found, "regular should see ephemeral peer")
|
|
|
|
_, found = ephemeral.PeerByName("eph-regular")
|
|
assert.True(t, found, "ephemeral should see regular peer")
|
|
})
|
|
|
|
t.Run("ephemeral_cleanup_after_disconnect", func(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
// Use a short ephemeral timeout so the test doesn't take long.
|
|
srv := servertest.NewServer(t,
|
|
servertest.WithEphemeralTimeout(3*time.Second))
|
|
user := srv.CreateUser(t, "eph-cleanup-user")
|
|
|
|
regular := servertest.NewClient(t, srv, "eph-cleanup-regular",
|
|
servertest.WithUser(user))
|
|
ephemeral := servertest.NewClient(t, srv, "eph-cleanup-ephemeral",
|
|
servertest.WithUser(user), servertest.WithEphemeral())
|
|
|
|
regular.WaitForPeers(t, 1, 10*time.Second)
|
|
|
|
// Verify ephemeral peer is present before disconnect.
|
|
_, found := regular.PeerByName("eph-cleanup-ephemeral")
|
|
require.True(t, found, "ephemeral peer should be visible before disconnect")
|
|
|
|
// Ensure the ephemeral node's long-poll session is fully
|
|
// established on the server before disconnecting. Without
|
|
// this, the Disconnect may cancel a PollNetMap that hasn't
|
|
// yet reached serveLongPoll, so no grace period or ephemeral
|
|
// GC would ever be scheduled.
|
|
ephemeral.WaitForPeers(t, 1, 10*time.Second)
|
|
|
|
// Disconnect the ephemeral node.
|
|
ephemeral.Disconnect(t)
|
|
|
|
// After the grace period (10s) + ephemeral timeout (3s),
|
|
// the ephemeral node should be deleted from the server and
|
|
// disappear from the regular node's peer list entirely.
|
|
// Unlike non-ephemeral nodes which go offline but stay in
|
|
// the peer list, ephemeral nodes should be garbage collected.
|
|
regular.WaitForCondition(t, "ephemeral peer removed from peer list",
|
|
60*time.Second,
|
|
func(nm *netmap.NetworkMap) bool {
|
|
for _, p := range nm.Peers {
|
|
hi := p.Hostinfo()
|
|
if hi.Valid() && hi.Hostname() == "eph-cleanup-ephemeral" {
|
|
return false // still present
|
|
}
|
|
}
|
|
|
|
return true // gone
|
|
})
|
|
})
|
|
|
|
t.Run("ephemeral_and_regular_mixed", func(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
srv := servertest.NewServer(t,
|
|
servertest.WithEphemeralTimeout(5*time.Second))
|
|
user := srv.CreateUser(t, "mix-user")
|
|
|
|
r1 := servertest.NewClient(t, srv, "mix-regular-1",
|
|
servertest.WithUser(user))
|
|
r2 := servertest.NewClient(t, srv, "mix-regular-2",
|
|
servertest.WithUser(user))
|
|
e1 := servertest.NewClient(t, srv, "mix-eph-1",
|
|
servertest.WithUser(user), servertest.WithEphemeral())
|
|
|
|
// All three should see each other.
|
|
r1.WaitForPeers(t, 2, 15*time.Second)
|
|
r2.WaitForPeers(t, 2, 15*time.Second)
|
|
e1.WaitForPeers(t, 2, 15*time.Second)
|
|
|
|
servertest.AssertMeshComplete(t,
|
|
[]*servertest.TestClient{r1, r2, e1})
|
|
})
|
|
|
|
t.Run("ephemeral_reconnect_prevents_cleanup", func(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
srv := servertest.NewServer(t,
|
|
servertest.WithEphemeralTimeout(5*time.Second))
|
|
user := srv.CreateUser(t, "eph-recon-user")
|
|
|
|
regular := servertest.NewClient(t, srv, "eph-recon-regular",
|
|
servertest.WithUser(user))
|
|
ephemeral := servertest.NewClient(t, srv, "eph-recon-ephemeral",
|
|
servertest.WithUser(user), servertest.WithEphemeral())
|
|
|
|
regular.WaitForPeers(t, 1, 10*time.Second)
|
|
|
|
// Ensure the ephemeral node's long-poll is established.
|
|
ephemeral.WaitForPeers(t, 1, 10*time.Second)
|
|
|
|
// Disconnect and quickly reconnect.
|
|
ephemeral.Disconnect(t)
|
|
ephemeral.Reconnect(t)
|
|
|
|
// After reconnecting, the ephemeral node should still be visible.
|
|
regular.WaitForPeers(t, 1, 15*time.Second)
|
|
|
|
_, found := regular.PeerByName("eph-recon-ephemeral")
|
|
assert.True(t, found,
|
|
"ephemeral node should still be visible after quick reconnect")
|
|
})
|
|
}
|