Mirror of https://github.com/juanfont/headscale.git (synced 2026-04-25)
golangci-lint: use forbidigo to block time.Sleep (#2946)
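forbidigo is golangci-lint's pattern-based call blocker: any call matching a configured pattern fails the lint run. A minimal sketch of the kind of rule the title describes, assuming the usual linters-settings layout (the exact pattern and message in the repository may differ):

    linters:
      enable:
        - forbidigo
    linters-settings:
      forbidigo:
        forbid:
          # Ban fixed sleeps; the message points authors at event-driven alternatives.
          - p: 'time\.Sleep'
            msg: "do not use time.Sleep; wait on a channel, a timer, or assert.EventuallyWithT"

With a rule like that in place, every time.Sleep in the tests below has to be rewritten as something the code can actually wait on, which is what the hunks that follow do.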
@@ -1,9 +1,9 @@
 package db
 
 import (
-	"math/rand"
 	"runtime"
 	"sync"
+	"sync/atomic"
 	"testing"
 	"time"
 
@@ -68,31 +68,18 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
 		gc.Cancel(nodeID)
 	}
 
-	// Create a channel to signal when we're done with cleanup checks
-	cleanupDone := make(chan struct{})
+	// Close GC
+	gc.Close()
 
-	// Close GC and check for leaks in a separate goroutine
-	go func() {
-		// Close GC
-		gc.Close()
-
-		// Give any potential leaked goroutines a chance to exit
-		// Still need a small sleep here as we're checking for absence of goroutines
-		time.Sleep(oneHundred)
-
-		// Check for leaked goroutines
+	// Wait for goroutines to clean up and verify no leaks
+	assert.EventuallyWithT(t, func(c *assert.CollectT) {
 		finalGoroutines := runtime.NumGoroutine()
-		t.Logf("Final number of goroutines: %d", finalGoroutines)
-
 		// NB: We have to allow for a small number of extra goroutines because of test itself
-		assert.LessOrEqual(t, finalGoroutines, initialGoroutines+5,
+		assert.LessOrEqual(c, finalGoroutines, initialGoroutines+5,
 			"There are significantly more goroutines after GC usage, which suggests a leak")
+	}, time.Second, 10*time.Millisecond, "goroutines should clean up after GC close")
 
-		close(cleanupDone)
-	}()
-
-	// Wait for cleanup to complete
-	<-cleanupDone
+	t.Logf("Final number of goroutines: %d", runtime.NumGoroutine())
 }
 
 // TestEphemeralGarbageCollectorReschedule is a test for the rescheduling of nodes in EphemeralGarbageCollector().
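The replacement idiom in the hunk above is testify's assert.EventuallyWithT: instead of sleeping once and asserting, a probe function is re-run on a tick until it passes or the overall deadline expires. A condensed, self-contained sketch of the pattern (the test name and thresholds are illustrative, not taken from this commit):

    package db

    import (
        "runtime"
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestGoroutinesSettle(t *testing.T) {
        initialGoroutines := runtime.NumGoroutine()

        // ... start and then close the component under test ...

        // The probe runs every 10ms for up to 1s. Assertions go against the
        // *assert.CollectT, so a failing probe is retried rather than
        // failing the test on the first attempt.
        assert.EventuallyWithT(t, func(c *assert.CollectT) {
            assert.LessOrEqual(c, runtime.NumGoroutine(), initialGoroutines+5,
                "goroutine count should return to its baseline")
        }, time.Second, 10*time.Millisecond, "goroutines should settle")
    }

Note that the final t.Logf moved after the polling block, so the count is logged once, when the condition has already been met.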
@@ -103,10 +90,14 @@ func TestEphemeralGarbageCollectorReschedule(t *testing.T) {
 	var deletedIDs []types.NodeID
 	var deleteMutex sync.Mutex
 
+	deletionNotifier := make(chan types.NodeID, 1)
+
 	deleteFunc := func(nodeID types.NodeID) {
 		deleteMutex.Lock()
 		deletedIDs = append(deletedIDs, nodeID)
 		deleteMutex.Unlock()
+
+		deletionNotifier <- nodeID
 	}
 
 	// Start GC
@@ -125,10 +116,15 @@ func TestEphemeralGarbageCollectorReschedule(t *testing.T) {
 	// Reschedule the same node with a shorter expiry
 	gc.Schedule(nodeID, shortExpiry)
 
-	// Wait for deletion
-	time.Sleep(shortExpiry * 2)
+	// Wait for deletion notification with timeout
+	select {
+	case deletedNodeID := <-deletionNotifier:
+		assert.Equal(t, nodeID, deletedNodeID, "The correct node should be deleted")
+	case <-time.After(time.Second):
+		t.Fatal("Timed out waiting for node deletion")
+	}
 
-	// Verify that the node was deleted once
+	// Verify that the node was deleted exactly once
 	deleteMutex.Lock()
 	assert.Len(t, deletedIDs, 1, "Node should be deleted exactly once")
 	assert.Equal(t, nodeID, deletedIDs[0], "The correct node should be deleted")
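The deletionNotifier added above generalises to a reusable idiom: the callback under test sends on a buffered channel (capacity 1, so the send never blocks the collector even if the test has already failed), and the test selects between the notification and a deadline. A standalone sketch with hypothetical names; NodeID stands in for headscale's types.NodeID:

    package example

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    // NodeID is a stand-in for types.NodeID in this sketch.
    type NodeID uint64

    // waitForDeletion waits for a single deletion event with a deadline,
    // replacing "sleep for twice the expiry and hope" with an explicit wait.
    func waitForDeletion(t *testing.T, notifier <-chan NodeID, want NodeID) {
        t.Helper()
        select {
        case got := <-notifier:
            assert.Equal(t, want, got, "the correct node should be deleted")
        case <-time.After(time.Second):
            t.Fatal("timed out waiting for node deletion")
        }
    }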
@@ -203,18 +199,24 @@ func TestEphemeralGarbageCollectorCloseBeforeTimerFires(t *testing.T) {
 	var deletedIDs []types.NodeID
 	var deleteMutex sync.Mutex
 
+	deletionNotifier := make(chan types.NodeID, 1)
+
 	deleteFunc := func(nodeID types.NodeID) {
 		deleteMutex.Lock()
 		deletedIDs = append(deletedIDs, nodeID)
 		deleteMutex.Unlock()
+
+		deletionNotifier <- nodeID
 	}
 
 	// Start the GC
 	gc := NewEphemeralGarbageCollector(deleteFunc)
 	go gc.Start()
 
-	const longExpiry = 1 * time.Hour
-	const shortExpiry = fifty
+	const (
+		longExpiry = 1 * time.Hour
+		shortWait  = fifty * 2
+	)
 
 	// Schedule node deletion with a long expiry
 	gc.Schedule(types.NodeID(1), longExpiry)
@@ -222,8 +224,13 @@ func TestEphemeralGarbageCollectorCloseBeforeTimerFires(t *testing.T) {
 	// Close the GC before the timer
 	gc.Close()
 
-	// Wait a short time
-	time.Sleep(shortExpiry * 2)
+	// Verify that no deletion occurred within a reasonable time
+	select {
+	case <-deletionNotifier:
+		t.Fatal("Node was deleted after GC was closed, which should not happen")
+	case <-time.After(shortWait):
+		// Expected: no deletion should occur
+	}
 
 	// Verify that no deletion occurred
 	deleteMutex.Lock()
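Proving that something does not happen is the one case where a time bound is unavoidable: the test has to pick an observation window. The hunk above keeps the window explicit with time.After and fails fast if the forbidden event arrives early. A sketch of the inverse helper, under the same assumed names as the previous sketch:

    // assertNoDeletion fails immediately if a deletion arrives, and returns
    // quietly once the observation window elapses with no event.
    func assertNoDeletion(t *testing.T, notifier <-chan NodeID, window time.Duration) {
        t.Helper()
        select {
        case got := <-notifier:
            t.Fatalf("node %d was deleted after GC was closed, which should not happen", got)
        case <-time.After(window):
            // Expected: nothing was deleted within the window.
        }
    }

Unlike a bare time.Sleep followed by an assertion, this reports the offending node the moment it arrives instead of discovering the failure only after the full wait.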
@@ -265,29 +272,17 @@ func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {
 	// Close GC right away
 	gc.Close()
 
-	// Use a channel to signal when we should check for goroutine count
-	gcClosedCheck := make(chan struct{})
-	go func() {
-		// Give the GC time to fully close and clean up resources
-		// This is still time-based but only affects when we check the goroutine count,
-		// not the actual test logic
-		time.Sleep(oneHundred)
-		close(gcClosedCheck)
-	}()
-
 	// Now try to schedule node for deletion with a very short expiry
 	// If the Schedule operation incorrectly creates a timer, it would fire quickly
 	nodeID := types.NodeID(1)
 	gc.Schedule(nodeID, 1*time.Millisecond)
 
-	// Set up a timeout channel for our test
-	timeout := time.After(fiveHundred)
-
 	// Check if any node was deleted (which shouldn't happen)
+	// Use timeout to wait for potential deletion
 	select {
 	case <-nodeDeleted:
 		t.Fatal("Node was deleted after GC was closed, which should not happen")
-	case <-timeout:
+	case <-time.After(fiveHundred):
 		// This is the expected path - no deletion should occur
 	}
 
@@ -298,13 +293,14 @@ func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {
 	assert.Equal(t, 0, nodesDeleted, "No nodes should be deleted when Schedule is called after Close")
 
-	// Check for goroutine leaks after GC is fully closed
-	<-gcClosedCheck
-	finalGoroutines := runtime.NumGoroutine()
-	t.Logf("Final number of goroutines: %d", finalGoroutines)
+	assert.EventuallyWithT(t, func(c *assert.CollectT) {
+		finalGoroutines := runtime.NumGoroutine()
+		// Allow for small fluctuations in goroutine count for testing routines etc
+		assert.LessOrEqual(c, finalGoroutines, initialGoroutines+2,
+			"There should be no significant goroutine leaks when Schedule is called after Close")
+	}, time.Second, 10*time.Millisecond, "goroutines should clean up after GC close")
 
-	// Allow for small fluctuations in goroutine count for testing routines etc
-	assert.LessOrEqual(t, finalGoroutines, initialGoroutines+2,
-		"There should be no significant goroutine leaks when Schedule is called after Close")
+	t.Logf("Final number of goroutines: %d", runtime.NumGoroutine())
 }
 
 // TestEphemeralGarbageCollectorConcurrentScheduleAndClose tests the behavior of the garbage collector
@@ -331,7 +327,8 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
 	// Number of concurrent scheduling goroutines
 	const numSchedulers = 10
 	const nodesPerScheduler = 50
-	const schedulingDuration = fiveHundred
 
+	const closeAfterNodes = 25 // Close GC after this many nodes per scheduler
+
 	// Use WaitGroup to wait for all scheduling goroutines to finish
 	var wg sync.WaitGroup
@@ -340,6 +337,9 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
 	// Create a stopper channel to signal scheduling goroutines to stop
 	stopScheduling := make(chan struct{})
 
+	// Track how many nodes have been scheduled
+	var scheduledCount int64
+
 	// Launch goroutines that continuously schedule nodes
 	for schedulerIndex := range numSchedulers {
 		go func(schedulerID int) {
@@ -355,18 +355,23 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
 				default:
 					nodeID := types.NodeID(baseNodeID + j + 1)
 					gc.Schedule(nodeID, 1*time.Hour) // Long expiry to ensure it doesn't trigger during test
+					atomic.AddInt64(&scheduledCount, 1)
 
-					// Random (short) sleep to introduce randomness/variability
-					time.Sleep(time.Duration(rand.Intn(5)) * time.Millisecond)
+					// Yield to other goroutines to introduce variability
+					runtime.Gosched()
 				}
 			}
 		}(schedulerIndex)
 	}
 
-	// After a short delay, close the garbage collector while schedulers are still running
+	// Close the garbage collector after some nodes have been scheduled
 	go func() {
 		defer wg.Done()
-		time.Sleep(schedulingDuration / 2)
+
+		// Wait until enough nodes have been scheduled
+		for atomic.LoadInt64(&scheduledCount) < int64(numSchedulers*closeAfterNodes) {
+			runtime.Gosched()
+		}
 
 		// Close GC
 		gc.Close()
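Two substitutions carry this hunk. The random per-iteration sleep becomes runtime.Gosched(), which yields the processor so other goroutines can interleave without parking anything for a fixed wall-clock interval. And "close halfway through the time budget" becomes "close once at least numSchedulers*closeAfterNodes nodes have been scheduled", tracked with an atomic counter. A compressed, runnable sketch of that gate (the counts are illustrative):

    package main

    import (
        "fmt"
        "runtime"
        "sync/atomic"
    )

    func main() {
        var scheduledCount int64
        const threshold = 250 // e.g. numSchedulers * closeAfterNodes

        // Producers: bump the counter for each unit of work they schedule.
        for i := 0; i < 4; i++ {
            go func() {
                for {
                    atomic.AddInt64(&scheduledCount, 1)
                    runtime.Gosched() // yield instead of sleeping
                }
            }()
        }

        // Gate: yield until enough work exists, then act; no clock involved.
        for atomic.LoadInt64(&scheduledCount) < threshold {
            runtime.Gosched()
        }
        fmt.Println("threshold reached; safe to close the collector")
    }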
@@ -378,14 +383,13 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
 	// Wait for all goroutines to complete
 	wg.Wait()
 
-	// Wait a bit longer to allow any leaked goroutines to do their work
-	time.Sleep(oneHundred)
+	// Check for leaks using EventuallyWithT
+	assert.EventuallyWithT(t, func(c *assert.CollectT) {
+		finalGoroutines := runtime.NumGoroutine()
+		// Allow for a reasonable small variable routine count due to testing
+		assert.LessOrEqual(c, finalGoroutines, initialGoroutines+5,
+			"There should be no significant goroutine leaks during concurrent Schedule and Close operations")
+	}, time.Second, 10*time.Millisecond, "goroutines should clean up")
 
-	// Check for leaks
-	finalGoroutines := runtime.NumGoroutine()
-	t.Logf("Final number of goroutines: %d", finalGoroutines)
-
-	// Allow for a reasonable small variable routine count due to testing
-	assert.LessOrEqual(t, finalGoroutines, initialGoroutines+5,
-		"There should be no significant goroutine leaks during concurrent Schedule and Close operations")
+	t.Logf("Final number of goroutines: %d", runtime.NumGoroutine())
 }
 
@@ -6,7 +6,9 @@ import (
 	"math/big"
 	"net/netip"
 	"regexp"
+	"runtime"
 	"sync"
+	"sync/atomic"
 	"testing"
 	"time"
 
@@ -445,7 +447,7 @@ func TestAutoApproveRoutes(t *testing.T) {
 				RoutableIPs: tt.routes,
 			},
 			Tags: []string{"tag:exit"},
-		IPv4: ptr.To(netip.MustParseAddr("100.64.0.2")),
+		IPv4: ptr.To(netip.MustParseAddr("100.64.0.2")),
 		}
 
 		err = adb.DB.Save(&nodeTagged).Error
@@ -507,23 +509,48 @@ func TestEphemeralGarbageCollectorOrder(t *testing.T) {
 	got := []types.NodeID{}
 	var mu sync.Mutex
 
+	deletionCount := make(chan struct{}, 10)
+
 	e := NewEphemeralGarbageCollector(func(ni types.NodeID) {
 		mu.Lock()
 		defer mu.Unlock()
 		got = append(got, ni)
+
+		deletionCount <- struct{}{}
 	})
 	go e.Start()
 
-	go e.Schedule(1, 1*time.Second)
-	go e.Schedule(2, 2*time.Second)
-	go e.Schedule(3, 3*time.Second)
-	go e.Schedule(4, 4*time.Second)
+	// Use shorter timeouts for faster tests
+	go e.Schedule(1, 50*time.Millisecond)
+	go e.Schedule(2, 100*time.Millisecond)
+	go e.Schedule(3, 150*time.Millisecond)
+	go e.Schedule(4, 200*time.Millisecond)
 
-	time.Sleep(time.Second)
+	// Wait for first deletion (node 1 at 50ms)
+	select {
+	case <-deletionCount:
+	case <-time.After(time.Second):
+		t.Fatal("timeout waiting for first deletion")
+	}
 
 	// Cancel nodes 2 and 4
 	go e.Cancel(2)
 	go e.Cancel(4)
 
-	time.Sleep(6 * time.Second)
+	// Wait for node 3 to be deleted (at 150ms)
+	select {
+	case <-deletionCount:
+	case <-time.After(time.Second):
+		t.Fatal("timeout waiting for second deletion")
+	}
+
+	// Give a bit more time for any unexpected deletions
+	select {
+	case <-deletionCount:
+		// Unexpected - more deletions than expected
+	case <-time.After(300 * time.Millisecond):
+		// Expected - no more deletions
+	}
 
 	e.Close()
 
@@ -541,20 +568,30 @@ func TestEphemeralGarbageCollectorLoads(t *testing.T) {
 
 	want := 1000
 
+	var deletedCount int64
+
 	e := NewEphemeralGarbageCollector(func(ni types.NodeID) {
 		mu.Lock()
 		defer mu.Unlock()
 
-		time.Sleep(time.Duration(generateRandomNumber(t, 3)) * time.Millisecond)
+		// Yield to other goroutines to introduce variability
+		runtime.Gosched()
 		got = append(got, ni)
+
+		atomic.AddInt64(&deletedCount, 1)
 	})
 	go e.Start()
 
+	// Use shorter expiry for faster tests
 	for i := range want {
-		go e.Schedule(types.NodeID(i), 1*time.Second)
+		go e.Schedule(types.NodeID(i), 100*time.Millisecond) //nolint:gosec // test code, no overflow risk
 	}
 
-	time.Sleep(10 * time.Second)
+	// Wait for all deletions to complete
+	assert.EventuallyWithT(t, func(c *assert.CollectT) {
+		count := atomic.LoadInt64(&deletedCount)
+		assert.Equal(c, int64(want), count, "all nodes should be deleted")
+	}, 10*time.Second, 50*time.Millisecond, "waiting for all deletions")
 
 	e.Close()
 
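One spot in this last hunk suppresses a linter instead of satisfying it: gosec flags the int-to-NodeID conversion as a potential integer overflow, which cannot happen for values bounded by want. golangci-lint's inline directive scopes the suppression to a single line and, by convention, carries a justification after a second comment marker, for example (hypothetical variable names):

    id := types.NodeID(i) //nolint:gosec // i is bounded by want, no overflow risk

Narrow, line-scoped suppressions like this keep bans such as the new forbidigo rule meaningful: the default stays strict, and every exception is visible and justified where it occurs.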