Mirror of https://github.com/juanfont/headscale.git
Compare commits: 4 commits, v0.23.0-al ... v0.23.0-al
| Author | SHA1 | Date |
|---|---|---|
| | a9c568c801 | |
| | 1c6bfc503c | |
| | 55b35f4160 | |
| | d5ed8bc074 | |
.github/workflows/test-integration.yaml (vendored): 1 line changed
@@ -26,6 +26,7 @@ jobs:
           - TestPreAuthKeyCommand
           - TestPreAuthKeyCommandWithoutExpiry
           - TestPreAuthKeyCommandReusableEphemeral
+          - TestPreAuthKeyCorrectUserLoggedInCommand
           - TestApiKeyCommand
           - TestNodeTagCommand
           - TestNodeAdvertiseTagNoACLCommand
@@ -51,7 +51,7 @@ func initConfig() {
 
     cfg, err := types.GetHeadscaleConfig()
     if err != nil {
-        log.Fatal().Caller().Err(err).Msg("Failed to get headscale configuration")
+        log.Fatal().Err(err).Msg("Failed to read headscale configuration")
     }
 
     machineOutput := HasMachineOutputFlag()
@@ -800,10 +800,23 @@ func (h *Headscale) Serve() error {
         }
 
     default:
+        trace := log.Trace().Msgf
         log.Info().
             Str("signal", sig.String()).
             Msg("Received signal to stop, shutting down gracefully")
 
+        trace("closing map sessions")
+        wg := sync.WaitGroup{}
+        for _, mapSess := range h.mapSessions {
+            wg.Add(1)
+            go func() {
+                mapSess.close()
+                wg.Done()
+            }()
+        }
+        wg.Wait()
+
+        trace("waiting for netmap stream to close")
         h.pollNetMapStreamWG.Wait()
 
         // Gracefully shut down servers
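The new shutdown path closes every open map session in its own goroutine and waits for all of them with a sync.WaitGroup. A minimal sketch of that fan-out pattern, with the loop variable captured explicitly (which matters on Go versions before 1.22); the session type and names here are illustrative, not headscale's:

```go
package main

import "sync"

type session struct{ id int }

func (s *session) close() {
	// Release per-session resources here.
	_ = s.id
}

func closeAll(sessions map[int]*session) {
	var wg sync.WaitGroup
	for _, sess := range sessions {
		sess := sess // explicit capture; needed for correctness before Go 1.22
		wg.Add(1)
		go func() {
			defer wg.Done()
			sess.close()
		}()
	}
	wg.Wait() // block until every session has been closed
}

func main() {
	closeAll(map[int]*session{1: {id: 1}, 2: {id: 2}})
}
```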
@@ -811,32 +824,44 @@ func (h *Headscale) Serve() error {
             context.Background(),
             types.HTTPShutdownTimeout,
         )
+        trace("shutting down debug http server")
         if err := debugHTTPServer.Shutdown(ctx); err != nil {
             log.Error().Err(err).Msg("Failed to shutdown prometheus http")
         }
+        trace("shutting down main http server")
         if err := httpServer.Shutdown(ctx); err != nil {
             log.Error().Err(err).Msg("Failed to shutdown http")
         }
 
+        trace("shutting down grpc server (socket)")
         grpcSocket.GracefulStop()
 
         if grpcServer != nil {
+            trace("shutting down grpc server (external)")
             grpcServer.GracefulStop()
             grpcListener.Close()
         }
 
         if tailsqlContext != nil {
+            trace("shutting down tailsql")
             tailsqlContext.Done()
         }
 
+        trace("closing node notifier")
+        h.nodeNotifier.Close()
+
         // Close network listeners
+        trace("closing network listeners")
         debugHTTPListener.Close()
         httpListener.Close()
         grpcGatewayConn.Close()
 
         // Stop listening (and unlink the socket if unix type):
+        trace("closing socket listener")
         socketListener.Close()
 
         // Close db connections
+        trace("closing database connection")
         err = h.db.Close()
         if err != nil {
             log.Error().Err(err).Msg("Failed to close db")
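The shutdown sequence above derives a context from context.Background() with types.HTTPShutdownTimeout and hands it to each HTTP server's Shutdown. A self-contained sketch of that pattern, assuming a 30-second budget in place of the headscale constant:

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

func shutdownWithTimeout(srv *http.Server) {
	// Assumed 30s budget; the diff uses types.HTTPShutdownTimeout here.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Shutdown stops accepting new connections and waits for in-flight
	// requests to finish, or gives up when the context expires.
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("shutdown: %v", err)
	}
}

func main() {
	srv := &http.Server{Addr: "127.0.0.1:0"}
	go srv.ListenAndServe() // error ignored; sketch only
	shutdownWithTimeout(srv)
}
```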
@@ -315,13 +315,16 @@ func (h *Headscale) handleAuthKey(
 
         node.NodeKey = nodeKey
         node.AuthKeyID = uint(pak.ID)
-        err := h.db.NodeSetExpiry(node.ID, registerRequest.Expiry)
+        node.Expiry = &registerRequest.Expiry
+        node.User = pak.User
+        node.UserID = pak.UserID
+        err := h.db.DB.Save(node).Error
         if err != nil {
             log.Error().
                 Caller().
                 Str("node", node.Hostname).
                 Err(err).
-                Msg("Failed to refresh node")
+                Msg("failed to save node after logging in with auth key")
 
             return
         }
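With this change the auth-key login path fills in expiry, user, and user ID on the node and persists the whole record with GORM's Save, where it previously only wrote the expiry through NodeSetExpiry. A rough illustration of the difference between the two persistence styles, using a simplified stand-in for the node type rather than headscale's:

```go
package sketch

import (
	"time"

	"gorm.io/gorm"
)

// Node is a simplified stand-in for headscale's node record.
type Node struct {
	ID     uint
	UserID uint
	Expiry *time.Time
}

// persistAll mirrors the new behaviour: Save writes every field of the record.
func persistAll(db *gorm.DB, node *Node) error {
	return db.Save(node).Error
}

// persistExpiry mirrors the old behaviour: only the expiry column is touched.
func persistExpiry(db *gorm.DB, id uint, expiry time.Time) error {
	return db.Model(&Node{}).Where("id = ?", id).Update("expiry", expiry).Error
}
```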
@@ -344,7 +347,7 @@ func (h *Headscale) handleAuthKey(
         }
 
         ctx := types.NotifyCtx(context.Background(), "handle-authkey", "na")
-        h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, registerRequest.Expiry), node.ID)
+        h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{Type: types.StatePeerChanged, ChangeNodes: []types.NodeID{node.ID}})
     } else {
         now := time.Now().UTC()
 
@@ -34,6 +34,11 @@ func NewNotifier(cfg *types.Config) *Notifier {
     return n
 }
 
+// Close stops the batcher inside the notifier.
+func (n *Notifier) Close() {
+    n.b.close()
+}
+
 func (n *Notifier) AddNode(nodeID types.NodeID, c chan<- types.StateUpdate) {
     log.Trace().Caller().Uint64("node.id", nodeID.Uint64()).Msg("acquiring lock to add node")
     defer log.Trace().
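Notifier.Close only delegates to the internal batcher's close; the batcher itself is not part of this compare. A purely hypothetical sketch of one shape such a batcher could take, a background worker stopped through a close channel guarded by sync.Once:

```go
package sketch

import "sync"

// batcher is a hypothetical stand-in for the notifier's internal batcher:
// a background worker that coalesces updates and can be stopped once.
type batcher struct {
	updates   chan string
	done      chan struct{}
	closeOnce sync.Once
}

func newBatcher() *batcher {
	b := &batcher{updates: make(chan string), done: make(chan struct{})}
	go b.run()
	return b
}

func (b *batcher) run() {
	for {
		select {
		case <-b.done:
			return
		case u := <-b.updates:
			_ = u // batch and forward the update here
		}
	}
}

// close stops the worker; safe to call more than once thanks to sync.Once.
func (b *batcher) close() {
	b.closeOnce.Do(func() { close(b.done) })
}
```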
@@ -650,6 +650,10 @@ func GetHeadscaleConfig() (*Config, error) {
         return nil, err
     }
 
+    if prefix4 == nil && prefix6 == nil {
+        return nil, fmt.Errorf("no IPv4 or IPv6 prefix configured, minimum one prefix is required")
+    }
+
     allocStr := viper.GetString("prefixes.allocation")
     var alloc IPAllocationStrategy
     switch allocStr {
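The added guard rejects configurations where neither an IPv4 nor an IPv6 prefix is set. A hedged sketch of how optional prefixes can be parsed and validated with net/netip; the helper and its arguments are assumptions, not headscale's code:

```go
package sketch

import (
	"fmt"
	"net/netip"
)

// parsePrefixes parses optional IPv4/IPv6 prefixes and requires at least one.
func parsePrefixes(v4, v6 string) (*netip.Prefix, *netip.Prefix, error) {
	var prefix4, prefix6 *netip.Prefix

	if v4 != "" {
		p, err := netip.ParsePrefix(v4)
		if err != nil {
			return nil, nil, fmt.Errorf("parsing IPv4 prefix: %w", err)
		}
		prefix4 = &p
	}
	if v6 != "" {
		p, err := netip.ParsePrefix(v6)
		if err != nil {
			return nil, nil, fmt.Errorf("parsing IPv6 prefix: %w", err)
		}
		prefix6 = &p
	}

	if prefix4 == nil && prefix6 == nil {
		return nil, nil, fmt.Errorf("no IPv4 or IPv6 prefix configured, minimum one prefix is required")
	}

	return prefix4, prefix6, nil
}
```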
@@ -658,7 +662,7 @@ func GetHeadscaleConfig() (*Config, error) {
     case string(IPAllocationStrategyRandom):
         alloc = IPAllocationStrategyRandom
     default:
-        log.Fatal().Msgf("config error, prefixes.allocation is set to %s, which is not a valid strategy, allowed options: %s, %s", allocStr, IPAllocationStrategySequential, IPAllocationStrategyRandom)
+        return nil, fmt.Errorf("config error, prefixes.allocation is set to %s, which is not a valid strategy, allowed options: %s, %s", allocStr, IPAllocationStrategySequential, IPAllocationStrategyRandom)
     }
 
     dnsConfig, baseDomain := GetDNSConfig()
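The default branch now reports an invalid prefixes.allocation value as an error instead of terminating inside GetHeadscaleConfig, leaving the exit decision to the caller (the CLI's initConfig in the hunk further up still calls log.Fatal when the error reaches it). A small sketch of that split of responsibility with simplified names:

```go
package sketch

import (
	"fmt"
	"log"
)

type allocation string

const (
	allocSequential allocation = "sequential"
	allocRandom     allocation = "random"
)

// parseAllocation reports an invalid strategy as an error instead of
// terminating the process itself.
func parseAllocation(s string) (allocation, error) {
	switch s {
	case string(allocSequential):
		return allocSequential, nil
	case string(allocRandom):
		return allocRandom, nil
	default:
		return "", fmt.Errorf(
			"prefixes.allocation is set to %q, allowed options: %s, %s",
			s, allocSequential, allocRandom)
	}
}

// mustAllocation is what a CLI entry point might do with that error.
func mustAllocation(s string) allocation {
	a, err := parseAllocation(s)
	if err != nil {
		log.Fatal(err)
	}
	return a
}
```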
@@ -306,11 +306,15 @@ func (node *Node) AfterFind(tx *gorm.DB) error {
     }
     node.NodeKey = nodeKey
 
-    var discoKey key.DiscoPublic
-    if err := discoKey.UnmarshalText([]byte(node.DiscoKeyDatabaseField)); err != nil {
-        return fmt.Errorf("unmarshalling disco key from db: %w", err)
+    // DiscoKey might be empty if a node has not sent it to headscale.
+    // This means that this might fail if the disco key is empty.
+    if node.DiscoKeyDatabaseField != "" {
+        var discoKey key.DiscoPublic
+        if err := discoKey.UnmarshalText([]byte(node.DiscoKeyDatabaseField)); err != nil {
+            return fmt.Errorf("unmarshalling disco key from db: %w", err)
+        }
+        node.DiscoKey = discoKey
     }
-    node.DiscoKey = discoKey
 
     endpoints := make([]netip.AddrPort, len(node.EndpointsDatabaseField))
     for idx, ep := range node.EndpointsDatabaseField {
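AfterFind now skips disco-key parsing when the stored column is empty, since a node may not have sent a disco key yet. The same guard works for any optional text column whose type implements encoding.TextUnmarshaler; a short sketch with placeholder names:

```go
package sketch

import (
	"encoding"
	"fmt"
)

// unmarshalOptional leaves dst at its zero value when the stored text is
// empty, and only calls UnmarshalText for a non-empty column.
func unmarshalOptional(dst encoding.TextUnmarshaler, stored string) error {
	if stored == "" {
		return nil
	}
	if err := dst.UnmarshalText([]byte(stored)); err != nil {
		return fmt.Errorf("unmarshalling stored value: %w", err)
	}
	return nil
}
```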
@@ -388,6 +388,101 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
     assert.Len(t, listedPreAuthKeys, 3)
 }
 
+func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {
+    IntegrationSkip(t)
+    t.Parallel()
+
+    user1 := "user1"
+    user2 := "user2"
+
+    scenario, err := NewScenario(dockertestMaxWait())
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
+
+    spec := map[string]int{
+        user1: 1,
+        user2: 0,
+    }
+
+    err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipak"))
+    assertNoErr(t, err)
+
+    headscale, err := scenario.Headscale()
+    assertNoErr(t, err)
+
+    var user2Key v1.PreAuthKey
+
+    err = executeAndUnmarshal(
+        headscale,
+        []string{
+            "headscale",
+            "preauthkeys",
+            "--user",
+            user2,
+            "create",
+            "--reusable",
+            "--expiration",
+            "24h",
+            "--output",
+            "json",
+            "--tags",
+            "tag:test1,tag:test2",
+        },
+        &user2Key,
+    )
+    assertNoErr(t, err)
+
+    allClients, err := scenario.ListTailscaleClients()
+    assertNoErrListClients(t, err)
+
+    assert.Len(t, allClients, 1)
+
+    client := allClients[0]
+
+    // Log out from user1
+    err = client.Logout()
+    assertNoErr(t, err)
+
+    err = scenario.WaitForTailscaleLogout()
+    assertNoErr(t, err)
+
+    status, err := client.Status()
+    assertNoErr(t, err)
+    if status.BackendState == "Starting" || status.BackendState == "Running" {
+        t.Fatalf("expected node to be logged out, backend state: %s", status.BackendState)
+    }
+
+    err = client.Login(headscale.GetEndpoint(), user2Key.GetKey())
+    assertNoErr(t, err)
+
+    status, err = client.Status()
+    assertNoErr(t, err)
+    if status.BackendState != "Running" {
+        t.Fatalf("expected node to be logged in, backend state: %s", status.BackendState)
+    }
+
+    if status.Self.UserID.String() != "userid:2" {
+        t.Fatalf("expected node to be logged in as userid:2, got: %s", status.Self.UserID.String())
+    }
+
+    var listNodes []v1.Node
+    err = executeAndUnmarshal(
+        headscale,
+        []string{
+            "headscale",
+            "nodes",
+            "list",
+            "--output",
+            "json",
+        },
+        &listNodes,
+    )
+    assert.Nil(t, err)
+    assert.Len(t, listNodes, 1)
+
+    assert.Equal(t, "user2", listNodes[0].User.Name)
+}
+
 func TestApiKeyCommand(t *testing.T) {
     IntegrationSkip(t)
     t.Parallel()
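The new integration test drives the headscale CLI with --output json and decodes the result through the suite's executeAndUnmarshal helper. A rough, assumed approximation of what such a helper does, shown locally with os/exec (the real suite executes the command inside the headscale container):

```go
package sketch

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// runAndUnmarshal executes a command and decodes its JSON stdout into out.
// This local variant is only illustrative of the helper's role in the test.
func runAndUnmarshal(out any, command ...string) error {
	cmd := exec.Command(command[0], command[1:]...)
	stdout, err := cmd.Output()
	if err != nil {
		return fmt.Errorf("running %v: %w", command, err)
	}
	if err := json.Unmarshal(stdout, out); err != nil {
		return fmt.Errorf("decoding JSON output: %w", err)
	}
	return nil
}
```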