all: use lowercase log messages

Go style recommends that log messages and error strings not be
capitalized (unless they begin with a proper noun or acronym) and not
end with punctuation.

This change normalizes all zerolog .Msg() and .Msgf() calls to start
with lowercase letters, following Go conventions and making logs more
consistent across the codebase.
This commit is contained in:
Kristoffer Dalby
2026-02-05 13:59:26 +00:00
parent dd16567c52
commit 4a9a329339
22 changed files with 90 additions and 85 deletions

View File

@@ -16,7 +16,7 @@ var configTestCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
_, err := newHeadscaleServerWithConfig() _, err := newHeadscaleServerWithConfig()
if err != nil { if err != nil {
log.Fatal().Caller().Err(err).Msg("Error initializing") log.Fatal().Caller().Err(err).Msg("error initializing")
} }
}, },
} }

View File

@@ -36,7 +36,7 @@ var mockOidcCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
err := mockOIDC() err := mockOIDC()
if err != nil { if err != nil {
log.Error().Err(err).Msgf("Error running mock OIDC server") log.Error().Err(err).Msgf("error running mock OIDC server")
os.Exit(1) os.Exit(1)
} }
}, },
@@ -81,7 +81,7 @@ func mockOIDC() error {
log.Info().Interface(zf.Users, users).Msg("loading users from JSON") log.Info().Interface(zf.Users, users).Msg("loading users from JSON")
log.Info().Msgf("Access token TTL: %s", accessTTL) log.Info().Msgf("access token TTL: %s", accessTTL)
port, err := strconv.Atoi(portStr) port, err := strconv.Atoi(portStr)
if err != nil { if err != nil {
@@ -102,8 +102,9 @@ func mockOIDC() error {
if err != nil { if err != nil {
return err return err
} }
log.Info().Msgf("Mock OIDC server listening on %s", listener.Addr().String())
log.Info().Msgf("Issuer: %s", mock.Issuer()) log.Info().Msgf("mock OIDC server listening on %s", listener.Addr().String())
log.Info().Msgf("issuer: %s", mock.Issuer())
c := make(chan struct{}) c := make(chan struct{})
<-c <-c
@@ -136,10 +137,10 @@ func getMockOIDC(clientID string, clientSecret string, users []mockoidc.MockUser
mock.AddMiddleware(func(h http.Handler) http.Handler { mock.AddMiddleware(func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
log.Info().Msgf("Request: %+v", r) log.Info().Msgf("request: %+v", r)
h.ServeHTTP(w, r) h.ServeHTTP(w, r)
if r.Response != nil { if r.Response != nil {
log.Info().Msgf("Response: %+v", r.Response) log.Info().Msgf("response: %+v", r.Response)
} }
}) })
}) })

View File

@@ -48,12 +48,12 @@ func initConfig() {
if cfgFile != "" { if cfgFile != "" {
err := types.LoadConfig(cfgFile, true) err := types.LoadConfig(cfgFile, true)
if err != nil { if err != nil {
log.Fatal().Caller().Err(err).Msgf("Error loading config file %s", cfgFile) log.Fatal().Caller().Err(err).Msgf("error loading config file %s", cfgFile)
} }
} else { } else {
err := types.LoadConfig("", false) err := types.LoadConfig("", false)
if err != nil { if err != nil {
log.Fatal().Caller().Err(err).Msgf("Error loading config") log.Fatal().Caller().Err(err).Msgf("error loading config")
} }
} }

View File

@@ -29,12 +29,12 @@ var serveCmd = &cobra.Command{
fmt.Println(squibbleErr.Diff) fmt.Println(squibbleErr.Diff)
} }
log.Fatal().Caller().Err(err).Msg("Error initializing") log.Fatal().Caller().Err(err).Msg("error initializing")
} }
err = app.Serve() err = app.Serve()
if err != nil && !errors.Is(err, http.ErrServerClosed) { if err != nil && !errors.Is(err, http.ErrServerClosed) {
log.Fatal().Caller().Err(err).Msg("Headscale ran into an error and had to shut down.") log.Fatal().Caller().Err(err).Msg("headscale ran into an error and had to shut down")
} }
}, },
} }

View File

@@ -82,7 +82,7 @@ var createUserCmd = &cobra.Command{
defer cancel() defer cancel()
defer conn.Close() defer conn.Close()
log.Trace().Interface(zf.Client, client).Msg("Obtained gRPC client") log.Trace().Interface(zf.Client, client).Msg("obtained gRPC client")
request := &v1.CreateUserRequest{Name: userName} request := &v1.CreateUserRequest{Name: userName}
@@ -108,7 +108,7 @@ var createUserCmd = &cobra.Command{
request.PictureUrl = pictureURL request.PictureUrl = pictureURL
} }
log.Trace().Interface(zf.Request, request).Msg("Sending CreateUser request") log.Trace().Interface(zf.Request, request).Msg("sending CreateUser request")
response, err := client.CreateUser(ctx, request) response, err := client.CreateUser(ctx, request)
if err != nil { if err != nil {
ErrorOutput( ErrorOutput(

View File

@@ -93,7 +93,7 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
// If we are not connecting to a local server, require an API key for authentication // If we are not connecting to a local server, require an API key for authentication
apiKey := cfg.CLI.APIKey apiKey := cfg.CLI.APIKey
if apiKey == "" { if apiKey == "" {
log.Fatal().Caller().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set.") log.Fatal().Caller().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set")
} }
grpcOptions = append(grpcOptions, grpcOptions = append(grpcOptions,
grpc.WithPerRPCCredentials(tokenAuth{ grpc.WithPerRPCCredentials(tokenAuth{
@@ -119,10 +119,10 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
} }
} }
log.Trace().Caller().Str(zf.Address, address).Msg("Connecting via gRPC") log.Trace().Caller().Str(zf.Address, address).Msg("connecting via gRPC")
conn, err := grpc.DialContext(ctx, address, grpcOptions...) conn, err := grpc.DialContext(ctx, address, grpcOptions...)
if err != nil { if err != nil {
log.Fatal().Caller().Err(err).Msgf("Could not connect: %v", err) log.Fatal().Caller().Err(err).Msgf("could not connect: %v", err)
os.Exit(-1) // we get here if logging is suppressed (i.e., json output) os.Exit(-1) // we get here if logging is suppressed (i.e., json output)
} }

View File

@@ -140,19 +140,19 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
ephemeralGC := db.NewEphemeralGarbageCollector(func(ni types.NodeID) { ephemeralGC := db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
node, ok := app.state.GetNodeByID(ni) node, ok := app.state.GetNodeByID(ni)
if !ok { if !ok {
log.Error().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed") log.Error().Uint64("node.id", ni.Uint64()).Msg("ephemeral node deletion failed")
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed because node not found in NodeStore") log.Debug().Caller().Uint64("node.id", ni.Uint64()).Msg("ephemeral node deletion failed because node not found in NodeStore")
return return
} }
policyChanged, err := app.state.DeleteNode(node) policyChanged, err := app.state.DeleteNode(node)
if err != nil { if err != nil {
log.Error().Err(err).EmbedObject(node).Msg("Ephemeral node deletion failed") log.Error().Err(err).EmbedObject(node).Msg("ephemeral node deletion failed")
return return
} }
app.Change(policyChanged) app.Change(policyChanged)
log.Debug().Caller().EmbedObject(node).Msg("Ephemeral node deleted because garbage collection timeout reached") log.Debug().Caller().EmbedObject(node).Msg("ephemeral node deleted because garbage collection timeout reached")
}) })
app.ephemeralGC = ephemeralGC app.ephemeralGC = ephemeralGC
@@ -286,7 +286,7 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
} }
case <-derpTickerChan: case <-derpTickerChan:
log.Info().Msg("Fetching DERPMap updates") log.Info().Msg("fetching DERPMap updates")
derpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) { derpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) {
derpMap, err := derp.GetDERPMap(h.cfg.DERP) derpMap, err := derp.GetDERPMap(h.cfg.DERP)
if err != nil { if err != nil {
@@ -506,7 +506,7 @@ func (h *Headscale) Serve() error {
} }
versionInfo := types.GetVersionInfo() versionInfo := types.GetVersionInfo()
log.Info().Str("version", versionInfo.Version).Str("commit", versionInfo.Commit).Msg("Starting Headscale") log.Info().Str("version", versionInfo.Version).Str("commit", versionInfo.Commit).Msg("starting headscale")
log.Info(). log.Info().
Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)). Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).
Msg("Clients with a lower minimum version will be rejected") Msg("Clients with a lower minimum version will be rejected")
@@ -662,7 +662,7 @@ func (h *Headscale) Serve() error {
var grpcServer *grpc.Server var grpcServer *grpc.Server
var grpcListener net.Listener var grpcListener net.Listener
if tlsConfig != nil || h.cfg.GRPCAllowInsecure { if tlsConfig != nil || h.cfg.GRPCAllowInsecure {
log.Info().Msgf("Enabling remote gRPC at %s", h.cfg.GRPCAddr) log.Info().Msgf("enabling remote gRPC at %s", h.cfg.GRPCAddr)
grpcOptions := []grpc.ServerOption{ grpcOptions := []grpc.ServerOption{
grpc.ChainUnaryInterceptor( grpc.ChainUnaryInterceptor(
@@ -940,13 +940,13 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
} }
} else if h.cfg.TLS.CertPath == "" { } else if h.cfg.TLS.CertPath == "" {
if !strings.HasPrefix(h.cfg.ServerURL, "http://") { if !strings.HasPrefix(h.cfg.ServerURL, "http://") {
log.Warn().Msg("Listening without TLS but ServerURL does not start with http://") log.Warn().Msg("listening without TLS but ServerURL does not start with http://")
} }
return nil, err return nil, err
} else { } else {
if !strings.HasPrefix(h.cfg.ServerURL, "https://") { if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://") log.Warn().Msg("listening with TLS but ServerURL does not start with https://")
} }
tlsConfig := &tls.Config{ tlsConfig := &tls.Config{
@@ -970,7 +970,7 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
privateKey, err := os.ReadFile(path) privateKey, err := os.ReadFile(path)
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {
log.Info().Str("path", path).Msg("No private key file at path, creating...") log.Info().Str("path", path).Msg("no private key file at path, creating...")
machineKey := key.NewMachine() machineKey := key.NewMachine()
@@ -1023,7 +1023,7 @@ type acmeLogger struct {
func (l *acmeLogger) RoundTrip(req *http.Request) (*http.Response, error) { func (l *acmeLogger) RoundTrip(req *http.Request) (*http.Response, error) {
resp, err := l.rt.RoundTrip(req) resp, err := l.rt.RoundTrip(req)
if err != nil { if err != nil {
log.Error().Err(err).Str("url", req.URL.String()).Msg("ACME request failed") log.Error().Err(err).Str("url", req.URL.String()).Msg("acme request failed")
return nil, err return nil, err
} }
@@ -1031,7 +1031,7 @@ func (l *acmeLogger) RoundTrip(req *http.Request) (*http.Response, error) {
defer resp.Body.Close() defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body) body, _ := io.ReadAll(resp.Body)
log.Error().Int("status_code", resp.StatusCode).Str("url", req.URL.String()).Bytes("body", body).Msg("ACME request returned error") log.Error().Int("status_code", resp.StatusCode).Str("url", req.URL.String()).Bytes("body", body).Msg("acme request returned error")
} }
return resp, nil return resp, nil

View File

@@ -320,7 +320,7 @@ func (h *Headscale) reqToNewRegisterResponse(
nodeToRegister.Node.Expiry = &req.Expiry nodeToRegister.Node.Expiry = &req.Expiry
} }
log.Info().Msgf("New followup node registration using key: %s", newRegID) log.Info().Msgf("new followup node registration using key: %s", newRegID)
h.state.SetRegistrationCacheEntry(newRegID, nodeToRegister) h.state.SetRegistrationCacheEntry(newRegID, nodeToRegister)
return &tailcfg.RegisterResponse{ return &tailcfg.RegisterResponse{
@@ -451,7 +451,7 @@ func (h *Headscale) handleRegisterInteractive(
nodeToRegister, nodeToRegister,
) )
log.Info().Msgf("Starting node registration using key: %s", registrationId) log.Info().Msgf("starting node registration using key: %s", registrationId)
return &tailcfg.RegisterResponse{ return &tailcfg.RegisterResponse{
AuthURL: h.authProvider.AuthURL(registrationId), AuthURL: h.authProvider.AuthURL(registrationId),

View File

@@ -245,11 +245,11 @@ AND auth_key_id NOT IN (
Migrate: func(tx *gorm.DB) error { Migrate: func(tx *gorm.DB) error {
// Only run on SQLite // Only run on SQLite
if cfg.Database.Type != types.DatabaseSqlite { if cfg.Database.Type != types.DatabaseSqlite {
log.Info().Msg("Skipping schema migration on non-SQLite database") log.Info().Msg("skipping schema migration on non-SQLite database")
return nil return nil
} }
log.Info().Msg("Starting schema recreation with table renaming") log.Info().Msg("starting schema recreation with table renaming")
// Rename existing tables to _old versions // Rename existing tables to _old versions
tablesToRename := []string{"users", "pre_auth_keys", "api_keys", "nodes", "policies"} tablesToRename := []string{"users", "pre_auth_keys", "api_keys", "nodes", "policies"}
@@ -258,7 +258,7 @@ AND auth_key_id NOT IN (
var routesExists bool var routesExists bool
err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='routes'").Row().Scan(&routesExists) err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='routes'").Row().Scan(&routesExists)
if err == nil && routesExists { if err == nil && routesExists {
log.Info().Msg("Dropping leftover routes table") log.Info().Msg("dropping leftover routes table")
if err := tx.Exec("DROP TABLE routes").Error; err != nil { if err := tx.Exec("DROP TABLE routes").Error; err != nil {
return fmt.Errorf("dropping routes table: %w", err) return fmt.Errorf("dropping routes table: %w", err)
} }
@@ -425,11 +425,11 @@ AND auth_key_id NOT IN (
// Drop old tables only after everything succeeds // Drop old tables only after everything succeeds
for _, table := range tablesToRename { for _, table := range tablesToRename {
if err := tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error; err != nil { if err := tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error; err != nil {
log.Warn().Str("table", table+"_old").Err(err).Msg("Failed to drop old table, but migration succeeded") log.Warn().Str("table", table+"_old").Err(err).Msg("failed to drop old table, but migration succeeded")
} }
} }
log.Info().Msg("Schema recreation completed successfully") log.Info().Msg("schema recreation completed successfully")
return nil return nil
}, },
@@ -595,12 +595,12 @@ AND auth_key_id NOT IN (
// 1. Load policy from file or database based on configuration // 1. Load policy from file or database based on configuration
policyData, err := PolicyBytes(tx, cfg) policyData, err := PolicyBytes(tx, cfg)
if err != nil { if err != nil {
log.Warn().Err(err).Msg("Failed to load policy, skipping RequestTags migration (tags will be validated on node reconnect)") log.Warn().Err(err).Msg("failed to load policy, skipping RequestTags migration (tags will be validated on node reconnect)")
return nil return nil
} }
if len(policyData) == 0 { if len(policyData) == 0 {
log.Info().Msg("No policy found, skipping RequestTags migration (tags will be validated on node reconnect)") log.Info().Msg("no policy found, skipping RequestTags migration (tags will be validated on node reconnect)")
return nil return nil
} }
@@ -618,7 +618,7 @@ AND auth_key_id NOT IN (
// 3. Create PolicyManager (handles HuJSON parsing, groups, nested tags, etc.) // 3. Create PolicyManager (handles HuJSON parsing, groups, nested tags, etc.)
polMan, err := policy.NewPolicyManager(policyData, users, nodes.ViewSlice()) polMan, err := policy.NewPolicyManager(policyData, users, nodes.ViewSlice())
if err != nil { if err != nil {
log.Warn().Err(err).Msg("Failed to parse policy, skipping RequestTags migration (tags will be validated on node reconnect)") log.Warn().Err(err).Msg("failed to parse policy, skipping RequestTags migration (tags will be validated on node reconnect)")
return nil return nil
} }
@@ -935,7 +935,7 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
} }
for _, migrationID := range migrationIDs { for _, migrationID := range migrationIDs {
log.Trace().Caller().Str("migration_id", migrationID).Msg("Running migration") log.Trace().Caller().Str("migration_id", migrationID).Msg("running migration")
needsFKDisabled := migrationsRequiringFKDisabled[migrationID] needsFKDisabled := migrationsRequiringFKDisabled[migrationID]
if needsFKDisabled { if needsFKDisabled {

View File

@@ -283,7 +283,7 @@ func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
} }
for _, node := range nodes { for _, node := range nodes {
log.Trace().Caller().EmbedObject(node).Msg("IP backfill check started because node found in database") log.Trace().Caller().EmbedObject(node).Msg("ip backfill check started because node found in database")
changed := false changed := false
// IPv4 prefix is set, but node ip is missing, alloc // IPv4 prefix is set, but node ip is missing, alloc

View File

@@ -370,7 +370,7 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
logEvent = logEvent.Str(zf.UserName, "none") logEvent = logEvent.Str(zf.UserName, "none")
} }
logEvent.Msg("Registering test node") logEvent.Msg("registering test node")
// If the a new node is registered with the same machine key, to the same user, // If the a new node is registered with the same machine key, to the same user,
// update the existing node. // update the existing node.
@@ -418,7 +418,7 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
node.Hostname, err = util.NormaliseHostname(node.Hostname) node.Hostname, err = util.NormaliseHostname(node.Hostname)
if err != nil { if err != nil {
newHostname := util.InvalidString() newHostname := util.InvalidString()
log.Info().Err(err).Str(zf.InvalidHostname, node.Hostname).Str(zf.NewHostname, newHostname).Msgf("Invalid hostname, replacing") log.Info().Err(err).Str(zf.InvalidHostname, node.Hostname).Str(zf.NewHostname, newHostname).Msgf("invalid hostname, replacing")
node.Hostname = newHostname node.Hostname = newHostname
} }

View File

@@ -54,7 +54,7 @@ func NewDERPServer(
derpKey key.NodePrivate, derpKey key.NodePrivate,
cfg *types.DERPConfig, cfg *types.DERPConfig,
) (*DERPServer, error) { ) (*DERPServer, error) {
log.Trace().Caller().Msg("Creating new embedded DERP server") log.Trace().Caller().Msg("creating new embedded DERP server")
server := derpserver.New(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains server := derpserver.New(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains
if cfg.ServerVerifyClients { if cfg.ServerVerifyClients {
@@ -100,11 +100,11 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
if debugUseDERPIP { if debugUseDERPIP {
ips, err := net.LookupIP(host) ips, err := net.LookupIP(host)
if err != nil { if err != nil {
log.Error().Caller().Err(err).Msgf("Failed to resolve DERP hostname %s to IP, using hostname", host) log.Error().Caller().Err(err).Msgf("failed to resolve DERP hostname %s to IP, using hostname", host)
} else if len(ips) > 0 { } else if len(ips) > 0 {
// Use the first IP address // Use the first IP address
ipStr := ips[0].String() ipStr := ips[0].String()
log.Info().Caller().Msgf("HEADSCALE_DEBUG_DERP_USE_IP: Resolved %s to %s", host, ipStr) log.Info().Caller().Msgf("HEADSCALE_DEBUG_DERP_USE_IP: resolved %s to %s", host, ipStr)
host = ipStr host = ipStr
} }
} }
@@ -136,8 +136,8 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
} }
localDERPregion.Nodes[0].STUNPort = portSTUN localDERPregion.Nodes[0].STUNPort = portSTUN
log.Info().Caller().Msgf("DERP region: %+v", localDERPregion) log.Info().Caller().Msgf("derp region: %+v", localDERPregion)
log.Info().Caller().Msgf("DERP Nodes[0]: %+v", localDERPregion.Nodes[0]) log.Info().Caller().Msgf("derp nodes[0]: %+v", localDERPregion.Nodes[0])
return localDERPregion, nil return localDERPregion, nil
} }
@@ -222,7 +222,7 @@ func (d *DERPServer) servePlain(writer http.ResponseWriter, req *http.Request) {
hijacker, ok := writer.(http.Hijacker) hijacker, ok := writer.(http.Hijacker)
if !ok { if !ok {
log.Error().Caller().Msg("DERP requires Hijacker interface from Gin") log.Error().Caller().Msg("derp requires Hijacker interface from Gin")
writer.Header().Set("Content-Type", "text/plain") writer.Header().Set("Content-Type", "text/plain")
writer.WriteHeader(http.StatusInternalServerError) writer.WriteHeader(http.StatusInternalServerError)
_, err := writer.Write([]byte("HTTP does not support general TCP support")) _, err := writer.Write([]byte("HTTP does not support general TCP support"))
@@ -238,7 +238,7 @@ func (d *DERPServer) servePlain(writer http.ResponseWriter, req *http.Request) {
netConn, conn, err := hijacker.Hijack() netConn, conn, err := hijacker.Hijack()
if err != nil { if err != nil {
log.Error().Caller().Err(err).Msgf("Hijack failed") log.Error().Caller().Err(err).Msgf("hijack failed")
writer.Header().Set("Content-Type", "text/plain") writer.Header().Set("Content-Type", "text/plain")
writer.WriteHeader(http.StatusInternalServerError) writer.WriteHeader(http.StatusInternalServerError)
_, err = writer.Write([]byte("HTTP does not support general TCP support")) _, err = writer.Write([]byte("HTTP does not support general TCP support"))
@@ -251,7 +251,8 @@ func (d *DERPServer) servePlain(writer http.ResponseWriter, req *http.Request) {
return return
} }
log.Trace().Caller().Msgf("Hijacked connection from %v", req.RemoteAddr)
log.Trace().Caller().Msgf("hijacked connection from %v", req.RemoteAddr)
if !fastStart { if !fastStart {
pubKey := d.key.Public() pubKey := d.key.Public()
@@ -342,11 +343,12 @@ func (d *DERPServer) ServeSTUN() {
if err != nil { if err != nil {
log.Fatal().Msgf("failed to open STUN listener: %v", err) log.Fatal().Msgf("failed to open STUN listener: %v", err)
} }
log.Info().Msgf("STUN server started at %s", packetConn.LocalAddr())
log.Info().Msgf("stun server started at %s", packetConn.LocalAddr())
udpConn, ok := packetConn.(*net.UDPConn) udpConn, ok := packetConn.(*net.UDPConn)
if !ok { if !ok {
log.Fatal().Msg("STUN listener is not a UDP listener") log.Fatal().Msg("stun listener is not a UDP listener")
} }
serverSTUNListener(context.Background(), udpConn) serverSTUNListener(context.Background(), udpConn)
} }
@@ -364,7 +366,8 @@ func serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) {
if ctx.Err() != nil { if ctx.Err() != nil {
return return
} }
log.Error().Caller().Err(err).Msgf("STUN ReadFrom")
log.Error().Caller().Err(err).Msgf("stun ReadFrom")
// Rate limit error logging - wait before retrying, but respect context cancellation // Rate limit error logging - wait before retrying, but respect context cancellation
select { select {
@@ -375,16 +378,17 @@ func serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) {
continue continue
} }
log.Trace().Caller().Msgf("STUN request from %v", udpAddr)
log.Trace().Caller().Msgf("stun request from %v", udpAddr)
pkt := buf[:bytesRead] pkt := buf[:bytesRead]
if !stun.Is(pkt) { if !stun.Is(pkt) {
log.Trace().Caller().Msgf("UDP packet is not STUN") log.Trace().Caller().Msgf("udp packet is not stun")
continue continue
} }
txid, err := stun.ParseBindingRequest(pkt) txid, err := stun.ParseBindingRequest(pkt)
if err != nil { if err != nil {
log.Trace().Caller().Err(err).Msgf("STUN parse error") log.Trace().Caller().Err(err).Msgf("stun parse error")
continue continue
} }
@@ -393,7 +397,7 @@ func serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) {
res := stun.Response(txid, netip.AddrPortFrom(addr, uint16(udpAddr.Port))) res := stun.Response(txid, netip.AddrPortFrom(addr, uint16(udpAddr.Port)))
_, err = packetConn.WriteTo(res, udpAddr) _, err = packetConn.WriteTo(res, udpAddr)
if err != nil { if err != nil {
log.Trace().Caller().Err(err).Msgf("Issue writing to UDP") log.Trace().Caller().Err(err).Msgf("issue writing to UDP")
continue continue
} }
@@ -413,7 +417,7 @@ type DERPVerifyTransport struct {
func (t *DERPVerifyTransport) RoundTrip(req *http.Request) (*http.Response, error) { func (t *DERPVerifyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
if err := t.handleVerifyRequest(req, buf); err != nil { if err := t.handleVerifyRequest(req, buf); err != nil {
log.Error().Caller().Err(err).Msg("Failed to handle client verify request: ") log.Error().Caller().Err(err).Msg("failed to handle client verify request")
return nil, err return nil, err
} }

View File

@@ -236,7 +236,7 @@ func (api headscaleV1APIServer) RegisterNode(
// Generate ephemeral registration key for tracking this registration flow in logs // Generate ephemeral registration key for tracking this registration flow in logs
registrationKey, err := util.GenerateRegistrationKey() registrationKey, err := util.GenerateRegistrationKey()
if err != nil { if err != nil {
log.Warn().Err(err).Msg("Failed to generate registration key") log.Warn().Err(err).Msg("failed to generate registration key")
registrationKey = "" // Continue without key if generation fails registrationKey = "" // Continue without key if generation fails
} }
@@ -546,7 +546,7 @@ func (api headscaleV1APIServer) BackfillNodeIPs(
ctx context.Context, ctx context.Context,
request *v1.BackfillNodeIPsRequest, request *v1.BackfillNodeIPsRequest,
) (*v1.BackfillNodeIPsResponse, error) { ) (*v1.BackfillNodeIPsResponse, error) {
log.Trace().Caller().Msg("Backfill called") log.Trace().Caller().Msg("backfill called")
if !request.Confirmed { if !request.Confirmed {
return nil, errors.New("not confirmed, aborting") return nil, errors.New("not confirmed, aborting")
@@ -823,7 +823,7 @@ func (api headscaleV1APIServer) Health(
} }
if healthErr != nil { if healthErr != nil {
log.Error().Err(healthErr).Msg("Health check failed") log.Error().Err(healthErr).Msg("health check failed")
} }
return response, healthErr return response, healthErr

View File

@@ -119,14 +119,14 @@ func (b *LockFreeBatcher) RemoveNode(id types.NodeID, c chan<- *tailcfg.MapRespo
nodeConn, exists := b.nodes.Load(id) nodeConn, exists := b.nodes.Load(id)
if !exists { if !exists {
nlog.Debug().Caller().Msg("RemoveNode called for non-existent node") nlog.Debug().Caller().Msg("removeNode called for non-existent node")
return false return false
} }
// Remove specific connection // Remove specific connection
removed := nodeConn.removeConnectionByChannel(c) removed := nodeConn.removeConnectionByChannel(c)
if !removed { if !removed {
nlog.Debug().Caller().Msg("RemoveNode: channel not found, connection already removed or invalid") nlog.Debug().Caller().Msg("removeNode: channel not found, connection already removed or invalid")
return false return false
} }

View File

@@ -328,7 +328,7 @@ func writeDebugMapResponse(
fmt.Sprintf("%s-%s.json", now, t), fmt.Sprintf("%s-%s.json", now, t),
) )
log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath) log.Trace().Msgf("writing MapResponse to %s", mapResponsePath)
err = os.WriteFile(mapResponsePath, body, perms) err = os.WriteFile(mapResponsePath, body, perms)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -357,7 +357,7 @@ func ReadMapResponsesFromDirectory(dir string) (map[types.NodeID][]tailcfg.MapRe
nodeIDu, err := strconv.ParseUint(node.Name(), 10, 64) nodeIDu, err := strconv.ParseUint(node.Name(), 10, 64)
if err != nil { if err != nil {
log.Error().Err(err).Msgf("Parsing node ID from dir %s", node.Name()) log.Error().Err(err).Msgf("parsing node ID from dir %s", node.Name())
continue continue
} }
@@ -365,7 +365,7 @@ func ReadMapResponsesFromDirectory(dir string) (map[types.NodeID][]tailcfg.MapRe
files, err := os.ReadDir(path.Join(dir, node.Name())) files, err := os.ReadDir(path.Join(dir, node.Name()))
if err != nil { if err != nil {
log.Error().Err(err).Msgf("Reading dir %s", node.Name()) log.Error().Err(err).Msgf("reading dir %s", node.Name())
continue continue
} }
@@ -380,14 +380,14 @@ func ReadMapResponsesFromDirectory(dir string) (map[types.NodeID][]tailcfg.MapRe
body, err := os.ReadFile(path.Join(dir, node.Name(), file.Name())) body, err := os.ReadFile(path.Join(dir, node.Name(), file.Name()))
if err != nil { if err != nil {
log.Error().Err(err).Msgf("Reading file %s", file.Name()) log.Error().Err(err).Msgf("reading file %s", file.Name())
continue continue
} }
var resp tailcfg.MapResponse var resp tailcfg.MapResponse
err = json.Unmarshal(body, &resp) err = json.Unmarshal(body, &resp)
if err != nil { if err != nil {
log.Error().Err(err).Msgf("Unmarshalling file %s", file.Name()) log.Error().Err(err).Msgf("unmarshalling file %s", file.Name())
continue continue
} }

View File

@@ -51,7 +51,7 @@ func (h *Headscale) NoiseUpgradeHandler(
writer http.ResponseWriter, writer http.ResponseWriter,
req *http.Request, req *http.Request,
) { ) {
log.Trace().Caller().Msgf("Noise upgrade handler for client %s", req.RemoteAddr) log.Trace().Caller().Msgf("noise upgrade handler for client %s", req.RemoteAddr)
upgrade := req.Header.Get("Upgrade") upgrade := req.Header.Get("Upgrade")
if upgrade == "" { if upgrade == "" {
@@ -60,7 +60,7 @@ func (h *Headscale) NoiseUpgradeHandler(
// be passed to Headscale. Let's give them a hint. // be passed to Headscale. Let's give them a hint.
log.Warn(). log.Warn().
Caller(). Caller().
Msg("No Upgrade header in TS2021 request. If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.") Msg("no upgrade header in TS2021 request. If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.")
http.Error(writer, "Internal error", http.StatusInternalServerError) http.Error(writer, "Internal error", http.StatusInternalServerError)
return return
@@ -279,7 +279,7 @@ func (ns *noiseServer) NoiseRegistrationHandler(
writer.WriteHeader(http.StatusOK) writer.WriteHeader(http.StatusOK)
if err := json.NewEncoder(writer).Encode(registerResponse); err != nil { if err := json.NewEncoder(writer).Encode(registerResponse); err != nil {
log.Error().Caller().Err(err).Msg("NoiseRegistrationHandler: failed to encode RegisterResponse") log.Error().Caller().Err(err).Msg("noise registration handler: failed to encode RegisterResponse")
return return
} }

View File

@@ -169,7 +169,7 @@ func (a *AuthProviderOIDC) RegisterHandler(
a.registrationCache.Set(state, registrationInfo) a.registrationCache.Set(state, registrationInfo)
authURL := a.oauth2Config.AuthCodeURL(state, extras...) authURL := a.oauth2Config.AuthCodeURL(state, extras...)
log.Debug().Caller().Msgf("Redirecting to %s for authentication", authURL) log.Debug().Caller().Msgf("redirecting to %s for authentication", authURL)
http.Redirect(writer, req, authURL, http.StatusFound) http.Redirect(writer, req, authURL, http.StatusFound)
} }

View File

@@ -351,7 +351,7 @@ func (pol *Policy) compileSSHPolicy(
// Resolve sources once - we'll use them differently for each destination type // Resolve sources once - we'll use them differently for each destination type
srcIPs, err := rule.Sources.Resolve(pol, users, nodes) srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
if err != nil { if err != nil {
log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule) log.Trace().Caller().Err(err).Msgf("ssh policy compilation failed resolving source ips for rule %+v", rule)
} }
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 { if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {

View File

@@ -308,7 +308,7 @@ func (m *mapSession) writeMap(msg *tailcfg.MapResponse) error {
if f, ok := m.w.(http.Flusher); ok { if f, ok := m.w.(http.Flusher); ok {
f.Flush() f.Flush()
} else { } else {
m.log.Error().Caller().Msg("ResponseWriter does not implement http.Flusher, cannot flush") m.log.Error().Caller().Msg("ResponseWriter does not implement http.Flusher, cannot flush")
} }
} }

View File

@@ -280,7 +280,7 @@ func (s *State) CreateUser(user types.User) (*types.User, change.Change, error)
c = change.PolicyChange() c = change.PolicyChange()
} }
log.Info().Str(zf.UserName, user.Name).Msg("User created") log.Info().Str(zf.UserName, user.Name).Msg("user created")
return &user, c, nil return &user, c, nil
} }
@@ -487,7 +487,7 @@ func (s *State) Connect(id types.NodeID) []change.Change {
c := []change.Change{change.NodeOnlineFor(node)} c := []change.Change{change.NodeOnlineFor(node)}
log.Info().EmbedObject(node).Msg("Node connected") log.Info().EmbedObject(node).Msg("node connected")
// Use the node's current routes for primary route update // Use the node's current routes for primary route update
// AllApprovedRoutes() returns only the intersection of announced AND approved routes // AllApprovedRoutes() returns only the intersection of announced AND approved routes
@@ -515,7 +515,7 @@ func (s *State) Disconnect(id types.NodeID) ([]change.Change, error) {
return nil, fmt.Errorf("node not found: %d", id) return nil, fmt.Errorf("node not found: %d", id)
} }
log.Info().EmbedObject(node).Msg("Node disconnected") log.Info().EmbedObject(node).Msg("node disconnected")
// Special error handling for disconnect - we log errors but continue // Special error handling for disconnect - we log errors but continue
// because NodeStore is already updated and we need to notify peers // because NodeStore is already updated and we need to notify peers
@@ -523,7 +523,7 @@ func (s *State) Disconnect(id types.NodeID) ([]change.Change, error) {
if err != nil { if err != nil {
// Log error but don't fail the disconnection - NodeStore is already updated // Log error but don't fail the disconnection - NodeStore is already updated
// and we need to send change notifications to peers // and we need to send change notifications to peers
log.Error().Err(err).EmbedObject(node).Msg("Failed to update last seen in database") log.Error().Err(err).EmbedObject(node).Msg("failed to update last seen in database")
c = change.Change{} c = change.Change{}
} }
@@ -905,7 +905,7 @@ func (s *State) AutoApproveRoutes(nv types.NodeView) (change.Change, error) {
return change.Change{}, err return change.Change{}, err
} }
log.Info().EmbedObject(nv).Strs(zf.RoutesApproved, util.PrefixesToString(approved)).Msg("Routes approved") log.Info().EmbedObject(nv).Strs(zf.RoutesApproved, util.PrefixesToString(approved)).Msg("routes approved")
return c, nil return c, nil
} }
@@ -1441,7 +1441,7 @@ func (s *State) processReauthTags(
Strs(zf.CurrentTags, node.Tags). Strs(zf.CurrentTags, node.Tags).
Bool(zf.IsTagged, node.IsTagged()). Bool(zf.IsTagged, node.IsTagged()).
Bool(zf.WasAuthKeyTagged, wasAuthKeyTagged) Bool(zf.WasAuthKeyTagged, wasAuthKeyTagged)
logEvent.Msg("Processing RequestTags during reauth") logEvent.Msg("processing RequestTags during reauth")
// Empty RequestTags means untag node (transition to user-owned) // Empty RequestTags means untag node (transition to user-owned)
if len(requestTags) == 0 { if len(requestTags) == 0 {
@@ -1952,14 +1952,14 @@ func (s *State) updatePolicyManagerUsers() (change.Change, error) {
return change.Change{}, fmt.Errorf("listing users for policy update: %w", err) return change.Change{}, fmt.Errorf("listing users for policy update: %w", err)
} }
log.Debug().Caller().Int("user.count", len(users)).Msg("Policy manager user update initiated because user list modification detected") log.Debug().Caller().Int("user.count", len(users)).Msg("policy manager user update initiated because user list modification detected")
changed, err := s.polMan.SetUsers(users) changed, err := s.polMan.SetUsers(users)
if err != nil { if err != nil {
return change.Change{}, fmt.Errorf("updating policy manager users: %w", err) return change.Change{}, fmt.Errorf("updating policy manager users: %w", err)
} }
log.Debug().Caller().Bool("policy.changed", changed).Msg("Policy manager user update completed because SetUsers operation finished") log.Debug().Caller().Bool("policy.changed", changed).Msg("policy manager user update completed because SetUsers operation finished")
if changed { if changed {
return change.PolicyChange(), nil return change.PolicyChange(), nil

View File

@@ -403,7 +403,7 @@ func LoadConfig(path string, isFile bool) error {
if err := viper.ReadInConfig(); err != nil { if err := viper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok { if _, ok := err.(viper.ConfigFileNotFoundError); ok {
log.Warn().Msg("No config file found, using defaults") log.Warn().Msg("no config file found, using defaults")
return nil return nil
} }
@@ -450,7 +450,7 @@ func validateServerConfig() error {
depr.Log() depr.Log()
if viper.IsSet("dns.extra_records") && viper.IsSet("dns.extra_records_path") { if viper.IsSet("dns.extra_records") && viper.IsSet("dns.extra_records_path") {
log.Fatal().Msg("Fatal config error: dns.extra_records and dns.extra_records_path are mutually exclusive. Please remove one of them from your config file") log.Fatal().Msg("fatal config error: dns.extra_records and dns.extra_records_path are mutually exclusive. Please remove one of them from your config file")
} }
// Collect any validation errors and return them all at once // Collect any validation errors and return them all at once

View File

@@ -392,7 +392,7 @@ func (u *User) FromClaim(claims *OIDCClaims, emailVerifiedRequired bool) {
if err == nil { if err == nil {
u.Name = claims.Username u.Name = claims.Username
} else { } else {
log.Debug().Caller().Err(err).Msgf("Username %s is not valid", claims.Username) log.Debug().Caller().Err(err).Msgf("username %s is not valid", claims.Username)
} }
if claims.EmailVerified || !FlexibleBoolean(emailVerifiedRequired) { if claims.EmailVerified || !FlexibleBoolean(emailVerifiedRequired) {