cmd/dev: add local development server tool

Add a lightweight dev tool that starts a headscale server on localhost
with a pre-created user and pre-auth key, ready for connecting real
tailscale nodes via mts.

The tool builds the headscale binary, writes a minimal dev config
(SQLite, public DERP, debug logging), starts the server as a
subprocess, and prints a banner with the server URL, auth key, and
mts usage instructions.

Usage: go run ./cmd/dev
       make dev-server
This commit is contained in:
Kristoffer Dalby
2026-04-13 12:41:04 +00:00
parent 0cf27eba77
commit 461a0e2bea
3 changed files with 403 additions and 0 deletions

View File

@@ -105,6 +105,11 @@ clean:
.PHONY: dev
dev: fmt lint test build
# Start a local headscale dev server (use mts to add nodes)
.PHONY: dev-server
dev-server:
go run ./cmd/dev
# Help target
.PHONY: help
help:

96
cmd/dev/README.md Normal file
View File

@@ -0,0 +1,96 @@
# cmd/dev -- Local Development Environment
Starts a headscale server on localhost with a pre-created user and
pre-auth key. Pair with `mts` to add real tailscale nodes.
## Quick start
```bash
# Terminal 1: start headscale
go run ./cmd/dev
# Terminal 2: start mts server
go tool mts server run
# Terminal 3: add and connect nodes
go tool mts server add node1
go tool mts server add node2
# Disable logtail (avoids startup delays, see "Known issues" below)
for n in node1 node2; do
cat > ~/.config/multi-tailscale-dev/$n/env.txt << 'EOF'
TS_NO_LOGS_NO_SUPPORT=true
EOF
done
# Restart nodes so env.txt takes effect
go tool mts server stop node1 && go tool mts server start node1
go tool mts server stop node2 && go tool mts server start node2
# Connect to headscale (use the auth key printed by cmd/dev)
go tool mts node1 up --login-server=http://127.0.0.1:8080 --authkey=<KEY> --reset
go tool mts node2 up --login-server=http://127.0.0.1:8080 --authkey=<KEY> --reset
# Verify
go tool mts node1 status
```
## Flags
| Flag | Default | Description |
| -------- | ------- | ---------------------------- |
| `--port` | 8080 | Headscale listen port |
| `--keep` | false | Keep state directory on exit |
The metrics/debug port is `port + 1010` (default 9090) and the gRPC
port is `port + 42363` (default 50443).
## What it does
1. Builds the headscale binary into a temp directory
2. Writes a minimal dev config (SQLite, public DERP, debug logging)
3. Starts `headscale serve` as a subprocess
4. Creates a "dev" user and a reusable 24h pre-auth key via the CLI
5. Prints a banner with server URL, auth key, and usage instructions
6. Blocks until Ctrl+C, then kills headscale
State lives in `/tmp/headscale-dev-*/`. Pass `--keep` to preserve it
across restarts (useful for inspecting the database or reusing keys).
## Useful endpoints
- `http://127.0.0.1:8080/health` -- health check
- `http://127.0.0.1:9090/debug/ping` -- interactive ping UI
- `http://127.0.0.1:9090/debug/ping?node=1` -- quick-ping a node
- `POST http://127.0.0.1:9090/debug/ping` with `node=<id>` -- trigger ping
## Managing headscale
The banner prints the full path to the built binary and config. Use it
for any headscale CLI command:
```bash
/tmp/headscale-dev-*/headscale -c /tmp/headscale-dev-*/config.yaml nodes list
/tmp/headscale-dev-*/headscale -c /tmp/headscale-dev-*/config.yaml users list
```
## Known issues
### Logtail delays on mts nodes
Freshly created `mts` instances may take 30+ seconds to start if
`~/.local/share/tailscale/` contains stale logtail cache from previous
tailscaled runs. The daemon blocks trying to upload old logs before
creating its socket.
Fix: write `TS_NO_LOGS_NO_SUPPORT=true` to each instance's `env.txt`
before starting (or restart after writing). See the quick start above.
### mts node cleanup
`mts` stores state in `~/.config/multi-tailscale-dev/`. Old instances
accumulate over time. Clean them with:
```bash
go tool mts server rm <name>
```

302
cmd/dev/main.go Normal file
View File

@@ -0,0 +1,302 @@
// cmd/dev starts a local headscale development server with a pre-created
// user and pre-auth key, ready for connecting tailscale nodes via mts.
package main
import (
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"log"
"net/http"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strconv"
"syscall"
"time"
)
// Command-line flags, read by run().
var (
	port = flag.Int("port", 8080, "headscale listen port")
	keep = flag.Bool("keep", false, "keep state directory on exit")
)

// errHealthTimeout is returned by waitForHealth when the server never
// answers 200 within the allotted window.
var errHealthTimeout = errors.New("health check timed out")

// errEmptyAuthKey is returned by extractAuthKey when the CLI output
// parses as JSON but carries no key.
var errEmptyAuthKey = errors.New("empty auth key in response")

// devConfig is the headscale configuration template written into the
// state directory. fmt.Sprintf placeholders, in order: HTTP port
// (server_url), listen port, metrics port, gRPC port, then the state
// directory three times (noise key, SQLite path, unix socket).
// Dev-only choices: SQLite with WAL, the public Tailscale DERP map
// instead of an embedded DERP server, and debug-level text logging.
const devConfig = `---
server_url: http://127.0.0.1:%d
listen_addr: 127.0.0.1:%d
metrics_listen_addr: 127.0.0.1:%d
grpc_listen_addr: 127.0.0.1:%d
grpc_allow_insecure: true
noise:
  private_key_path: %s/noise_private.key
prefixes:
  v4: 100.64.0.0/10
  v6: fd7a:115c:a1e0::/48
  allocation: sequential
database:
  type: sqlite
  sqlite:
    path: %s/db.sqlite
    write_ahead_log: true
derp:
  server:
    enabled: false
  urls:
    - https://controlplane.tailscale.com/derpmap/default
  auto_update_enabled: false
dns:
  magic_dns: true
  base_domain: headscale.dev
  override_local_dns: false
log:
  level: debug
  format: text
policy:
  mode: database
unix_socket: %s/headscale.sock
unix_socket_permission: "0770"
`
// main parses flags, configures plain logging and the shared HTTP
// client, then hands off to run, exiting non-zero on any error.
func main() {
	flag.Parse()
	log.SetFlags(0)

	// The health-check polling uses http.DefaultClient: give it a short
	// timeout and stop it from following redirects, so a redirecting
	// endpoint is not mistaken for a healthy 200.
	http.DefaultClient.Timeout = 2 * time.Second
	http.DefaultClient.CheckRedirect = func(*http.Request, []*http.Request) error {
		return http.ErrUseLastResponse
	}

	if err := run(); err != nil {
		log.Fatal(err)
	}
}
// run is the whole tool: build headscale, write a dev config, start the
// server as a subprocess, provision a "dev" user and a reusable 24h
// pre-auth key through the CLI, print a usage banner, then block until
// the server exits (normally via Ctrl+C). All state (binary, config,
// SQLite DB, sockets) lives in a fresh temp directory that is removed
// on return unless --keep is set.
func run() error {
	// Port offsets put the defaults on the conventional 9090 (metrics)
	// and 50443 (gRPC).
	metricsPort := *port + 1010 // default 9090
	grpcPort := *port + 42363   // default 50443

	tmpDir, err := os.MkdirTemp("", "headscale-dev-")
	if err != nil {
		return fmt.Errorf("creating temp dir: %w", err)
	}
	if !*keep {
		defer os.RemoveAll(tmpDir)
	}

	// Write config.
	configPath := filepath.Join(tmpDir, "config.yaml")
	configContent := fmt.Sprintf(devConfig,
		*port, *port, metricsPort, grpcPort,
		tmpDir, tmpDir, tmpDir,
	)
	err = os.WriteFile(configPath, []byte(configContent), 0o600)
	if err != nil {
		return fmt.Errorf("writing config: %w", err)
	}

	// Build headscale. The context is tied to SIGINT/SIGTERM, so Ctrl+C
	// cancels the build and, later, kills the serve subprocess.
	fmt.Println("Building headscale...")
	hsBin := filepath.Join(tmpDir, "headscale")
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()
	build := exec.CommandContext(ctx, "go", "build", "-o", hsBin, "./cmd/headscale")
	build.Stdout = os.Stdout
	build.Stderr = os.Stderr
	err = build.Run()
	if err != nil {
		return fmt.Errorf("building headscale: %w", err)
	}

	// Start headscale serve; its output streams to the terminal
	// alongside ours.
	fmt.Println("Starting headscale server...")
	serve := exec.CommandContext(ctx, hsBin, "serve", "-c", configPath)
	serve.Stdout = os.Stdout
	serve.Stderr = os.Stderr
	err = serve.Start()
	if err != nil {
		return fmt.Errorf("starting headscale: %w", err)
	}

	// Wait for server to be ready before driving its CLI against it.
	healthURL := fmt.Sprintf("http://127.0.0.1:%d/health", *port)
	err = waitForHealth(ctx, healthURL, 30*time.Second)
	if err != nil {
		return fmt.Errorf("waiting for headscale: %w", err)
	}

	// Create user.
	fmt.Println("Creating user and pre-auth key...")
	userJSON, err := runHS(ctx, hsBin, configPath, "users", "create", "dev", "-o", "json")
	if err != nil {
		return fmt.Errorf("creating user: %w", err)
	}
	userID, err := extractUserID(userJSON)
	if err != nil {
		return fmt.Errorf("parsing user: %w", err)
	}

	// Create pre-auth key: reusable so several nodes can share it,
	// valid for 24h.
	keyJSON, err := runHS(
		ctx, hsBin, configPath,
		"preauthkeys", "create",
		"-u", strconv.FormatUint(userID, 10),
		"--reusable",
		"-e", "24h",
		"-o", "json",
	)
	if err != nil {
		return fmt.Errorf("creating pre-auth key: %w", err)
	}
	authKey, err := extractAuthKey(keyJSON)
	if err != nil {
		return fmt.Errorf("parsing pre-auth key: %w", err)
	}

	// Print banner with everything needed to connect nodes via mts and
	// to drive the headscale CLI by hand.
	fmt.Printf(`
=== Headscale Dev Environment ===
Server: http://127.0.0.1:%d
Metrics: http://127.0.0.1:%d
Debug: http://127.0.0.1:%d/debug/ping
Config: %s
State: %s
Pre-auth key: %s
Connect nodes with mts:
go tool mts server run # start mts (once, another terminal)
go tool mts server add node1 # create a node
go tool mts node1 up --login-server=http://127.0.0.1:%d --authkey=%s
go tool mts node1 status # check connection
Manage headscale:
%s -c %s nodes list
%s -c %s users list
Press Ctrl+C to stop.
`,
		*port, metricsPort, metricsPort,
		configPath, tmpDir,
		authKey,
		*port, authKey,
		hsBin, configPath,
		hsBin, configPath,
	)

	// Wait for headscale to exit.
	err = serve.Wait()
	if err != nil {
		// Context cancellation is expected on Ctrl+C: CommandContext
		// kills the child, which makes Wait return an error we ignore.
		if ctx.Err() != nil {
			fmt.Println("\nShutting down...")
			return nil
		}
		return fmt.Errorf("headscale exited: %w", err)
	}
	return nil
}
// waitForHealth polls the health endpoint until it returns 200, the
// timeout expires, or ctx is cancelled.
//
// Returns nil on a 200 response, ctx.Err() on cancellation, and
// errHealthTimeout once the deadline passes. Connection errors are
// expected while the server is still starting and are retried.
func waitForHealth(ctx context.Context, url string, timeout time.Duration) error {
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		if err != nil {
			return fmt.Errorf("creating request: %w", err)
		}
		resp, err := http.DefaultClient.Do(req)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		// Select on ctx.Done() so Ctrl+C is honored immediately instead
		// of sleeping out the remainder of the poll interval.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
	return errHealthTimeout
}
// runHS executes a headscale CLI command and returns its stdout.
func runHS(ctx context.Context, bin, config string, args ...string) ([]byte, error) {
fullArgs := append([]string{"-c", config}, args...)
cmd := exec.CommandContext(ctx, bin, fullArgs...)
cmd.Stderr = os.Stderr
return cmd.Output()
}
// extractUserID parses the JSON output of "users create" and returns the
// user ID.
//
// Depending on the encoder headscale uses, a uint64 ID may appear as a
// JSON number ({"id": 1}) or — per the protobuf JSON mapping used by
// protojson — as a quoted decimal string ({"id": "1"}). Both forms are
// accepted. A missing or non-numeric ID is an error rather than a silent
// zero, so run() never provisions a key for user 0 by accident.
func extractUserID(data []byte) (uint64, error) {
	var user struct {
		ID json.RawMessage `json:"id"`
	}
	err := json.Unmarshal(data, &user)
	if err != nil {
		return 0, fmt.Errorf("unmarshalling user JSON: %w (raw: %s)", err, data)
	}

	// Strip optional surrounding quotes so both encodings parse below.
	raw := string(user.ID)
	if len(raw) >= 2 && raw[0] == '"' && raw[len(raw)-1] == '"' {
		raw = raw[1 : len(raw)-1]
	}

	id, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("parsing user ID %q: %w (raw: %s)", raw, err, data)
	}
	return id, nil
}
// extractAuthKey parses the JSON output of "preauthkeys create" and
// returns the key string.
//
// An empty key is rejected with errEmptyAuthKey so callers never hand
// an unusable key to a tailscale client.
func extractAuthKey(data []byte) (string, error) {
	var payload struct {
		Key string `json:"key"`
	}
	if err := json.Unmarshal(data, &payload); err != nil {
		return "", fmt.Errorf("unmarshalling key JSON: %w (raw: %s)", err, data)
	}
	if payload.Key == "" {
		return "", errEmptyAuthKey
	}
	return payload.Key, nil
}