feat(proxmox): improve node-level route auto-discovery with pointer VMID

- Add BaseURL field to Client for node-level route configuration
- Change VMID from int to *int to support three states:
  - nil: auto-discover node or VM from hostname/IP/alias
  - 0: node-level route (direct to Proxmox node API)
  - >0: LXC/QEMU resource route with container control
- Change Service string to Services []string for multi-service support
- Implement proper node-level route handling: HTTPS scheme,
  hostname from node BaseURL, default port 8006
- Move initial UpdateResources call into Init, before starting the loop
- Move proxmox auto-discovery earlier in route validation

BREAKING: NodeConfig.VMID is now a pointer type; NodeConfig.Service is
renamed to Services (backward compatible via a "service" alias). The
sketch below illustrates the new VMID semantics.
Author: yusing
Date:   2026-01-25 22:19:26 +08:00
Parent: 83976646db
Commit: 1ace5e641d

4 changed files with 86 additions and 63 deletions
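
A minimal sketch of the three VMID states, for illustration only: NodeConfig mirrors the struct changed in this diff, while routeKind is a hypothetical helper, not part of the codebase.

// Sketch only: NodeConfig mirrors the struct in this diff; routeKind is a
// hypothetical helper showing how the three VMID states are interpreted.
package main

import "fmt"

type NodeConfig struct {
	Node     string   `json:"node"`
	VMID     *int     `json:"vmid"`
	VMName   string   `json:"vmname,omitempty"`
	Services []string `json:"services,omitempty" aliases:"service"`
}

func routeKind(cfg NodeConfig) string {
	switch {
	case cfg.VMID == nil:
		return "auto-discover node or VM from hostname/IP/alias"
	case *cfg.VMID == 0:
		return "node-level route to the Proxmox node API"
	default:
		return fmt.Sprintf("LXC/QEMU resource route for VMID %d", *cfg.VMID)
	}
}

func main() {
	zero, vm := 0, 105
	for _, cfg := range []NodeConfig{{}, {VMID: &zero}, {VMID: &vm}} {
		fmt.Println(routeKind(cfg))
	}
}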

View File

@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"net/url"
 	"runtime"
 	"slices"
 	"strconv"
@@ -21,6 +22,7 @@ type Client struct {
 	*proxmox.Client
 	*proxmox.Cluster
 	Version *proxmox.Version
+	BaseURL *url.URL
 	// id -> resource; id: lxc/<vmid> or qemu/<vmid>
 	resources   map[string]*VMResource
 	resourcesMu sync.RWMutex
@@ -44,6 +46,11 @@ func NewClient(baseUrl string, opts ...proxmox.Option) *Client {
 }

 func (c *Client) UpdateClusterInfo(ctx context.Context) (err error) {
+	baseURL, err := url.Parse(c.Client.GetBaseURL())
+	if err != nil {
+		return err
+	}
+	c.BaseURL = baseURL
 	c.Version, err = c.Client.Version(ctx)
 	if err != nil {
 		return err
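
The stored BaseURL is what the route-validation hunks below consult for node-level routes. A standalone sketch of the relevant net/url behavior (the Proxmox host is a placeholder): Port() returns an empty string when the URL omits a port, which is why validation falls back to 8006.

// Standalone sketch of the net/url behavior the BaseURL field relies on;
// the Proxmox host below is a placeholder.
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://pve.example.com:8006/api2/json")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Hostname()) // "pve.example.com"
	fmt.Println(u.Port())     // "8006"; empty string when the URL has no port
}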

View File

@@ -96,6 +96,15 @@ func (c *Config) Init(ctx context.Context) gperr.Error {
 		return gperr.New("failed to fetch proxmox cluster info").With(err)
 	}

+	{
+		reqCtx, reqCtxCancel := context.WithTimeout(ctx, ResourcePollInterval)
+		err := c.client.UpdateResources(reqCtx)
+		reqCtxCancel()
+		if err != nil {
+			log.Warn().Err(err).Str("cluster", c.client.Cluster.Name).Msg("[proxmox] failed to update resources")
+		}
+	}
+
 	go c.updateResourcesLoop(ctx)
 	return nil
 }
@@ -106,15 +115,6 @@ func (c *Config) updateResourcesLoop(ctx context.Context) {
 	log.Trace().Str("cluster", c.client.Cluster.Name).Msg("[proxmox] starting resources update loop")

-	{
-		reqCtx, reqCtxCancel := context.WithTimeout(ctx, ResourcePollInterval)
-		err := c.client.UpdateResources(reqCtx)
-		reqCtxCancel()
-		if err != nil {
-			log.Warn().Err(err).Str("cluster", c.client.Cluster.Name).Msg("[proxmox] failed to update resources")
-		}
-	}
-
 	for {
 		select {
 		case <-ctx.Done():
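
The two hunks above move the one-shot resource fetch from the loop into Init, so a failure is logged (and resources are populated) before the loop starts. A generic sketch of that pattern, using placeholder names (start, update, interval) rather than this project's API:

// Generic sketch of the refactored pattern: one bounded initial update,
// then a goroutine that only handles periodic refreshes.
// start, update, and interval are placeholders, not this project's API.
package main

import (
	"context"
	"log"
	"time"
)

func start(ctx context.Context, interval time.Duration, update func(context.Context) error) {
	// Initial update, bounded by the poll interval so startup cannot hang.
	reqCtx, cancel := context.WithTimeout(ctx, interval)
	if err := update(reqCtx); err != nil {
		log.Printf("initial update failed: %v", err)
	}
	cancel()

	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				reqCtx, cancel := context.WithTimeout(ctx, interval)
				if err := update(reqCtx); err != nil {
					log.Printf("update failed: %v", err)
				}
				cancel()
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	start(ctx, time.Second, func(context.Context) error { return nil })
	<-ctx.Done()
}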

View File

@@ -10,10 +10,10 @@ import (
 )

 type NodeConfig struct {
-	Node    string `json:"node" validate:"required"`
-	VMID    int    `json:"vmid" validate:"required"`
-	VMName  string `json:"vmname,omitempty"`
-	Service string `json:"service,omitempty"`
+	Node     string   `json:"node"`
+	VMID     *int     `json:"vmid"` // unset: auto discover; explicit 0: node-level route; >0: lxc/qemu resource route
+	VMName   string   `json:"vmname,omitempty"`
+	Services []string `json:"services,omitempty" aliases:"service"`
 } // @name ProxmoxNodeConfig

 type Node struct {
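
The aliases tag above is handled by the project's own deserializer. Purely for illustration, a hypothetical normalization that achieves the same backward compatibility with plain encoding/json:

// Hypothetical sketch of the backward-compatible alias: a legacy singular
// "service" value is folded into the new Services slice. The project does
// this via its serializer's aliases tag; this only illustrates the intent.
package main

import (
	"encoding/json"
	"fmt"
)

type rawNodeConfig struct {
	Services []string `json:"services,omitempty"`
	Service  string   `json:"service,omitempty"` // legacy singular field
}

func normalizeServices(data []byte) ([]string, error) {
	var raw rawNodeConfig
	if err := json.Unmarshal(data, &raw); err != nil {
		return nil, err
	}
	if len(raw.Services) == 0 && raw.Service != "" {
		raw.Services = []string{raw.Service}
	}
	return raw.Services, nil
}

func main() {
	legacy, _ := normalizeServices([]byte(`{"service":"app"}`))
	multi, _ := normalizeServices([]byte(`{"services":["app","api"]}`))
	fmt.Println(legacy, multi) // [app] [app api]
}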

View File

@@ -185,21 +185,66 @@ func (r *Route) validate() gperr.Error {
 	if r.Proxmox != nil && r.Idlewatcher != nil {
 		r.Idlewatcher.Proxmox = &types.ProxmoxConfig{
 			Node: r.Proxmox.Node,
-			VMID: r.Proxmox.VMID,
+		}
+		if r.Proxmox.VMID != nil {
+			r.Idlewatcher.Proxmox.VMID = *r.Proxmox.VMID
 		}
 	}

 	if r.Proxmox == nil && r.Idlewatcher != nil && r.Idlewatcher.Proxmox != nil {
 		r.Proxmox = &proxmox.NodeConfig{
 			Node: r.Idlewatcher.Proxmox.Node,
-			VMID: r.Idlewatcher.Proxmox.VMID,
+			VMID: &r.Idlewatcher.Proxmox.VMID,
 		}
 	}

+	if (r.Proxmox == nil || r.Proxmox.Node == "" || r.Proxmox.VMID == nil) && r.Container == nil {
+		proxmoxProviders := config.WorkingState.Load().Value().Providers.Proxmox
+		if len(proxmoxProviders) > 0 {
+			// it's fine if ip is nil
+			hostname := r.Host
+			ip := net.ParseIP(hostname)
+			for _, p := range proxmoxProviders {
+				// First check if hostname, IP, or alias matches a node (node-level route)
+				if nodeName := p.Client().ReverseLookupNode(hostname, ip, r.Alias); nodeName != "" {
+					zero := 0
+					if r.Proxmox == nil {
+						r.Proxmox = &proxmox.NodeConfig{}
+					}
+					r.Proxmox.Node = nodeName
+					r.Proxmox.VMID = &zero
+					r.Proxmox.VMName = ""
+					log.Info().
+						Str("node", nodeName).
+						Msgf("found proxmox node for route %q", r.Alias)
+					break
+				}
+				// Then check if hostname, IP, or alias matches a VM resource
+				resource, _ := p.Client().ReverseLookupResource(ip, hostname, r.Alias)
+				if resource != nil {
+					vmid := int(resource.VMID)
+					if r.Proxmox == nil {
+						r.Proxmox = &proxmox.NodeConfig{}
+					}
+					r.Proxmox.Node = resource.Node
+					r.Proxmox.VMID = &vmid
+					r.Proxmox.VMName = resource.Name
+					log.Info().
+						Str("node", resource.Node).
+						Int("vmid", int(resource.VMID)).
+						Str("vmname", resource.Name).
+						Msgf("found proxmox resource for route %q", r.Alias)
+					break
+				}
+			}
+		}
+	}
+
 	if r.Proxmox != nil {
 		nodeName := r.Proxmox.Node
 		vmid := r.Proxmox.VMID
-		if nodeName == "" {
+		if nodeName == "" || vmid == nil {
 			return gperr.Errorf("node (proxmox node name) is required")
 		}
@@ -208,9 +253,19 @@ func (r *Route) validate() gperr.Error {
 			return gperr.Errorf("proxmox node %s not found in pool", nodeName)
 		}

-		// Node-level route (VMID = 0) - no container control needed
-		if vmid > 0 {
-			res, err := node.Client().GetResource("lxc", vmid)
+		// Node-level route (VMID = 0)
+		if *vmid == 0 {
+			r.Scheme = route.SchemeHTTPS
+			if r.Host == DefaultHost {
+				r.Host = node.Client().BaseURL.Hostname()
+			}
+			port, _ := strconv.Atoi(node.Client().BaseURL.Port())
+			if port == 0 {
+				port = 8006
+			}
+			r.Port.Proxy = port
+		} else {
+			res, err := node.Client().GetResource("lxc", *vmid)
 			if err != nil {
 				return gperr.Wrap(err) // ErrResourceNotFound
 			}
@@ -235,14 +290,14 @@ func (r *Route) validate() gperr.Error {

 			l := log.With().Str("container", containerName).Logger()
 			l.Info().Msg("checking if container is running")
-			running, err := node.LXCIsRunning(ctx, vmid)
+			running, err := node.LXCIsRunning(ctx, *vmid)
 			if err != nil {
 				return gperr.New("failed to check container state").With(err)
 			}

 			if !running {
 				l.Info().Msg("starting container")
-				if err := node.LXCAction(ctx, vmid, proxmox.LXCStart); err != nil {
+				if err := node.LXCAction(ctx, *vmid, proxmox.LXCStart); err != nil {
 					return gperr.New("failed to start container").With(err)
 				}
 			}
@@ -336,45 +391,6 @@ func (r *Route) validate() gperr.Error {
 		}
 	}

-	if r.Proxmox == nil && r.Container == nil && r.ProxyURL != nil {
-		proxmoxProviders := config.WorkingState.Load().Value().Providers.Proxmox
-		if len(proxmoxProviders) > 0 {
-			// it's fine if ip is nil
-			hostname := r.ProxyURL.Hostname()
-			ip := net.ParseIP(hostname)
-			for _, p := range config.WorkingState.Load().Value().Providers.Proxmox {
-				// First check if hostname, IP, or alias matches a node (node-level route)
-				if nodeName := p.Client().ReverseLookupNode(hostname, ip, r.Alias); nodeName != "" {
-					r.Proxmox = &proxmox.NodeConfig{
-						Node:   nodeName,
-						VMID:   0, // node-level route, no specific VM
-						VMName: "",
-					}
-					log.Info().
-						Str("node", nodeName).
-						Msgf("found proxmox node for route %q", r.Alias)
-					break
-				}
-				// Then check if hostname, IP, or alias matches a VM resource
-				resource, _ := p.Client().ReverseLookupResource(ip, hostname, r.Alias)
-				if resource != nil {
-					r.Proxmox = &proxmox.NodeConfig{
-						Node:   resource.Node,
-						VMID:   int(resource.VMID),
-						VMName: resource.Name,
-					}
-					log.Info().
-						Str("node", resource.Node).
-						Int("vmid", int(resource.VMID)).
-						Str("vmname", resource.Name).
-						Msgf("found proxmox resource for route %q", r.Alias)
-					break
-				}
-			}
-		}
-	}
-
 	if !r.UseHealthCheck() && (r.UseLoadBalance() || r.UseIdleWatcher()) {
 		errs.Adds("cannot disable healthcheck when loadbalancer or idle watcher is enabled")
 	}
@@ -556,11 +572,11 @@ func (r *Route) References() []string {
 	}

 	if r.Proxmox != nil {
-		if r.Proxmox.Service != "" && r.Proxmox.Service != aliasRef {
+		if len(r.Proxmox.Services) > 0 && r.Proxmox.Services[0] != aliasRef {
 			if r.Proxmox.VMName != aliasRef {
-				return []string{r.Proxmox.VMName, aliasRef, r.Proxmox.Service}
+				return []string{r.Proxmox.VMName, aliasRef, r.Proxmox.Services[0]}
 			}
-			return []string{r.Proxmox.Service, aliasRef}
+			return []string{r.Proxmox.Services[0], aliasRef}
 		} else {
 			if r.Proxmox.VMName != aliasRef {
 				return []string{r.Proxmox.VMName, aliasRef}