feat(proxmox): enhance VM resource tracking with auto-discovery and cached IPs

- Add VMResource wrapper type with cached IP addresses for efficient lookups
- Implement concurrent IP fetching during resource updates (limited concurrency)
- Add ReverseLookupResource for discovering VMs by IP, hostname, or alias
- Prioritize interfaces API over config for IP retrieval (offline container fallback)
- Enable routes to auto-discover Proxmox resources when no explicit config provided
- Fix configuration type from value to pointer slice for correct proxmox client retrieval
- Ensure Proxmox providers are initialized before route validation
This commit is contained in:
yusing
2026-01-25 02:25:07 +08:00
parent 64e380cc40
commit a46573cab3
9 changed files with 145 additions and 21 deletions

View File

@@ -4458,6 +4458,9 @@
"type": "integer",
"x-nullable": false,
"x-omitempty": false
},
"vmname": {
"type": "string"
}
},
"x-nullable": false,

View File

@@ -941,6 +941,8 @@ definitions:
type: string
vmid:
type: integer
vmname:
type: string
required:
- node
- vmid

View File

@@ -111,7 +111,6 @@ func (state *state) Init(data []byte) error {
g := gperr.NewGroup("config load error")
g.Go(state.initMaxMind)
g.Go(state.initProxmox)
g.Go(state.loadRouteProviders)
g.Go(state.initAutoCert)
errs := g.Wait()
@@ -119,6 +118,7 @@ func (state *state) Init(data []byte) error {
errs.Add(state.initNotification())
errs.Add(state.initACL())
errs.Add(state.initEntrypoint())
errs.Add(state.loadRouteProviders())
return errs.Error()
}

View File

@@ -36,7 +36,7 @@ type (
Docker map[string]types.DockerProviderConfig `json:"docker" yaml:"docker,omitempty" validate:"non_empty_docker_keys"`
Agents []*agent.AgentConfig `json:"agents" yaml:"agents,omitempty"`
Notification []*notif.NotificationConfig `json:"notification" yaml:"notification,omitempty"`
Proxmox []proxmox.Config `json:"proxmox" yaml:"proxmox,omitempty"`
Proxmox []*proxmox.Config `json:"proxmox" yaml:"proxmox,omitempty"`
MaxMind *maxmind.Config `json:"maxmind" yaml:"maxmind,omitempty"`
}
)

View File

@@ -5,11 +5,16 @@ import (
"encoding/json"
"errors"
"fmt"
"net"
"runtime"
"slices"
"strconv"
"strings"
"sync"
"github.com/luthermonson/go-proxmox"
"github.com/rs/zerolog/log"
"golang.org/x/sync/errgroup"
)
type Client struct {
@@ -17,10 +22,15 @@ type Client struct {
*proxmox.Cluster
Version *proxmox.Version
// id -> resource; id: lxc/<vmid> or qemu/<vmid>
resources map[string]*proxmox.ClusterResource
resources map[string]*VMResource
resourcesMu sync.RWMutex
}
// VMResource wraps a Proxmox cluster resource together with its cached
// IP addresses so that reverse lookups do not need to query the node on
// every call.
type VMResource struct {
*proxmox.ClusterResource
// IPs is populated concurrently during UpdateResources for LXC
// resources; it stays nil for other resource kinds.
IPs []net.IP
}
var (
ErrResourceNotFound = errors.New("resource not found")
ErrNoResources = errors.New("no resources")
@@ -29,7 +39,7 @@ var (
func NewClient(baseUrl string, opts ...proxmox.Option) *Client {
return &Client{
Client: proxmox.NewClient(baseUrl, opts...),
resources: make(map[string]*proxmox.ClusterResource),
resources: make(map[string]*VMResource),
}
}
@@ -62,8 +72,36 @@ func (c *Client) UpdateResources(ctx context.Context) error {
return err
}
clear(c.resources)
var errs errgroup.Group
errs.SetLimit(runtime.GOMAXPROCS(0) * 2)
for _, resource := range resourcesSlice {
c.resources[resource.ID] = resource
c.resources[resource.ID] = &VMResource{
ClusterResource: resource,
IPs: nil,
}
errs.Go(func() error {
node, ok := Nodes.Get(resource.Node)
if !ok {
return fmt.Errorf("node %s not found", resource.Node)
}
vmid, ok := strings.CutPrefix(resource.ID, "lxc/")
if !ok {
return nil // not a lxc resource
}
vmidInt, err := strconv.Atoi(vmid)
if err != nil {
return fmt.Errorf("invalid resource id %s: %w", resource.ID, err)
}
ips, err := node.LXCGetIPs(ctx, vmidInt)
if err != nil {
return fmt.Errorf("failed to get ips for resource %s: %w", resource.ID, err)
}
c.resources[resource.ID].IPs = ips
return nil
})
}
if err := errs.Wait(); err != nil {
return err
}
log.Debug().Str("cluster", c.Cluster.Name).Msgf("[proxmox] updated %d resources", len(c.resources))
return nil
@@ -72,7 +110,7 @@ func (c *Client) UpdateResources(ctx context.Context) error {
// GetResource gets a resource by kind and id.
// kind: lxc or qemu
// id: <vmid>
func (c *Client) GetResource(kind string, id int) (*proxmox.ClusterResource, error) {
func (c *Client) GetResource(kind string, id int) (*VMResource, error) {
c.resourcesMu.RLock()
defer c.resourcesMu.RUnlock()
resource, ok := c.resources[kind+"/"+strconv.Itoa(id)]
@@ -82,6 +120,33 @@ func (c *Client) GetResource(kind string, id int) (*proxmox.ClusterResource, err
return resource, nil
}
// ReverseLookupResource looks up a resource by IP address, hostname,
// alias, or any combination of them. For each resource the checks are
// tried in order: IP, hostname, alias; the first match wins. Returns
// ErrResourceNotFound when nothing matches.
//
// NOTE(review): map iteration order is random, so when several resources
// match, which one is returned is unspecified.
func (c *Client) ReverseLookupResource(ip net.IP, hostname string, alias string) (*VMResource, error) {
c.resourcesMu.RLock()
defer c.resourcesMu.RUnlock()
// Skip checks whose inputs cannot identify a particular VM: a nil,
// loopback, or unspecified IP, or an empty hostname/alias.
shouldCheckIP := ip != nil && !ip.IsLoopback() && !ip.IsUnspecified()
shouldCheckHostname := hostname != ""
shouldCheckAlias := alias != ""
if shouldCheckHostname {
// Compare only the first DNS label: "web.example.com" -> "web".
hostname, _, _ = strings.Cut(hostname, ".")
}
for _, resource := range c.resources {
// IPs were cached by UpdateResources; net.IP.Equal handles
// IPv4/IPv6 representation differences (4- vs 16-byte forms).
if shouldCheckIP && slices.ContainsFunc(resource.IPs, func(a net.IP) bool { return a.Equal(ip) }) {
return resource, nil
}
if shouldCheckHostname && resource.Name == hostname {
return resource, nil
}
if shouldCheckAlias && resource.Name == alias {
return resource, nil
}
}
return nil, ErrResourceNotFound
}
// Key implements pool.Object
func (c *Client) Key() string {
return c.Cluster.ID

View File

@@ -170,17 +170,17 @@ func getIPFromNet(s string) (res []net.IP) { // name:...,bridge:...,gw=..,ip=...
}
// LXCGetIPs returns the ip addresses of the container
// it first tries to get the ip addresses from the config
// if that fails, it gets the ip addresses from the interfaces
// it first tries to get the ip addresses from the interfaces
// if that fails, it gets the ip addresses from the config (offline containers)
func (n *Node) LXCGetIPs(ctx context.Context, vmid int) (res []net.IP, err error) {
ips, err := n.LXCGetIPsFromConfig(ctx, vmid)
ips, err := n.LXCGetIPsFromInterfaces(ctx, vmid)
if err != nil {
return nil, err
}
if len(ips) > 0 {
return ips, nil
}
ips, err = n.LXCGetIPsFromInterfaces(ctx, vmid)
ips, err = n.LXCGetIPsFromConfig(ctx, vmid)
if err != nil {
return nil, err
}

View File

@@ -7,8 +7,6 @@ import (
"io"
"strings"
"time"
"github.com/luthermonson/go-proxmox"
)
// const statsScriptLocation = "/tmp/godoxy-stats.sh"
@@ -105,7 +103,7 @@ func (n *Node) LXCStats(ctx context.Context, vmid int, stream bool) (io.ReadClos
return pr, nil
}
func writeLXCStatsLine(resource *proxmox.ClusterResource, w io.Writer) error {
func writeLXCStatsLine(resource *VMResource, w io.Writer) error {
cpu := fmt.Sprintf("%.1f%%", resource.CPU*100)
memUsage := formatIECBytes(resource.Mem)

View File

@@ -12,6 +12,7 @@ import (
// NodeConfig identifies a VM/container on a specific Proxmox node.
type NodeConfig struct {
Node string `json:"node" validate:"required"` // Proxmox node name
VMID int `json:"vmid" validate:"required"` // LXC/QEMU numeric id
VMName string `json:"vmname,omitempty"` // resolved VM name; filled in during route validation
Service string `json:"service,omitempty"` // associated service (route alias)
} // @name ProxmoxNodeConfig
@@ -54,6 +55,10 @@ func (n *Node) Name() string {
return n.name
}
// Client returns the Proxmox API client this node belongs to.
func (n *Node) Client() *Client {
return n.client
}
// String implements fmt.Stringer, rendering the node as "name (id)".
func (n *Node) String() string {
return fmt.Sprintf("%s (%s)", n.name, n.id)
}

View File

@@ -132,6 +132,10 @@ func (r Routes) Contains(alias string) bool {
}
func (r *Route) Validate() gperr.Error {
// wait for alias to be set
if r.Alias == "" {
return nil
}
// pcs := make([]uintptr, 1)
// runtime.Callers(2, pcs)
// f := runtime.FuncForPC(pcs[0])
@@ -182,22 +186,35 @@ func (r *Route) validate() gperr.Error {
r.Idlewatcher.Proxmox = r.Proxmox
}
if r.Idlewatcher != nil && r.Idlewatcher.Proxmox != nil {
node := r.Idlewatcher.Proxmox.Node
vmid := r.Idlewatcher.Proxmox.VMID
if node == "" {
if r.Proxmox == nil && r.Idlewatcher != nil && r.Idlewatcher.Proxmox != nil {
r.Proxmox = r.Idlewatcher.Proxmox
}
if r.Proxmox != nil {
nodeName := r.Proxmox.Node
vmid := r.Proxmox.VMID
if nodeName == "" {
return gperr.Errorf("node (proxmox node name) is required")
}
if vmid <= 0 {
return gperr.Errorf("vmid (lxc id) is required")
}
node, ok := proxmox.Nodes.Get(nodeName)
if !ok {
return gperr.Errorf("proxmox node %s not found in pool", node)
}
res, err := node.Client().GetResource("lxc", vmid)
if err != nil {
return gperr.Wrap(err) // ErrResourceNotFound
}
r.Proxmox.VMName = res.Name
if r.Host == DefaultHost {
containerName := r.Idlewatcher.ContainerName()
// get ip addresses of the vmid
node, ok := proxmox.Nodes.Get(node)
if !ok {
return gperr.Errorf("proxmox node %s not found in pool", node)
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
@@ -317,6 +334,33 @@ func (r *Route) validate() gperr.Error {
}
}
if r.Proxmox == nil && r.Container == nil {
proxmoxProviders := config.WorkingState.Load().Value().Providers.Proxmox
if len(proxmoxProviders) > 0 {
// it's fine if ip is nil
hostname := r.ProxyURL.Hostname()
ip := net.ParseIP(hostname)
for _, p := range config.WorkingState.Load().Value().Providers.Proxmox {
resource, _ := p.Client().ReverseLookupResource(ip, hostname, r.Alias)
// reverse lookup resource by ip address, hostname or alias
if resource != nil {
r.Proxmox = &proxmox.NodeConfig{
Node: resource.Node,
VMID: int(resource.VMID),
VMName: resource.Name,
Service: r.Alias,
}
log.Info().
Str("node", resource.Node).
Int("vmid", int(resource.VMID)).
Str("vmname", resource.Name).
Msgf("found proxmox resource for route %q", r.Alias)
break
}
}
}
}
if !r.UseHealthCheck() && (r.UseLoadBalance() || r.UseIdleWatcher()) {
errs.Adds("cannot disable healthcheck when loadbalancer or idle watcher is enabled")
}
@@ -496,6 +540,13 @@ func (r *Route) References() []string {
}
return []string{r.Container.Image.Name, aliasRef, r.Container.Image.Author}
}
if r.Proxmox != nil {
if r.Proxmox.VMName != r.Alias {
return []string{r.Proxmox.VMName, aliasRef, r.Proxmox.Service}
}
return []string{r.Proxmox.Service, aliasRef}
}
return []string{aliasRef}
}