refactor(proxmox): change VMID type from int to uint64 across Proxmox provider

Updates VMID parameter and field types from int to uint64 throughout the Proxmox provider implementation,
including API request structures, provider structs, client methods, LXC-related functions, and the
pointer-typed NodeConfig.VMID field. Also replaces the duplicated StatsRequest struct with a type alias
of ActionRequest, updates string conversion calls from strconv.Itoa to strconv.FormatUint, and switches
the zerolog VMID field from e.Int to e.Uint64.
This commit is contained in:
yusing
2026-02-10 16:53:07 +08:00
parent 3aba728a3a
commit 978dd886c0
9 changed files with 21 additions and 24 deletions

View File

@@ -2,5 +2,5 @@ package proxmoxapi
type ActionRequest struct { type ActionRequest struct {
Node string `uri:"node" binding:"required"` Node string `uri:"node" binding:"required"`
VMID int `uri:"vmid" binding:"required"` VMID uint64 `uri:"vmid" binding:"required"`
} // @name ProxmoxVMActionRequest } // @name ProxmoxVMActionRequest

View File

@@ -11,10 +11,7 @@ import (
"github.com/yusing/goutils/http/websocket" "github.com/yusing/goutils/http/websocket"
) )
type StatsRequest struct { type StatsRequest ActionRequest
Node string `uri:"node" binding:"required"`
VMID int `uri:"vmid" binding:"required"`
}
// @x-id "nodeStats" // @x-id "nodeStats"
// @BasePath /api/v1 // @BasePath /api/v1

View File

@@ -18,7 +18,7 @@ import (
type ProxmoxProvider struct { type ProxmoxProvider struct {
*proxmox.Node *proxmox.Node
vmid int vmid uint64
lxcName string lxcName string
running bool running bool
} }
@@ -27,7 +27,7 @@ const proxmoxStateCheckInterval = 1 * time.Second
var ErrNodeNotFound = gperr.New("node not found in pool") var ErrNodeNotFound = gperr.New("node not found in pool")
func NewProxmoxProvider(ctx context.Context, nodeName string, vmid int) (idlewatcher.Provider, error) { func NewProxmoxProvider(ctx context.Context, nodeName string, vmid uint64) (idlewatcher.Provider, error) {
if nodeName == "" || vmid == 0 { if nodeName == "" || vmid == 0 {
return nil, errors.New("node name and vmid are required") return nil, errors.New("node name and vmid are required")
} }
@@ -102,7 +102,7 @@ func (p *ProxmoxProvider) Watch(ctx context.Context) (<-chan watcher.Event, <-ch
event := watcher.Event{ event := watcher.Event{
Type: events.EventTypeDocker, Type: events.EventTypeDocker,
ActorID: strconv.Itoa(p.vmid), ActorID: strconv.FormatUint(p.vmid, 10),
ActorName: p.lxcName, ActorName: p.lxcName,
} }
for { for {

View File

@@ -125,10 +125,10 @@ func (c *Client) UpdateResources(ctx context.Context) error {
// GetResource gets a resource by kind and id. // GetResource gets a resource by kind and id.
// kind: lxc or qemu // kind: lxc or qemu
// id: <vmid> // id: <vmid>
func (c *Client) GetResource(kind string, id int) (*VMResource, error) { func (c *Client) GetResource(kind string, id uint64) (*VMResource, error) {
c.resourcesMu.RLock() c.resourcesMu.RLock()
defer c.resourcesMu.RUnlock() defer c.resourcesMu.RUnlock()
resource, ok := c.resources[kind+"/"+strconv.Itoa(id)] resource, ok := c.resources[kind+"/"+strconv.FormatUint(id, 10)]
if !ok { if !ok {
return nil, ErrResourceNotFound return nil, ErrResourceNotFound
} }

View File

@@ -41,7 +41,7 @@ const (
proxmoxTaskCheckInterval = 300 * time.Millisecond proxmoxTaskCheckInterval = 300 * time.Millisecond
) )
func (n *Node) LXCAction(ctx context.Context, vmid int, action LXCAction) error { func (n *Node) LXCAction(ctx context.Context, vmid uint64, action LXCAction) error {
var upid proxmox.UPID var upid proxmox.UPID
if err := n.client.Post(ctx, fmt.Sprintf("/nodes/%s/lxc/%d/status/%s", n.name, vmid, action), nil, &upid); err != nil { if err := n.client.Post(ctx, fmt.Sprintf("/nodes/%s/lxc/%d/status/%s", n.name, vmid, action), nil, &upid); err != nil {
return err return err
@@ -82,7 +82,7 @@ func (n *Node) LXCAction(ctx context.Context, vmid int, action LXCAction) error
} }
} }
func (n *Node) LXCName(ctx context.Context, vmid int) (string, error) { func (n *Node) LXCName(ctx context.Context, vmid uint64) (string, error) {
var name nameOnly var name nameOnly
if err := n.client.Get(ctx, fmt.Sprintf("/nodes/%s/lxc/%d/status/current", n.name, vmid), &name); err != nil { if err := n.client.Get(ctx, fmt.Sprintf("/nodes/%s/lxc/%d/status/current", n.name, vmid), &name); err != nil {
return "", err return "", err
@@ -90,7 +90,7 @@ func (n *Node) LXCName(ctx context.Context, vmid int) (string, error) {
return name.Name, nil return name.Name, nil
} }
func (n *Node) LXCStatus(ctx context.Context, vmid int) (LXCStatus, error) { func (n *Node) LXCStatus(ctx context.Context, vmid uint64) (LXCStatus, error) {
var status statusOnly var status statusOnly
if err := n.client.Get(ctx, fmt.Sprintf("/nodes/%s/lxc/%d/status/current", n.name, vmid), &status); err != nil { if err := n.client.Get(ctx, fmt.Sprintf("/nodes/%s/lxc/%d/status/current", n.name, vmid), &status); err != nil {
return "", err return "", err
@@ -98,17 +98,17 @@ func (n *Node) LXCStatus(ctx context.Context, vmid int) (LXCStatus, error) {
return status.Status, nil return status.Status, nil
} }
func (n *Node) LXCIsRunning(ctx context.Context, vmid int) (bool, error) { func (n *Node) LXCIsRunning(ctx context.Context, vmid uint64) (bool, error) {
status, err := n.LXCStatus(ctx, vmid) status, err := n.LXCStatus(ctx, vmid)
return status == LXCStatusRunning, err return status == LXCStatusRunning, err
} }
func (n *Node) LXCIsStopped(ctx context.Context, vmid int) (bool, error) { func (n *Node) LXCIsStopped(ctx context.Context, vmid uint64) (bool, error) {
status, err := n.LXCStatus(ctx, vmid) status, err := n.LXCStatus(ctx, vmid)
return status == LXCStatusStopped, err return status == LXCStatusStopped, err
} }
func (n *Node) LXCSetShutdownTimeout(ctx context.Context, vmid int, timeout time.Duration) error { func (n *Node) LXCSetShutdownTimeout(ctx context.Context, vmid uint64, timeout time.Duration) error {
return n.client.Put(ctx, fmt.Sprintf("/nodes/%s/lxc/%d/config", n.name, vmid), map[string]interface{}{ return n.client.Put(ctx, fmt.Sprintf("/nodes/%s/lxc/%d/config", n.name, vmid), map[string]interface{}{
"startup": fmt.Sprintf("down=%.0f", timeout.Seconds()), "startup": fmt.Sprintf("down=%.0f", timeout.Seconds()),
}, nil) }, nil)

View File

@@ -42,7 +42,7 @@ import (
// //
// - format: "STATUS|CPU%%|MEM USAGE/LIMIT|MEM%%|NET I/O|BLOCK I/O" // - format: "STATUS|CPU%%|MEM USAGE/LIMIT|MEM%%|NET I/O|BLOCK I/O"
// - example: running|31.1%|9.6GiB/20GiB|48.87%|4.7GiB/3.3GiB|25GiB/36GiB // - example: running|31.1%|9.6GiB/20GiB|48.87%|4.7GiB/3.3GiB|25GiB/36GiB
func (n *Node) LXCStats(ctx context.Context, vmid int, stream bool) (io.ReadCloser, error) { func (n *Node) LXCStats(ctx context.Context, vmid uint64, stream bool) (io.ReadCloser, error) {
if !stream { if !stream {
resource, err := n.client.GetResource("lxc", vmid) resource, err := n.client.GetResource("lxc", vmid)
if err != nil { if err != nil {

View File

@@ -12,7 +12,7 @@ import (
type NodeConfig struct { type NodeConfig struct {
Node string `json:"node"` Node string `json:"node"`
VMID *int `json:"vmid"` // unset: auto discover; explicit 0: node-level route; >0: lxc/qemu resource route VMID *uint64 `json:"vmid"` // unset: auto discover; explicit 0: node-level route; >0: lxc/qemu resource route
VMName string `json:"vmname,omitempty"` VMName string `json:"vmname,omitempty"`
Services []string `json:"services,omitempty" aliases:"service"` Services []string `json:"services,omitempty" aliases:"service"`
Files []string `json:"files,omitempty" aliases:"file"` Files []string `json:"files,omitempty" aliases:"file"`

View File

@@ -212,7 +212,7 @@ func (r *Route) validate() error {
for _, p := range proxmoxProviders { for _, p := range proxmoxProviders {
// First check if hostname, IP, or alias matches a node (node-level route) // First check if hostname, IP, or alias matches a node (node-level route)
if nodeName := p.Client().ReverseLookupNode(hostname, ip, r.Alias); nodeName != "" { if nodeName := p.Client().ReverseLookupNode(hostname, ip, r.Alias); nodeName != "" {
zero := 0 zero := uint64(0)
if r.Proxmox == nil { if r.Proxmox == nil {
r.Proxmox = &proxmox.NodeConfig{} r.Proxmox = &proxmox.NodeConfig{}
} }
@@ -226,7 +226,7 @@ func (r *Route) validate() error {
// Then check if hostname, IP, or alias matches a VM resource // Then check if hostname, IP, or alias matches a VM resource
resource, _ := p.Client().ReverseLookupResource(ip, hostname, r.Alias) resource, _ := p.Client().ReverseLookupResource(ip, hostname, r.Alias)
if resource != nil { if resource != nil {
vmid := int(resource.VMID) vmid := resource.VMID
if r.Proxmox == nil { if r.Proxmox == nil {
r.Proxmox = &proxmox.NodeConfig{} r.Proxmox = &proxmox.NodeConfig{}
} }
@@ -706,7 +706,7 @@ func (r *Route) MarshalZerologObject(e *zerolog.Event) {
if r.Proxmox != nil { if r.Proxmox != nil {
e.Str("proxmox", r.Proxmox.Node) e.Str("proxmox", r.Proxmox.Node)
if r.Proxmox.VMID != nil { if r.Proxmox.VMID != nil {
e.Int("vmid", *r.Proxmox.VMID) e.Uint64("vmid", *r.Proxmox.VMID)
} }
if r.Proxmox.VMName != "" { if r.Proxmox.VMName != "" {
e.Str("vmname", r.Proxmox.VMName) e.Str("vmname", r.Proxmox.VMName)

View File

@@ -45,7 +45,7 @@ type (
} // @name IdlewatcherDockerConfig } // @name IdlewatcherDockerConfig
ProxmoxConfig struct { ProxmoxConfig struct {
Node string `json:"node" validate:"required"` Node string `json:"node" validate:"required"`
VMID int `json:"vmid" validate:"required"` VMID uint64 `json:"vmid" validate:"required"`
} // @name IdlewatcherProxmoxNodeConfig } // @name IdlewatcherProxmoxNodeConfig
) )
@@ -69,14 +69,14 @@ func (c *IdlewatcherConfig) Key() string {
if c.Docker != nil { if c.Docker != nil {
return c.Docker.ContainerID return c.Docker.ContainerID
} }
return c.Proxmox.Node + ":" + strconv.Itoa(c.Proxmox.VMID) return c.Proxmox.Node + ":" + strconv.FormatUint(c.Proxmox.VMID, 10)
} }
func (c *IdlewatcherConfig) ContainerName() string { func (c *IdlewatcherConfig) ContainerName() string {
if c.Docker != nil { if c.Docker != nil {
return c.Docker.ContainerName return c.Docker.ContainerName
} }
return "lxc-" + strconv.Itoa(c.Proxmox.VMID) return "lxc-" + strconv.FormatUint(c.Proxmox.VMID, 10)
} }
func (c *IdlewatcherConfig) Validate() error { func (c *IdlewatcherConfig) Validate() error {