fix docker client data race on Close(), remove SharedClient.IsConnected

yusing
2025-03-01 15:47:08 +08:00
parent 357ad26a0e
commit 9b2ee628aa
3 changed files with 13 additions and 72 deletions
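Note on the race being fixed: SharedClient.Close() wrote closedOn and decremented refCount as plain fields while the cleanup goroutine (closeTimedOutClients) read the same fields concurrently. A minimal sketch of that unsynchronized pattern, which go run -race flags (type and field names mirror the diff, but this is illustrative, not the project's code):

    package main

    import "time"

    // sharedClient carries only the two fields involved in the race.
    type sharedClient struct {
        refCount uint32
        closedOn int64 // unix seconds; 0 means "still in use"
    }

    func main() {
        c := &sharedClient{refCount: 1}

        // A caller releasing the client, as the old Close() did:
        go func() {
            c.closedOn = time.Now().Unix() // plain write
            c.refCount--                   // plain read-modify-write
        }()

        // The cleanup path reading the same fields concurrently,
        // as closeTimedOutClients does:
        _ = c.closedOn != 0 && c.refCount == 0

        time.Sleep(10 * time.Millisecond) // let the goroutine run
    }

The race detector flags these accesses because the writes in the goroutine and the reads in main have no happens-before relationship; the diff below closes this by routing every access through sync/atomic.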

@@ -4,6 +4,7 @@ import (
 	"errors"
 	"net/http"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/docker/cli/cli/connhelper"
@@ -61,9 +62,7 @@ func init() {
 		for _, c := range clientMap {
 			delete(clientMap, c.key)
-			if c.Connected() {
-				c.Client.Close()
-			}
+			c.Client.Close()
 		}
 	})
 }
@@ -75,10 +74,6 @@ func closeTimedOutClients() {
 	now := time.Now().Unix()
 	for _, c := range clientMap {
-		if !c.Connected() {
-			delete(clientMap, c.key)
-			continue
-		}
 		if c.closedOn == 0 {
 			continue
 		}
@@ -90,14 +85,10 @@ func closeTimedOutClients() {
 	}
 }
 
-func (c *SharedClient) Connected() bool {
-	return c != nil && c.Client != nil
-}
-
 // if the client is still referenced, this is no-op.
 func (c *SharedClient) Close() {
-	c.closedOn = time.Now().Unix()
-	c.refCount--
+	atomic.StoreInt64(&c.closedOn, time.Now().Unix())
+	atomic.AddUint32(&c.refCount, ^uint32(0))
 }
 
 // ConnectClient creates a new Docker client connection to the specified host.
@@ -115,8 +106,8 @@ func ConnectClient(host string) (*SharedClient, error) {
 	defer clientMapMu.Unlock()
 
 	if client, ok := clientMap[host]; ok {
-		client.closedOn = 0
-		client.refCount++
+		atomic.StoreInt64(&client.closedOn, 0)
+		atomic.AddUint32(&client.refCount, 1)
 		return client, nil
 	}
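The decrement in Close() deserves a note: sync/atomic has no subtraction for unsigned types, so the idiom documented for atomic.AddUint32 is to add the two's complement; ^uint32(0) is 0xFFFFFFFF, and adding it wraps around to subtract exactly one. A compact sketch of the acquire/release pairing the hunks above implement (sharedClient and its methods are illustrative stand-ins, not the project's actual type):

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    type sharedClient struct {
        refCount uint32 // touched only via sync/atomic
        closedOn int64  // unix seconds; 0 means "in use"
    }

    // acquire mirrors the ConnectClient hunk: revive and re-reference.
    func (c *sharedClient) acquire() {
        atomic.StoreInt64(&c.closedOn, 0)
        atomic.AddUint32(&c.refCount, 1)
    }

    // release mirrors Close(): stamp the close time, drop one reference.
    func (c *sharedClient) release(now int64) {
        atomic.StoreInt64(&c.closedOn, now)
        atomic.AddUint32(&c.refCount, ^uint32(0)) // subtract 1
    }

    func main() {
        c := &sharedClient{}
        var wg sync.WaitGroup
        for i := 0; i < 100; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                c.acquire()
                c.release(1)
            }()
        }
        wg.Wait()
        fmt.Println(atomic.LoadUint32(&c.refCount)) // 0: balanced
    }

This runs cleanly under -race because every access to the two fields goes through atomic operations.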

@@ -146,9 +146,6 @@ func (w *Watcher) containerStart(ctx context.Context) error {
 }
 
 func (w *Watcher) containerStatus() (string, error) {
-	if !w.client.Connected() {
-		return "", errors.New("docker client not connected")
-	}
 	ctx, cancel := context.WithTimeoutCause(w.task.Context(), dockerReqTimeout, errors.New("docker request timeout"))
 	defer cancel()
 	json, err := w.client.ContainerInspect(ctx, w.ContainerID)
@@ -242,7 +239,7 @@ func (w *Watcher) getEventCh(dockerWatcher watcher.DockerWatcher) (eventCh <-cha
 // it exits only if the context is canceled, the container is destroyed,
 // errors occurred on docker client, or route provider died (mainly caused by config reload).
 func (w *Watcher) watchUntilDestroy() (returnCause error) {
-	dockerWatcher := watcher.NewDockerWatcherWithClient(w.client)
+	dockerWatcher := watcher.NewDockerWatcher(w.client.DaemonHost())
 	dockerEventCh, dockerEventErrCh := w.getEventCh(dockerWatcher)
 	for {
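The last hunk removes the other side of the race: instead of handing the shared client to the event watcher, the watcher now receives only the daemon host string and owns its own connection, so a Close() on the route's SharedClient can no longer pull the transport out from under the event stream. A sketch of the idea using the upstream Docker SDK directly (newClientForHost is a hypothetical helper; NewDockerWatcher's internals are not shown in this diff):

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/client"
    )

    // newClientForHost dials an independent client from a daemon host
    // string, e.g. the value returned by DaemonHost() above.
    func newClientForHost(host string) (*client.Client, error) {
        return client.NewClientWithOpts(
            client.WithHost(host),
            client.WithAPIVersionNegotiation(),
        )
    }

    func main() {
        c, err := newClientForHost("unix:///var/run/docker.sock")
        if err != nil {
            panic(err)
        }
        // closing this client affects no one else: its lifetime is
        // decoupled from any shared, reference-counted client.
        defer c.Close()

        ping, err := c.Ping(context.Background())
        if err != nil {
            panic(err)
        }
        fmt.Println("daemon API version:", ping.APIVersion)
    }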