Fixed a few issues:

- Incorrect name shown on the dashboard "Proxies" page
- Apps shown when homepage.show is false
- Load-balanced routes shown on the homepage instead of the load balancer
- Routes with idlewatcher are now removed on container destroy
- Idlewatcher panic
- Performance improvements
- Idlewatcher loading infinitely
- Reload stuck / not working properly
- Streams stuck on shutdown / reload
- etc.
Added:
- support for idlewatcher on load-balanced routes (one shared watcher per container; see the sketch below)
- partial implementation of stream-type idlewatcher
Known issues:
- graceful shutdown
Author: yusing
Date: 2024-10-18 16:47:01 +08:00
Parent: c0c61709ca
Commit: 53557e38b6
69 changed files with 2368 additions and 1654 deletions
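
The load-balanced-routes item above works because watchers are ref-counted and keyed by container ID (watcherMap with refCount.Add/Sub in the diff below): every route backed by the same container shares a single watcher, which is torn down only when the last route unregisters. Below is a minimal, standalone sketch of that pattern; the names (registry, register, unregister) are illustrative, not the repository's API.

package main

import (
	"fmt"
	"sync"
)

// watcher is a stand-in for the idle-watcher: one instance per container,
// shared by every route (including load-balanced replicas) that points at it.
type watcher struct {
	containerID string
	refs        int
}

type registry struct {
	mu       sync.Mutex
	watchers map[string]*watcher
}

// register returns the existing watcher for a container or creates one,
// bumping its reference count either way.
func (r *registry) register(containerID string) *watcher {
	r.mu.Lock()
	defer r.mu.Unlock()
	w, ok := r.watchers[containerID]
	if !ok {
		w = &watcher{containerID: containerID}
		r.watchers[containerID] = w
	}
	w.refs++
	return w
}

// unregister drops a reference; the watcher is torn down only when the
// last route releases it.
func (r *registry) unregister(w *watcher) {
	r.mu.Lock()
	defer r.mu.Unlock()
	w.refs--
	if w.refs == 0 {
		delete(r.watchers, w.containerID)
		fmt.Println("watcher destroyed for", w.containerID)
	}
}

func main() {
	r := &registry{watchers: map[string]*watcher{}}

	// Two load-balanced routes backed by the same container share one watcher.
	a := r.register("abc123")
	b := r.register("abc123")
	fmt.Println(a == b, a.refs) // true 2

	r.unregister(a)
	r.unregister(b) // watcher destroyed for abc123
}

The real code additionally refreshes the shared watcher on re-registration (w.Config = cfg, w.waker = waker, w.resetIdleTimer()), so a config reload reuses the watcher instead of recreating it.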


@@ -2,191 +2,193 @@ package idlewatcher

 import (
 	"context"
+	"errors"
+	"fmt"
 	"sync"
-	"sync/atomic"
 	"time"

 	"github.com/docker/docker/api/types/container"
 	"github.com/sirupsen/logrus"
-	"github.com/yusing/go-proxy/internal/common"
 	D "github.com/yusing/go-proxy/internal/docker"
+	idlewatcher "github.com/yusing/go-proxy/internal/docker/idlewatcher/config"
 	E "github.com/yusing/go-proxy/internal/error"
-	P "github.com/yusing/go-proxy/internal/proxy"
-	PT "github.com/yusing/go-proxy/internal/proxy/fields"
+	"github.com/yusing/go-proxy/internal/proxy/entry"
+	"github.com/yusing/go-proxy/internal/task"
 	U "github.com/yusing/go-proxy/internal/utils"
 	F "github.com/yusing/go-proxy/internal/utils/functional"
+	"github.com/yusing/go-proxy/internal/watcher"
 	W "github.com/yusing/go-proxy/internal/watcher"
 	"github.com/yusing/go-proxy/internal/watcher/events"
 )

 type (
 	Watcher struct {
-		*P.ReverseProxyEntry
+		_ U.NoCopy

-		client D.Client
+		*idlewatcher.Config
+		*waker

-		ready        atomic.Bool // whether the site is ready to accept connection
+		client D.Client

 		stopByMethod StopCallback // send a docker command w.r.t. `stop_method`
-		ticker       *time.Ticker
-		task         common.Task
-		cancel       context.CancelFunc
-		refCount     *U.RefCount
-		l            logrus.FieldLogger
+		ticker       *time.Ticker
+		task         task.Task
+		l            *logrus.Entry
 	}

 	WakeDone     <-chan error
 	WakeFunc     func() WakeDone
-	StopCallback func() E.NestedError
+	StopCallback func() error
 )

 var (
 	watcherMap   = F.NewMapOf[string, *Watcher]()
 	watcherMapMu sync.Mutex

-	portHistoryMap = F.NewMapOf[PT.Alias, string]()

 	logger = logrus.WithField("module", "idle_watcher")
 )

-func Register(entry *P.ReverseProxyEntry) (*Watcher, E.NestedError) {
-	failure := E.Failure("idle_watcher register")
+const dockerReqTimeout = 3 * time.Second

-	if entry.IdleTimeout == 0 {
-		return nil, failure.With(E.Invalid("idle_timeout", 0))
+func registerWatcher(providerSubtask task.Task, entry entry.Entry, waker *waker) (*Watcher, E.NestedError) {
+	failure := E.Failure("idle_watcher register")
+	cfg := entry.IdlewatcherConfig()
+	if cfg.IdleTimeout == 0 {
+		panic("should not reach here")
 	}

 	watcherMapMu.Lock()
 	defer watcherMapMu.Unlock()

-	key := entry.ContainerID
-	if entry.URL.Port() != "0" {
-		portHistoryMap.Store(entry.Alias, entry.URL.Port())
-	}
+	key := cfg.ContainerID
 	if w, ok := watcherMap.Load(key); ok {
 		w.refCount.Add()
-		w.ReverseProxyEntry = entry
+		w.Config = cfg
+		w.waker = waker
 		w.resetIdleTimer()
 		return w, nil
 	}

-	client, err := D.ConnectClient(entry.DockerHost)
+	client, err := D.ConnectClient(cfg.DockerHost)
 	if err.HasError() {
 		return nil, failure.With(err)
 	}
 	w := &Watcher{
-		ReverseProxyEntry: entry,
-		client:            client,
-		refCount:          U.NewRefCounter(),
-		ticker:            time.NewTicker(entry.IdleTimeout),
-		l:                 logger.WithField("container", entry.ContainerName),
+		Config: cfg,
+		waker:  waker,
+		client: client,
+		task:   providerSubtask,
+		ticker: time.NewTicker(cfg.IdleTimeout),
+		l:      logger.WithField("container", cfg.ContainerName),
 	}
-	w.task, w.cancel = common.NewTaskWithCancel("Idlewatcher for %s", w.Alias)
 	w.stopByMethod = w.getStopCallback()
 	watcherMap.Store(key, w)
-	go w.watchUntilCancel()
+	go func() {
+		cause := w.watchUntilDestroy()
+		watcherMapMu.Lock()
+		watcherMap.Delete(w.ContainerID)
+		watcherMapMu.Unlock()
+		w.ticker.Stop()
+		w.client.Close()
+		w.task.Finish(cause.Error())
+	}()
 	return w, nil
 }

 func (w *Watcher) Unregister() {
 	w.refCount.Sub()
 }

-func (w *Watcher) containerStop() error {
-	return w.client.ContainerStop(w.task.Context(), w.ContainerID, container.StopOptions{
+func (w *Watcher) containerStop(ctx context.Context) error {
+	return w.client.ContainerStop(ctx, w.ContainerID, container.StopOptions{
 		Signal:  string(w.StopSignal),
 		Timeout: &w.StopTimeout,
 	})
 }

-func (w *Watcher) containerPause() error {
-	return w.client.ContainerPause(w.task.Context(), w.ContainerID)
+func (w *Watcher) containerPause(ctx context.Context) error {
+	return w.client.ContainerPause(ctx, w.ContainerID)
 }

-func (w *Watcher) containerKill() error {
-	return w.client.ContainerKill(w.task.Context(), w.ContainerID, string(w.StopSignal))
+func (w *Watcher) containerKill(ctx context.Context) error {
+	return w.client.ContainerKill(ctx, w.ContainerID, string(w.StopSignal))
 }

-func (w *Watcher) containerUnpause() error {
-	return w.client.ContainerUnpause(w.task.Context(), w.ContainerID)
+func (w *Watcher) containerUnpause(ctx context.Context) error {
+	return w.client.ContainerUnpause(ctx, w.ContainerID)
 }

-func (w *Watcher) containerStart() error {
-	return w.client.ContainerStart(w.task.Context(), w.ContainerID, container.StartOptions{})
+func (w *Watcher) containerStart(ctx context.Context) error {
+	return w.client.ContainerStart(ctx, w.ContainerID, container.StartOptions{})
 }

-func (w *Watcher) containerStatus() (string, E.NestedError) {
+func (w *Watcher) containerStatus() (string, error) {
 	if !w.client.Connected() {
-		return "", E.Failure("docker client closed")
+		return "", errors.New("docker client not connected")
 	}
-	json, err := w.client.ContainerInspect(w.task.Context(), w.ContainerID)
+	ctx, cancel := context.WithTimeoutCause(w.task.Context(), dockerReqTimeout, errors.New("docker request timeout"))
+	defer cancel()
+	json, err := w.client.ContainerInspect(ctx, w.ContainerID)
 	if err != nil {
-		return "", E.FailWith("inspect container", err)
+		return "", fmt.Errorf("failed to inspect container: %w", err)
 	}
 	return json.State.Status, nil
 }

-func (w *Watcher) wakeIfStopped() E.NestedError {
-	if w.ready.Load() || w.ContainerRunning {
+func (w *Watcher) wakeIfStopped() error {
+	if w.ContainerRunning {
 		return nil
 	}

 	status, err := w.containerStatus()
-	if err.HasError() {
+	if err != nil {
 		return err
 	}
 	// "created", "running", "paused", "restarting", "removing", "exited", or "dead"
+	ctx, cancel := context.WithTimeout(w.task.Context(), dockerReqTimeout)
+	defer cancel()
 	// !Hard-coded here since there are no constants from the Docker API
 	switch status {
 	case "exited", "dead":
-		return E.From(w.containerStart())
+		return w.containerStart(ctx)
 	case "paused":
-		return E.From(w.containerUnpause())
+		return w.containerUnpause(ctx)
 	case "running":
 		return nil
 	default:
-		return E.Unexpected("container state", status)
+		panic("should not reach here")
 	}
 }

 func (w *Watcher) getStopCallback() StopCallback {
-	var cb func() error
+	var cb func(context.Context) error
 	switch w.StopMethod {
-	case PT.StopMethodPause:
+	case idlewatcher.StopMethodPause:
 		cb = w.containerPause
-	case PT.StopMethodStop:
+	case idlewatcher.StopMethodStop:
 		cb = w.containerStop
-	case PT.StopMethodKill:
+	case idlewatcher.StopMethodKill:
 		cb = w.containerKill
 	default:
 		panic("should not reach here")
 	}
-	return func() E.NestedError {
-		status, err := w.containerStatus()
-		if err.HasError() {
-			return err
-		}
-		if status != "running" {
-			return nil
-		}
-		return E.From(cb())
+	return func() error {
+		ctx, cancel := context.WithTimeout(w.task.Context(), dockerReqTimeout)
+		defer cancel()
+		return cb(ctx)
 	}
 }

 func (w *Watcher) resetIdleTimer() {
 	w.l.Trace("reset idle timer")
 	w.ticker.Reset(w.IdleTimeout)
 }

-func (w *Watcher) watchUntilCancel() {
-	dockerWatcher := W.NewDockerWatcherWithClient(w.client)
-	dockerEventCh, dockerEventErrCh := dockerWatcher.EventsWithOptions(w.task.Context(), W.DockerListOptions{
+func (w *Watcher) getEventCh(dockerWatcher watcher.DockerWatcher) (eventTask task.Task, eventCh <-chan events.Event, errCh <-chan E.NestedError) {
+	eventTask = w.task.Subtask("watcher for %s", w.ContainerID)
+	eventCh, errCh = dockerWatcher.EventsWithOptions(eventTask.Context(), W.DockerListOptions{
 		Filters: W.NewDockerFilter(
 			W.DockerFilterContainer,
 			W.DockerrFilterContainer(w.ContainerID),
@@ -194,34 +196,47 @@ func (w *Watcher) watchUntilCancel() {
 			W.DockerFilterStop,
 			W.DockerFilterDie,
 			W.DockerFilterKill,
+			W.DockerFilterDestroy,
 			W.DockerFilterPause,
 			W.DockerFilterUnpause,
 		),
 	})
+	return
+}

-	defer func() {
-		w.cancel()
-		w.ticker.Stop()
-		w.client.Close()
-		watcherMap.Delete(w.ContainerID)
-		w.task.Finished()
-	}()

+// watchUntilDestroy waits for the container to be created, started, or unpaused,
+// and then resets the idle timer.
+//
+// When the container is stopped, paused,
+// or killed, the idle timer is stopped and the ContainerRunning flag is set to false.
+//
+// When the idle timer fires, the container is stopped according to the
+// stop method.
+//
+// It exits only when the context is canceled, the container is destroyed,
+// an error occurs on the docker client, or the route provider dies (mainly caused by a config reload).
+func (w *Watcher) watchUntilDestroy() error {
+	dockerWatcher := W.NewDockerWatcherWithClient(w.client)
+	eventTask, dockerEventCh, dockerEventErrCh := w.getEventCh(dockerWatcher)

 	for {
 		select {
 		case <-w.task.Context().Done():
-			w.l.Debug("stopped by context done")
-			return
-		case <-w.refCount.Zero():
-			w.l.Debug("stopped by zero ref count")
-			return
+			cause := context.Cause(w.task.Context())
+			w.l.Debugf("watcher stopped by context done: %s", cause)
+			return cause
 		case err := <-dockerEventErrCh:
 			if err != nil && err.IsNot(context.Canceled) {
-				w.l.Error(E.FailWith("docker watcher", err))
-				return
+				return err.Error()
 			}
 		case e := <-dockerEventCh:
 			switch {
+			case e.Action == events.ActionContainerDestroy:
+				w.ContainerRunning = false
+				w.ready.Store(false)
+				w.l.Info("watcher stopped by container destruction")
+				return errors.New("container destroyed")
 			// create / start / unpause
 			case e.Action.IsContainerWake():
 				w.ContainerRunning = true
@@ -229,18 +244,31 @@ func (w *Watcher) watchUntilCancel() {
 				w.l.Info("container awaken")
 			case e.Action.IsContainerSleep(): // stop / pause / kill
 				w.ContainerRunning = false
-				w.ticker.Stop()
 				w.ready.Store(false)
+				w.ticker.Stop()
 			default:
 				w.l.Errorf("unexpected docker event: %s", e)
 			}
+			// a container name change should also change the container id
+			if w.ContainerName != e.ActorName {
+				w.l.Debugf("container renamed %s -> %s", w.ContainerName, e.ActorName)
+				w.ContainerName = e.ActorName
+			}
+			if w.ContainerID != e.ActorID {
+				w.l.Debugf("container id changed %s -> %s", w.ContainerID, e.ActorID)
+				w.ContainerID = e.ActorID
+				// recreate the event stream
+				eventTask.Finish("recreate event stream")
+				eventTask, dockerEventCh, dockerEventErrCh = w.getEventCh(dockerWatcher)
+			}
 		case <-w.ticker.C:
-			w.l.Debug("idle timeout")
 			w.ticker.Stop()
-			if err := w.stopByMethod(); err != nil && err.IsNot(context.Canceled) {
-				w.l.Error(E.FailWith("stop", err).Extraf("stop method: %s", w.StopMethod))
-			} else {
-				w.l.Info("stopped by idle timeout")
+			if w.ContainerRunning {
+				if err := w.stopByMethod(); err != nil && !errors.Is(err, context.Canceled) {
+					w.l.Errorf("container stop with method %q failed with error: %v", w.StopMethod, err)
+				} else {
+					w.l.Info("container stopped by idle timeout")
+				}
 			}
 		}
 	}
 }
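
Several of the fixes above (reload stuck, streams stuck on shutdown) boil down to two context idioms in this file: each Docker request now runs under a short deadline derived from the long-lived task context (dockerReqTimeout with context.WithTimeoutCause), and the watch loop returns context.Cause(...) so the log states why it exited. Below is a minimal, self-contained sketch of both idioms, standard library only; the names (inspect, errDockerReqTimeout here) are illustrative, not the repository's API.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errDockerReqTimeout = errors.New("docker request timeout")

// inspect stands in for a Docker API call: it derives a short per-request
// deadline from the long-lived task context, so a hung daemon cannot block
// shutdown or reload.
func inspect(task context.Context) error {
	ctx, cancel := context.WithTimeoutCause(task, 50*time.Millisecond, errDockerReqTimeout)
	defer cancel()

	select {
	case <-time.After(100 * time.Millisecond): // simulate a slow docker daemon
		return nil
	case <-ctx.Done():
		// context.Cause yields errDockerReqTimeout instead of the generic
		// context.DeadlineExceeded.
		return context.Cause(ctx)
	}
}

func main() {
	task, stop := context.WithCancelCause(context.Background())

	if err := inspect(task); err != nil {
		fmt.Println("inspect failed:", err) // inspect failed: docker request timeout
	}

	// Cancelling the task context with a cause is how the watch loop can
	// report why it exited, mirroring `cause := context.Cause(w.task.Context())`.
	stop(errors.New("provider reloaded"))
	fmt.Println("watcher stopped:", context.Cause(task)) // watcher stopped: provider reloaded
}

context.WithTimeoutCause (Go 1.21+) surfaces a specific cause instead of a bare context.DeadlineExceeded, which is what makes log lines like "watcher stopped by context done: ..." carry a meaningful reason.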