Improved healthcheck, idlewatcher support for load-balanced routes, bug fixes

This commit is contained in:
yusing
2024-10-15 15:34:27 +08:00
parent 53fa28ae77
commit f4d532598c
34 changed files with 568 additions and 423 deletions

View File

@@ -43,7 +43,9 @@ func init() {
select {
case <-task.Context().Done():
clientMap.RangeAllParallel(func(_ string, c Client) {
c.Client.Close()
if c.Connected() {
c.Client.Close()
}
})
clientMap.Clear()
return

View File

@@ -41,6 +41,8 @@ type (
}
)
var DummyContainer = new(Container)
func FromDocker(c *types.Container, dockerHost string) (res *Container) {
isExplicit := c.Labels[LabelAliases] != ""
helper := containerHelper{c}

View File

@@ -2,7 +2,6 @@ package idlewatcher
import (
"context"
"encoding/json"
"net/http"
"strconv"
"time"
@@ -73,15 +72,15 @@ func (w *Waker) Uptime() time.Duration {
}
func (w *Waker) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]any{
"name": w.Name(),
"url": w.URL,
"status": w.Status(),
"config": health.HealthCheckConfig{
return (&health.JSONRepresentation{
Name: w.Name(),
Status: w.Status(),
Config: &health.HealthCheckConfig{
Interval: w.IdleTimeout,
Timeout: w.WakeTimeout,
},
})
URL: w.URL,
}).MarshalJSON()
}
/* End of HealthMonitor interface */
@@ -89,6 +88,10 @@ func (w *Waker) MarshalJSON() ([]byte, error) {
func (w *Waker) wake(rw http.ResponseWriter, r *http.Request) (shouldNext bool) {
w.resetIdleTimer()
if r.Body != nil {
defer r.Body.Close()
}
// pass through if container is ready
if w.ready.Load() {
return true
@@ -115,6 +118,16 @@ func (w *Waker) wake(rw http.ResponseWriter, r *http.Request) (shouldNext bool)
return
}
select {
case <-w.task.Context().Done():
http.Error(rw, "Waking timed out", http.StatusGatewayTimeout)
return
case <-ctx.Done():
http.Error(rw, "Waking timed out", http.StatusGatewayTimeout)
return
default:
}
// wake the container and reset idle timer
// also wait for another wake request
w.wakeCh <- struct{}{}
@@ -169,3 +182,8 @@ func (w *Waker) wake(rw http.ResponseWriter, r *http.Request) (shouldNext bool)
time.Sleep(100 * time.Millisecond)
}
}
// static HealthMonitor interface check
func (w *Waker) _() health.HealthMonitor {
return w
}

View File

@@ -8,10 +8,12 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/sirupsen/logrus"
"github.com/yusing/go-proxy/internal/common"
D "github.com/yusing/go-proxy/internal/docker"
E "github.com/yusing/go-proxy/internal/error"
P "github.com/yusing/go-proxy/internal/proxy"
PT "github.com/yusing/go-proxy/internal/proxy/fields"
U "github.com/yusing/go-proxy/internal/utils"
F "github.com/yusing/go-proxy/internal/utils/functional"
W "github.com/yusing/go-proxy/internal/watcher"
)
@@ -29,9 +31,10 @@ type (
wakeDone chan E.NestedError
ticker *time.Ticker
ctx context.Context
cancel context.CancelFunc
refCount *sync.WaitGroup
task common.Task
cancel context.CancelFunc
refCount *U.RefCount
l logrus.FieldLogger
}
@@ -42,17 +45,11 @@ type (
)
var (
mainLoopCtx context.Context
mainLoopCancel context.CancelFunc
mainLoopWg sync.WaitGroup
watcherMap = F.NewMapOf[string, *Watcher]()
watcherMapMu sync.Mutex
portHistoryMap = F.NewMapOf[PT.Alias, string]()
newWatcherCh = make(chan *Watcher)
logger = logrus.WithField("module", "idle_watcher")
)
@@ -73,7 +70,7 @@ func Register(entry *P.ReverseProxyEntry) (*Watcher, E.NestedError) {
}
if w, ok := watcherMap.Load(key); ok {
w.refCount.Add(1)
w.refCount.Add()
w.ReverseProxyEntry = entry
return w, nil
}
@@ -86,83 +83,51 @@ func Register(entry *P.ReverseProxyEntry) (*Watcher, E.NestedError) {
w := &Watcher{
ReverseProxyEntry: entry,
client: client,
refCount: &sync.WaitGroup{},
refCount: U.NewRefCounter(),
wakeCh: make(chan struct{}, 1),
wakeDone: make(chan E.NestedError),
ticker: time.NewTicker(entry.IdleTimeout),
l: logger.WithField("container", entry.ContainerName),
}
w.refCount.Add(1)
w.task, w.cancel = common.NewTaskWithCancel("Idlewatcher for %s", w.Alias)
w.stopByMethod = w.getStopCallback()
watcherMap.Store(key, w)
go func() {
newWatcherCh <- w
}()
go w.watchUntilCancel()
return w, nil
}
func (w *Watcher) Unregister() {
w.refCount.Add(-1)
}
func Start() {
logger.Debug("started")
defer logger.Debug("stopped")
mainLoopCtx, mainLoopCancel = context.WithCancel(context.Background())
for {
select {
case <-mainLoopCtx.Done():
return
case w := <-newWatcherCh:
w.l.Debug("registered")
mainLoopWg.Add(1)
go func() {
w.watchUntilCancel()
w.refCount.Wait() // wait for 0 ref count
watcherMap.Delete(w.ContainerID)
w.l.Debug("unregistered")
mainLoopWg.Done()
}()
}
}
}
func Stop() {
mainLoopCancel()
mainLoopWg.Wait()
w.refCount.Sub()
}
func (w *Watcher) containerStop() error {
return w.client.ContainerStop(w.ctx, w.ContainerID, container.StopOptions{
return w.client.ContainerStop(w.task.Context(), w.ContainerID, container.StopOptions{
Signal: string(w.StopSignal),
Timeout: &w.StopTimeout,
})
}
func (w *Watcher) containerPause() error {
return w.client.ContainerPause(w.ctx, w.ContainerID)
return w.client.ContainerPause(w.task.Context(), w.ContainerID)
}
func (w *Watcher) containerKill() error {
return w.client.ContainerKill(w.ctx, w.ContainerID, string(w.StopSignal))
return w.client.ContainerKill(w.task.Context(), w.ContainerID, string(w.StopSignal))
}
func (w *Watcher) containerUnpause() error {
return w.client.ContainerUnpause(w.ctx, w.ContainerID)
return w.client.ContainerUnpause(w.task.Context(), w.ContainerID)
}
func (w *Watcher) containerStart() error {
return w.client.ContainerStart(w.ctx, w.ContainerID, container.StartOptions{})
return w.client.ContainerStart(w.task.Context(), w.ContainerID, container.StartOptions{})
}
func (w *Watcher) containerStatus() (string, E.NestedError) {
json, err := w.client.ContainerInspect(w.ctx, w.ContainerID)
json, err := w.client.ContainerInspect(w.task.Context(), w.ContainerID)
if err != nil {
return "", E.FailWith("inspect container", err)
}
@@ -221,12 +186,8 @@ func (w *Watcher) resetIdleTimer() {
}
func (w *Watcher) watchUntilCancel() {
defer close(w.wakeCh)
w.ctx, w.cancel = context.WithCancel(mainLoopCtx)
dockerWatcher := W.NewDockerWatcherWithClient(w.client)
dockerEventCh, dockerEventErrCh := dockerWatcher.EventsWithOptions(w.ctx, W.DockerListOptions{
dockerEventCh, dockerEventErrCh := dockerWatcher.EventsWithOptions(w.task.Context(), W.DockerListOptions{
Filters: W.NewDockerFilter(
W.DockerFilterContainer,
W.DockerrFilterContainer(w.ContainerID),
@@ -238,13 +199,23 @@ func (w *Watcher) watchUntilCancel() {
W.DockerFilterUnpause,
),
})
defer w.ticker.Stop()
defer w.client.Close()
defer func() {
w.ticker.Stop()
w.client.Close()
close(w.wakeDone)
close(w.wakeCh)
watcherMap.Delete(w.ContainerID)
w.task.Finished()
}()
for {
select {
case <-w.ctx.Done():
w.l.Debug("stopped")
case <-w.task.Context().Done():
w.l.Debug("stopped by context done")
return
case <-w.refCount.Zero():
w.l.Debug("stopped by zero ref count")
return
case err := <-dockerEventErrCh:
if err != nil && err.IsNot(context.Canceled) {

View File

@@ -7,6 +7,17 @@ import (
E "github.com/yusing/go-proxy/internal/error"
)
// Inspect connects to the Docker daemon at dockerHost and returns the
// inspected state of the container identified by containerID.
// The client connection is closed before returning in all cases.
func Inspect(dockerHost string, containerID string) (*Container, E.NestedError) {
	client, err := ConnectClient(dockerHost)
	if err.HasError() {
		// Do NOT defer Close before this check: on a failed connect the
		// client may be nil/invalid and closing it could panic.
		return nil, E.FailWith("connect to docker", err)
	}
	defer client.Close()
	return client.Inspect(containerID)
}
func (c Client) Inspect(containerID string) (*Container, E.NestedError) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()