Routes in a load-balancer pool are no longer listed in ls-route and its API; the load balancer is listed instead. Improved context handling and graceful shutdown.

This commit is contained in:
yusing
2024-10-14 09:28:54 +08:00
parent d3b8cb8cba
commit 99207ae606
14 changed files with 278 additions and 166 deletions

View File

@@ -1,9 +0,0 @@
package route
import (
"time"
)
const (
streamStopListenTimeout = 1 * time.Second
)

View File

@@ -1,7 +1,6 @@
package route
import (
"context"
"errors"
"fmt"
"net/http"
@@ -10,6 +9,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/yusing/go-proxy/internal/api/v1/errorpage"
"github.com/yusing/go-proxy/internal/common"
"github.com/yusing/go-proxy/internal/docker/idlewatcher"
E "github.com/yusing/go-proxy/internal/error"
gphttp "github.com/yusing/go-proxy/internal/net/http"
@@ -52,6 +52,10 @@ func (rp ReverseProxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
rp.ReverseProxy.ServeHTTP(w, r)
}
// GetReverseProxies returns the package-level map of HTTP reverse-proxy routes.
// NOTE(review): the returned map is the live internal map, not a copy — callers
// share state with this package; verify mutation safety at call sites.
func GetReverseProxies() F.Map[string, *HTTPRoute] {
return httpRoutes
}
func SetFindMuxDomains(domains []string) {
if len(domains) == 0 {
findMuxFunc = findMuxAnyDomain
@@ -91,8 +95,7 @@ func NewHTTPRoute(entry *P.ReverseProxyEntry) (*HTTPRoute, E.NestedError) {
}
if !entry.HealthCheck.Disabled {
r.healthMon = health.NewHTTPHealthMonitor(
context.Background(),
string(entry.Alias),
common.GlobalTask("Reverse proxy "+r.String()),
entry.URL,
entry.HealthCheck,
)

View File

@@ -5,14 +5,14 @@ import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
"github.com/yusing/go-proxy/internal/common"
E "github.com/yusing/go-proxy/internal/error"
url "github.com/yusing/go-proxy/internal/net/types"
P "github.com/yusing/go-proxy/internal/proxy"
PT "github.com/yusing/go-proxy/internal/proxy/fields"
F "github.com/yusing/go-proxy/internal/utils/functional"
"github.com/yusing/go-proxy/internal/watcher/health"
)
@@ -20,16 +20,18 @@ type StreamRoute struct {
*P.StreamEntry
StreamImpl `json:"-"`
url url.URL
healthMon health.HealthMonitor
HealthMon health.HealthMonitor `json:"health"`
url url.URL
wg sync.WaitGroup
ctx context.Context
task common.Task
cancel context.CancelFunc
connCh chan any
started atomic.Bool
l logrus.FieldLogger
connCh chan any
l logrus.FieldLogger
mu sync.Mutex
}
type StreamImpl interface {
@@ -40,6 +42,12 @@ type StreamImpl interface {
String() string
}
var streamRoutes = F.NewMapOf[string, *StreamRoute]()
// GetStreamProxies returns the package-level map of TCP/UDP stream routes,
// keyed by route alias (entries are stored on Start and deleted on Stop).
// NOTE(review): the returned map is the live internal map, not a copy.
func GetStreamProxies() F.Map[string, *StreamRoute] {
return streamRoutes
}
func NewStreamRoute(entry *P.StreamEntry) (*StreamRoute, E.NestedError) {
// TODO: support non-coherent scheme
if !entry.Scheme.IsCoherent() {
@@ -60,9 +68,6 @@ func NewStreamRoute(entry *P.StreamEntry) (*StreamRoute, E.NestedError) {
} else {
base.StreamImpl = NewUDPRoute(base)
}
if !entry.Healthcheck.Disabled {
base.healthMon = health.NewRawHealthMonitor(base.ctx, string(entry.Alias), url, entry.Healthcheck)
}
base.l = logrus.WithField("route", base.StreamImpl)
return base, nil
}
@@ -76,72 +81,71 @@ func (r *StreamRoute) URL() url.URL {
}
func (r *StreamRoute) Start() E.NestedError {
if r.Port.ProxyPort == PT.NoPort || r.started.Load() {
r.mu.Lock()
defer r.mu.Unlock()
if r.Port.ProxyPort == PT.NoPort || r.task != nil {
return nil
}
r.ctx, r.cancel = context.WithCancel(context.Background())
r.task, r.cancel = common.NewTaskWithCancel(r.String())
r.wg.Wait()
if err := r.Setup(); err != nil {
return E.FailWith("setup", err)
}
r.l.Infof("listening on port %d", r.Port.ListeningPort)
r.started.Store(true)
r.wg.Add(2)
go r.grAcceptConnections()
go r.grHandleConnections()
if r.healthMon != nil {
r.healthMon.Start()
go r.acceptConnections()
go r.handleConnections()
if !r.Healthcheck.Disabled {
r.HealthMon = health.NewRawHealthMonitor(r.task, r.URL(), r.Healthcheck)
r.HealthMon.Start()
}
streamRoutes.Store(string(r.Alias), r)
return nil
}
func (r *StreamRoute) Stop() E.NestedError {
if !r.started.Load() {
r.mu.Lock()
defer r.mu.Unlock()
if r.task == nil {
return nil
}
r.started.Store(false)
if r.healthMon != nil {
r.healthMon.Stop()
streamRoutes.Delete(string(r.Alias))
if r.HealthMon != nil {
r.HealthMon.Stop()
r.HealthMon = nil
}
r.cancel()
r.CloseListeners()
done := make(chan struct{}, 1)
go func() {
r.wg.Wait()
close(done)
}()
r.wg.Wait()
r.task.Finished()
timeout := time.After(streamStopListenTimeout)
for {
select {
case <-done:
r.l.Debug("stopped listening")
return nil
case <-timeout:
return E.FailedWhy("stop", "timed out")
}
}
r.task, r.cancel = nil, nil
return nil
}
func (r *StreamRoute) Started() bool {
return r.started.Load()
return r.task != nil
}
func (r *StreamRoute) grAcceptConnections() {
func (r *StreamRoute) acceptConnections() {
defer r.wg.Done()
for {
select {
case <-r.ctx.Done():
case <-r.task.Context().Done():
return
default:
conn, err := r.Accept()
if err != nil {
select {
case <-r.ctx.Done():
case <-r.task.Context().Done():
return
default:
r.l.Error(err)
@@ -153,12 +157,12 @@ func (r *StreamRoute) grAcceptConnections() {
}
}
func (r *StreamRoute) grHandleConnections() {
func (r *StreamRoute) handleConnections() {
defer r.wg.Done()
for {
select {
case <-r.ctx.Done():
case <-r.task.Context().Done():
return
case conn := <-r.connCh:
go func() {

View File

@@ -51,7 +51,7 @@ func (route *TCPRoute) Handle(c any) error {
defer clientConn.Close()
ctx, cancel := context.WithTimeout(route.ctx, tcpDialTimeout)
ctx, cancel := context.WithTimeout(route.task.Context(), tcpDialTimeout)
defer cancel()
serverAddr := fmt.Sprintf("%s:%v", route.Host, route.Port.ProxyPort)
@@ -64,7 +64,7 @@ func (route *TCPRoute) Handle(c any) error {
route.mu.Lock()
pipe := U.NewBidirectionalPipe(route.ctx, clientConn, serverConn)
pipe := U.NewBidirectionalPipe(route.task.Context(), clientConn, serverConn)
route.pipe = append(route.pipe, pipe)
route.mu.Unlock()

View File

@@ -93,7 +93,7 @@ func (route *UDPRoute) Accept() (any, error) {
key,
srcConn,
dstConn,
U.NewBidirectionalPipe(route.ctx, sourceRWCloser{in, dstConn}, sourceRWCloser{in, srcConn}),
U.NewBidirectionalPipe(route.task.Context(), sourceRWCloser{in, dstConn}, sourceRWCloser{in, srcConn}),
}
route.connMap.Store(key, conn)
}