mirror of
https://github.com/yusing/godoxy.git
synced 2026-04-28 03:07:07 +02:00
refactor(api): restructured API for type safety, maintainability and docs generation
- These changes make the API incompatible with previous versions - Added new types for error handling, success responses, and health checks. - Updated health check logic to utilize the new types for better clarity and structure. - Refactored existing handlers to improve response consistency and error handling. - Updated Makefile to include a new target for generating API types from Swagger. - Updated "new agent" API to respond with an encrypted cert pair
This commit is contained in:
@@ -5,13 +5,18 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/gin-gonic/gin"
|
||||
apitypes "github.com/yusing/go-proxy/internal/api/types"
|
||||
metricsutils "github.com/yusing/go-proxy/internal/metrics/utils"
|
||||
"github.com/yusing/go-proxy/internal/net/gphttp"
|
||||
"github.com/yusing/go-proxy/internal/net/gphttp/gpwebsocket"
|
||||
"github.com/yusing/go-proxy/internal/net/gphttp/httpheaders"
|
||||
"github.com/yusing/go-proxy/internal/net/gphttp/websocket"
|
||||
)
|
||||
|
||||
type ResponseType[AggregateT any] struct {
|
||||
Total int `json:"total"`
|
||||
Data AggregateT `json:"data"`
|
||||
}
|
||||
|
||||
// ServeHTTP serves the data for the given period.
|
||||
//
|
||||
// If the period is not specified, it serves the last result.
|
||||
@@ -23,10 +28,10 @@ import (
|
||||
// If the data is not found, it responds with 204 No Content.
|
||||
//
|
||||
// If the request is a websocket request, it serves the data for the given period for every interval.
|
||||
func (p *Poller[T, AggregateT]) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
query := r.URL.Query()
|
||||
func (p *Poller[T, AggregateT]) ServeHTTP(c *gin.Context) {
|
||||
query := c.Request.URL.Query()
|
||||
|
||||
if httpheaders.IsWebsocket(r.Header) {
|
||||
if httpheaders.IsWebsocket(c.Request.Header) {
|
||||
interval := metricsutils.QueryDuration(query, "interval", 0)
|
||||
|
||||
minInterval := 1 * time.Second
|
||||
@@ -36,27 +41,20 @@ func (p *Poller[T, AggregateT]) ServeHTTP(w http.ResponseWriter, r *http.Request
|
||||
if interval < minInterval {
|
||||
interval = minInterval
|
||||
}
|
||||
gpwebsocket.Periodic(w, r, interval, func(conn *websocket.Conn) error {
|
||||
data, err := p.getRespData(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
return conn.WriteJSON(data)
|
||||
websocket.PeriodicWrite(c, interval, func() (any, error) {
|
||||
return p.getRespData(c.Request)
|
||||
})
|
||||
} else {
|
||||
data, err := p.getRespData(r)
|
||||
data, err := p.getRespData(c.Request)
|
||||
if err != nil {
|
||||
gphttp.ServerError(w, r, err)
|
||||
c.Error(apitypes.InternalServerError(err, "failed to get response data"))
|
||||
return
|
||||
}
|
||||
if data == nil {
|
||||
http.Error(w, "no data", http.StatusNoContent)
|
||||
c.JSON(http.StatusNoContent, apitypes.Error("no data"))
|
||||
return
|
||||
}
|
||||
gphttp.RespondJSON(w, r, data)
|
||||
c.JSON(http.StatusOK, data)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -10,16 +10,24 @@ type Period[T any] struct {
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
type Filter string
|
||||
type Filter string // @name MetricsPeriod
|
||||
|
||||
const (
|
||||
MetricsPeriod5m Filter = "5m" // @name MetricsPeriod5m
|
||||
MetricsPeriod15m Filter = "15m" // @name MetricsPeriod15m
|
||||
MetricsPeriod1h Filter = "1h" // @name MetricsPeriod1h
|
||||
MetricsPeriod1d Filter = "1d" // @name MetricsPeriod1d
|
||||
MetricsPeriod1mo Filter = "1mo" // @name MetricsPeriod1mo
|
||||
)
|
||||
|
||||
func NewPeriod[T any]() *Period[T] {
|
||||
return &Period[T]{
|
||||
Entries: map[Filter]*Entries[T]{
|
||||
"5m": newEntries[T](5 * time.Minute),
|
||||
"15m": newEntries[T](15 * time.Minute),
|
||||
"1h": newEntries[T](1 * time.Hour),
|
||||
"1d": newEntries[T](24 * time.Hour),
|
||||
"1mo": newEntries[T](30 * 24 * time.Hour),
|
||||
MetricsPeriod5m: newEntries[T](5 * time.Minute),
|
||||
MetricsPeriod15m: newEntries[T](15 * time.Minute),
|
||||
MetricsPeriod1h: newEntries[T](1 * time.Hour),
|
||||
MetricsPeriod1d: newEntries[T](24 * time.Hour),
|
||||
MetricsPeriod1mo: newEntries[T](30 * 24 * time.Hour),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ import (
|
||||
// json tags are left for tests
|
||||
|
||||
type (
|
||||
Sensors []sensors.TemperatureStat
|
||||
Sensors []sensors.TemperatureStat // @name Sensors
|
||||
Aggregated []map[string]any
|
||||
)
|
||||
|
||||
@@ -35,7 +35,7 @@ type SystemInfo struct {
|
||||
DisksIO map[string]*disk.IOCountersStat `json:"disks_io"` // disk IO by device
|
||||
Network *net.IOCountersStat `json:"network"`
|
||||
Sensors Sensors `json:"sensors"` // sensor temperature by key
|
||||
}
|
||||
} // @name SystemInfo
|
||||
|
||||
const (
|
||||
queryCPUAverage = "cpu_average"
|
||||
|
||||
@@ -4,35 +4,46 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"slices"
|
||||
|
||||
"github.com/lithammer/fuzzysearch/fuzzy"
|
||||
"github.com/yusing/go-proxy/internal/metrics/period"
|
||||
metricsutils "github.com/yusing/go-proxy/internal/metrics/utils"
|
||||
"github.com/yusing/go-proxy/internal/route/routes"
|
||||
"github.com/yusing/go-proxy/internal/watcher/health"
|
||||
"github.com/yusing/go-proxy/internal/types"
|
||||
)
|
||||
|
||||
type (
|
||||
StatusByAlias struct {
|
||||
Map map[string]*routes.HealthInfoRaw `json:"statuses"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
}
|
||||
Map map[string]routes.HealthInfo `json:"statuses"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
} // @name RouteStatusesByAlias
|
||||
Status struct {
|
||||
Status health.Status `json:"status"`
|
||||
Latency int64 `json:"latency"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
}
|
||||
RouteStatuses map[string][]*Status
|
||||
Aggregated []map[string]any
|
||||
Status types.HealthStatus `json:"status" swaggertype:"string" enums:"healthy,unhealthy,unknown,napping,starting"`
|
||||
Latency int64 `json:"latency"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
} // @name RouteStatus
|
||||
RouteStatuses map[string][]*Status // @name RouteStatuses
|
||||
RouteAggregate struct {
|
||||
Alias string `json:"alias"`
|
||||
DisplayName string `json:"display_name"`
|
||||
Uptime float64 `json:"uptime"`
|
||||
Downtime float64 `json:"downtime"`
|
||||
Idle float64 `json:"idle"`
|
||||
AvgLatency float64 `json:"avg_latency"`
|
||||
Statuses []*Status `json:"statuses"`
|
||||
} // @name RouteUptimeAggregate
|
||||
Aggregated []RouteAggregate
|
||||
)
|
||||
|
||||
var Poller = period.NewPoller("uptime", getStatuses, aggregateStatuses)
|
||||
|
||||
func getStatuses(ctx context.Context, _ *StatusByAlias) (*StatusByAlias, error) {
|
||||
return &StatusByAlias{
|
||||
Map: routes.HealthInfo(),
|
||||
Map: routes.GetHealthInfo(),
|
||||
Timestamp: time.Now().Unix(),
|
||||
}, nil
|
||||
}
|
||||
@@ -78,11 +89,11 @@ func (rs RouteStatuses) calculateInfo(statuses []*Status) (up float64, down floa
|
||||
latency := float64(0)
|
||||
for _, status := range statuses {
|
||||
// ignoring unknown; treating napping and starting as downtime
|
||||
if status.Status == health.StatusUnknown {
|
||||
if status.Status == types.StatusUnknown {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case status.Status == health.StatusHealthy:
|
||||
case status.Status == types.StatusHealthy:
|
||||
up++
|
||||
case status.Status.Idling():
|
||||
idle++
|
||||
@@ -110,28 +121,39 @@ func (rs RouteStatuses) aggregate(limit int, offset int) Aggregated {
|
||||
sortedAliases[i] = alias
|
||||
i++
|
||||
}
|
||||
sort.Strings(sortedAliases)
|
||||
// unknown statuses are at the end, then sort by alias
|
||||
slices.SortFunc(sortedAliases, func(a, b string) int {
|
||||
if rs[a][len(rs[a])-1].Status == types.StatusUnknown {
|
||||
return 1
|
||||
}
|
||||
if rs[b][len(rs[b])-1].Status == types.StatusUnknown {
|
||||
return -1
|
||||
}
|
||||
return strings.Compare(a, b)
|
||||
})
|
||||
sortedAliases = sortedAliases[beg:end]
|
||||
result := make(Aggregated, len(sortedAliases))
|
||||
for i, alias := range sortedAliases {
|
||||
statuses := rs[alias]
|
||||
up, down, idle, latency := rs.calculateInfo(statuses)
|
||||
result[i] = map[string]any{
|
||||
"alias": alias,
|
||||
"uptime": up,
|
||||
"downtime": down,
|
||||
"idle": idle,
|
||||
"avg_latency": latency,
|
||||
"statuses": statuses,
|
||||
result[i] = RouteAggregate{
|
||||
Alias: alias,
|
||||
Uptime: up,
|
||||
Downtime: down,
|
||||
Idle: idle,
|
||||
AvgLatency: latency,
|
||||
Statuses: statuses,
|
||||
}
|
||||
r, ok := routes.Get(alias)
|
||||
if ok {
|
||||
result[i]["display_name"] = r.HomepageConfig().Name
|
||||
result[i].DisplayName = r.HomepageConfig().Name
|
||||
} else {
|
||||
result[i].DisplayName = alias
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (result Aggregated) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal([]map[string]any(result))
|
||||
return json.Marshal([]RouteAggregate(result))
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user