refactor(metrics): optimize JSON marshaling and aggregation logic

- Updated JSON marshaling in SystemInfo to use the `%q` verb, so keys and string values are quoted with proper escaping.
- Refactored aggregation logic to append entries instead of assigning by index, avoiding nil placeholder entries when an input is skipped.
- Adjusted test cases to reflect changes in data structure and ensure accurate serialization.
This commit is contained in:
yusing
2025-09-14 00:07:34 +08:00
parent d56663d3f9
commit 124069aaa4
4 changed files with 79 additions and 70 deletions

View File

@@ -47,7 +47,7 @@ func (s *SystemInfo) MarshalJSON() ([]byte, error) {
b = append(b, ',')
}
b = fmt.Appendf(b,
`"%s":{"device":"%s","path":"%s","fstype":"%s","total":%d,"free":%d,"used":%d,"used_percent":%.2f}`,
`%q:{"device":%q,"path":%q,"fstype":%q,"total":%d,"free":%d,"used":%d,"used_percent":%.2f}`,
device,
device,
disk.Path,
@@ -74,7 +74,7 @@ func (s *SystemInfo) MarshalJSON() ([]byte, error) {
b = append(b, ',')
}
b = fmt.Appendf(b,
`"%s":{"name":"%s","read_bytes":%d,"write_bytes":%d,"read_speed":%.2f,"write_speed":%.2f,"iops":%d}`,
`%q:{"name":%q,"read_bytes":%d,"write_bytes":%d,"read_speed":%.2f,"write_speed":%.2f,"iops":%d}`,
name,
name,
usage.ReadBytes,
@@ -114,7 +114,7 @@ func (s *SystemInfo) MarshalJSON() ([]byte, error) {
b = append(b, ',')
}
b = fmt.Appendf(b,
`{"name":"%s","temperature":%.2f,"high":%.2f,"critical":%.2f}`,
`{"name":%q,"temperature":%.2f,"high":%.2f,"critical":%.2f}`,
sensor.SensorKey,
sensor.Temperature,
sensor.High,

View File

@@ -223,39 +223,39 @@ func (s *SystemInfo) collectSensorsInfo(ctx context.Context) error {
func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggregated) {
n := len(entries)
aggregated := Aggregated{
Entries: make([]map[string]any, n),
Entries: make([]map[string]any, 0, n),
Mode: SystemInfoAggregateMode(query.Get("aggregate")),
}
switch aggregated.Mode {
case SystemInfoAggregateModeCPUAverage:
for i, entry := range entries {
for _, entry := range entries {
if entry.CPUAverage != nil {
aggregated.Entries[i] = map[string]any{
aggregated.Entries = append(aggregated.Entries, map[string]any{
"timestamp": entry.Timestamp,
"cpu_average": *entry.CPUAverage,
}
})
}
}
case SystemInfoAggregateModeMemoryUsage:
for i, entry := range entries {
for _, entry := range entries {
if entry.Memory != nil {
aggregated.Entries[i] = map[string]any{
aggregated.Entries = append(aggregated.Entries, map[string]any{
"timestamp": entry.Timestamp,
"memory_usage": entry.Memory.Used,
}
})
}
}
case SystemInfoAggregateModeMemoryUsagePercent:
for i, entry := range entries {
for _, entry := range entries {
if entry.Memory != nil {
aggregated.Entries[i] = map[string]any{
aggregated.Entries = append(aggregated.Entries, map[string]any{
"timestamp": entry.Timestamp,
"memory_usage_percent": entry.Memory.UsedPercent,
}
})
}
}
case SystemInfoAggregateModeDisksReadSpeed:
for i, entry := range entries {
for _, entry := range entries {
if entry.DisksIO == nil {
continue
}
@@ -264,10 +264,10 @@ func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggre
m[name] = usage.ReadSpeed
}
m["timestamp"] = entry.Timestamp
aggregated.Entries[i] = m
aggregated.Entries = append(aggregated.Entries, m)
}
case SystemInfoAggregateModeDisksWriteSpeed:
for i, entry := range entries {
for _, entry := range entries {
if entry.DisksIO == nil {
continue
}
@@ -276,10 +276,10 @@ func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggre
m[name] = usage.WriteSpeed
}
m["timestamp"] = entry.Timestamp
aggregated.Entries[i] = m
aggregated.Entries = append(aggregated.Entries, m)
}
case SystemInfoAggregateModeDisksIOPS:
for i, entry := range entries {
for _, entry := range entries {
if entry.DisksIO == nil {
continue
}
@@ -288,10 +288,10 @@ func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggre
m[name] = usage.Iops
}
m["timestamp"] = entry.Timestamp
aggregated.Entries[i] = m
aggregated.Entries = append(aggregated.Entries, m)
}
case SystemInfoAggregateModeDiskUsage:
for i, entry := range entries {
for _, entry := range entries {
if entry.Disks == nil {
continue
}
@@ -300,32 +300,32 @@ func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggre
m[name] = disk.Used
}
m["timestamp"] = entry.Timestamp
aggregated.Entries[i] = m
aggregated.Entries = append(aggregated.Entries, m)
}
case SystemInfoAggregateModeNetworkSpeed:
for i, entry := range entries {
for _, entry := range entries {
if entry.Network == nil {
continue
}
aggregated.Entries[i] = map[string]any{
aggregated.Entries = append(aggregated.Entries, map[string]any{
"timestamp": entry.Timestamp,
"upload": entry.Network.UploadSpeed,
"download": entry.Network.DownloadSpeed,
}
})
}
case SystemInfoAggregateModeNetworkTransfer:
for i, entry := range entries {
for _, entry := range entries {
if entry.Network == nil {
continue
}
aggregated.Entries[i] = map[string]any{
aggregated.Entries = append(aggregated.Entries, map[string]any{
"timestamp": entry.Timestamp,
"upload": entry.Network.BytesSent,
"download": entry.Network.BytesRecv,
}
})
}
case SystemInfoAggregateModeSensorTemperature:
for i, entry := range entries {
for _, entry := range entries {
if entry.Sensors == nil {
continue
}
@@ -334,7 +334,7 @@ func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggre
m[sensor.SensorKey] = sensor.Temperature
}
m["timestamp"] = entry.Timestamp
aggregated.Entries[i] = m
aggregated.Entries = append(aggregated.Entries, m)
}
default:
return -1, Aggregated{}

View File

@@ -133,11 +133,11 @@ func TestSerialize(t *testing.T) {
ExpectNoError(t, err)
var v []map[string]any
ExpectNoError(t, json.Unmarshal(s, &v))
ExpectEqual(t, len(v), len(result))
ExpectEqual(t, len(v), len(result.Entries))
for i, m := range v {
for k, v := range m {
// some int64 values are converted to float64 on json.Unmarshal
vv := reflect.ValueOf(result[i][k])
vv := reflect.ValueOf(result.Entries[i][k])
ExpectEqual(t, reflect.ValueOf(v).Convert(vv.Type()).Interface(), vv.Interface())
}
}
@@ -177,7 +177,7 @@ func BenchmarkSerialize(b *testing.B) {
b.Run("json", func(b *testing.B) {
for b.Loop() {
for _, query := range allQueries {
_, _ = json.Marshal([]map[string]any(queries[string(query)]))
_, _ = json.Marshal([]map[string]any(queries[string(query)].Entries))
}
}
})

View File

@@ -4,12 +4,12 @@ import (
"context"
"encoding/json"
"net/url"
"strings"
"time"
"slices"
"github.com/lithammer/fuzzysearch/fuzzy"
config "github.com/yusing/go-proxy/internal/config/types"
"github.com/yusing/go-proxy/internal/metrics/period"
metricsutils "github.com/yusing/go-proxy/internal/metrics/utils"
"github.com/yusing/go-proxy/internal/route/routes"
@@ -23,18 +23,20 @@ type (
} // @name RouteStatusesByAlias
Status struct {
Status types.HealthStatus `json:"status" swaggertype:"string" enums:"healthy,unhealthy,unknown,napping,starting"`
Latency int64 `json:"latency"`
Latency int32 `json:"latency"`
Timestamp int64 `json:"timestamp"`
} // @name RouteStatus
RouteStatuses map[string][]*Status // @name RouteStatuses
RouteStatuses map[string][]Status // @name RouteStatuses
RouteAggregate struct {
Alias string `json:"alias"`
DisplayName string `json:"display_name"`
Uptime float64 `json:"uptime"`
Downtime float64 `json:"downtime"`
Idle float64 `json:"idle"`
AvgLatency float64 `json:"avg_latency"`
Statuses []*Status `json:"statuses"`
Alias string `json:"alias"`
DisplayName string `json:"display_name"`
Uptime float32 `json:"uptime"`
Downtime float32 `json:"downtime"`
Idle float32 `json:"idle"`
AvgLatency float32 `json:"avg_latency"`
IsDocker bool `json:"is_docker"`
CurrentStatus types.HealthStatus `json:"current_status" swaggertype:"string" enums:"healthy,unhealthy,unknown,napping,starting"`
Statuses []Status `json:"statuses"`
} // @name RouteUptimeAggregate
Aggregated []RouteAggregate
)
@@ -64,9 +66,9 @@ func aggregateStatuses(entries []*StatusByAlias, query url.Values) (int, Aggrega
statuses := make(RouteStatuses)
for _, entry := range entries {
for alias, status := range entry.Map {
statuses[alias] = append(statuses[alias], &Status{
statuses[alias] = append(statuses[alias], Status{
Status: status.Status,
Latency: status.Latency.Milliseconds(),
Latency: int32(status.Latency.Milliseconds()),
Timestamp: entry.Timestamp,
})
}
@@ -81,12 +83,12 @@ func aggregateStatuses(entries []*StatusByAlias, query url.Values) (int, Aggrega
return len(statuses), statuses.aggregate(limit, offset)
}
func (rs RouteStatuses) calculateInfo(statuses []*Status) (up float64, down float64, idle float64, _ float64) {
func (rs RouteStatuses) calculateInfo(statuses []Status) (up float32, down float32, idle float32, _ float32) {
if len(statuses) == 0 {
return 0, 0, 0, 0
}
total := float64(0)
latency := float64(0)
total := float32(0)
latency := float32(0)
for _, status := range statuses {
// ignoring unknown; treating napping and starting as downtime
if status.Status == types.StatusUnknown {
@@ -101,7 +103,7 @@ func (rs RouteStatuses) calculateInfo(statuses []*Status) (up float64, down floa
down++
}
total++
latency += float64(status.Latency)
latency += float32(status.Latency)
}
if total == 0 {
return 0, 0, 0, 0
@@ -121,34 +123,41 @@ func (rs RouteStatuses) aggregate(limit int, offset int) Aggregated {
sortedAliases[i] = alias
i++
}
// unknown statuses are at the end, then sort by alias
slices.SortFunc(sortedAliases, func(a, b string) int {
if rs[a][len(rs[a])-1].Status == types.StatusUnknown {
return 1
}
if rs[b][len(rs[b])-1].Status == types.StatusUnknown {
return -1
}
return strings.Compare(a, b)
})
slices.Sort(sortedAliases)
sortedAliases = sortedAliases[beg:end]
result := make(Aggregated, len(sortedAliases))
for i, alias := range sortedAliases {
statuses := rs[alias]
up, down, idle, latency := rs.calculateInfo(statuses)
result[i] = RouteAggregate{
Alias: alias,
Uptime: up,
Downtime: down,
Idle: idle,
AvgLatency: latency,
Statuses: statuses,
}
displayName := alias
r, ok := routes.Get(alias)
if ok {
result[i].DisplayName = r.HomepageConfig().Name
} else {
result[i].DisplayName = alias
if !ok {
// also search for excluded routes
r = config.GetInstance().SearchRoute(alias)
}
if r != nil {
displayName = r.DisplayName()
}
status := types.StatusUnknown
if r != nil {
mon := r.HealthMonitor()
if mon != nil {
status = mon.Status()
}
}
result[i] = RouteAggregate{
Alias: alias,
DisplayName: displayName,
Uptime: up,
Downtime: down,
Idle: idle,
AvgLatency: latency,
CurrentStatus: status,
Statuses: statuses,
IsDocker: r != nil && r.IsDocker(),
}
}
return result