refactor(metrics): optimize JSON marshaling in SystemInfo and Aggregated structures for improved performance and memory management

This commit is contained in:
yusing
2025-09-04 06:25:07 +08:00
parent 24bcc2d2d2
commit fbe82c3082
2 changed files with 57 additions and 68 deletions

View File

@@ -1,20 +1,13 @@
package systeminfo package systeminfo
import ( import (
"encoding/json"
"fmt" "fmt"
"strconv" "strconv"
"github.com/shirou/gopsutil/v4/sensors"
"github.com/yusing/go-proxy/internal/utils/synk"
) )
var bufPool = synk.GetBytesPool()
// explicitly implement MarshalJSON to avoid reflection. // explicitly implement MarshalJSON to avoid reflection.
func (s *SystemInfo) MarshalJSON() ([]byte, error) { func (s *SystemInfo) MarshalJSON() ([]byte, error) {
b := bufPool.Get() b := make([]byte, 0, 4096)
defer bufPool.Put(b)
b = append(b, '{') b = append(b, '{')
@@ -114,15 +107,14 @@ func (s *SystemInfo) MarshalJSON() ([]byte, error) {
// sensors // sensors
b = append(b, `,"sensors":`...) b = append(b, `,"sensors":`...)
if len(s.Sensors) > 0 { if len(s.Sensors) > 0 {
b = append(b, '{') b = append(b, '[')
first := true first := true
for _, sensor := range s.Sensors { for _, sensor := range s.Sensors {
if !first { if !first {
b = append(b, ',') b = append(b, ',')
} }
b = fmt.Appendf(b, b = fmt.Appendf(b,
`"%s":{"name":"%s","temperature":%.2f,"high":%.2f,"critical":%.2f}`, `{"name":"%s","temperature":%.2f,"high":%.2f,"critical":%.2f}`,
sensor.SensorKey,
sensor.SensorKey, sensor.SensorKey,
sensor.Temperature, sensor.Temperature,
sensor.High, sensor.High,
@@ -130,7 +122,7 @@ func (s *SystemInfo) MarshalJSON() ([]byte, error) {
) )
first = false first = false
} }
b = append(b, '}') b = append(b, ']')
} else { } else {
b = append(b, "null"...) b = append(b, "null"...)
} }
@@ -139,34 +131,22 @@ func (s *SystemInfo) MarshalJSON() ([]byte, error) {
return b, nil return b, nil
} }
func (s *Sensors) UnmarshalJSON(data []byte) error {
var v map[string]map[string]any
if err := json.Unmarshal(data, &v); err != nil {
return err
}
if len(v) == 0 {
return nil
}
*s = make(Sensors, 0, len(v))
for k, v := range v {
*s = append(*s, sensors.TemperatureStat{
SensorKey: k,
Temperature: v["temperature"].(float64),
High: v["high"].(float64),
Critical: v["critical"].(float64),
})
}
return nil
}
func (result Aggregated) MarshalJSON() ([]byte, error) { func (result Aggregated) MarshalJSON() ([]byte, error) {
buf := bufPool.Get() if len(result.Entries) == 0 {
defer bufPool.Put(buf) return []byte("[]"), nil
}
capacity := 10 * 1024
if result.Mode == SystemInfoAggregateModeSensorTemperature {
// allot 30 bytes per sensor key per entry
capacity = 30 * len(result.Entries) * len(result.Entries[0])
}
buf := make([]byte, 0, capacity)
buf = append(buf, '[') buf = append(buf, '[')
i := 0 i := 0
n := len(result) n := len(result.Entries)
for _, entry := range result { for _, entry := range result.Entries {
buf = append(buf, '{') buf = append(buf, '{')
j := 0 j := 0
m := len(entry) m := len(entry)
@@ -178,10 +158,12 @@ func (result Aggregated) MarshalJSON() ([]byte, error) {
switch v := v.(type) { switch v := v.(type) {
case float64: case float64:
buf = strconv.AppendFloat(buf, v, 'f', 2, 64) buf = strconv.AppendFloat(buf, v, 'f', 2, 64)
case uint64: case int32:
buf = strconv.AppendUint(buf, v, 10) buf = strconv.AppendInt(buf, int64(v), 10)
case int64: case int64:
buf = strconv.AppendInt(buf, v, 10) buf = strconv.AppendInt(buf, v, 10)
case uint64:
buf = strconv.AppendUint(buf, v, 10)
default: default:
panic(fmt.Sprintf("unexpected type: %T", v)) panic(fmt.Sprintf("unexpected type: %T", v))
} }

View File

@@ -24,7 +24,11 @@ import (
type ( type (
Sensors []sensors.TemperatureStat // @name Sensors Sensors []sensors.TemperatureStat // @name Sensors
Aggregated []map[string]any Aggregated struct {
Entries []map[string]any
Mode SystemInfoAggregateMode
}
AggregatedJSON []map[string]any
) )
type SystemInfo struct { type SystemInfo struct {
@@ -218,37 +222,40 @@ func (s *SystemInfo) collectSensorsInfo(ctx context.Context) error {
// recharts friendly. // recharts friendly.
func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggregated) { func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggregated) {
n := len(entries) n := len(entries)
aggregated := make(Aggregated, 0, n) aggregated := Aggregated{
switch SystemInfoAggregateMode(query.Get("aggregate")) { Entries: make([]map[string]any, n),
Mode: SystemInfoAggregateMode(query.Get("aggregate")),
}
switch aggregated.Mode {
case SystemInfoAggregateModeCPUAverage: case SystemInfoAggregateModeCPUAverage:
for _, entry := range entries { for i, entry := range entries {
if entry.CPUAverage != nil { if entry.CPUAverage != nil {
aggregated = append(aggregated, map[string]any{ aggregated.Entries[i] = map[string]any{
"timestamp": entry.Timestamp, "timestamp": entry.Timestamp,
"cpu_average": *entry.CPUAverage, "cpu_average": *entry.CPUAverage,
}) }
} }
} }
case SystemInfoAggregateModeMemoryUsage: case SystemInfoAggregateModeMemoryUsage:
for _, entry := range entries { for i, entry := range entries {
if entry.Memory != nil { if entry.Memory != nil {
aggregated = append(aggregated, map[string]any{ aggregated.Entries[i] = map[string]any{
"timestamp": entry.Timestamp, "timestamp": entry.Timestamp,
"memory_usage": entry.Memory.Used, "memory_usage": entry.Memory.Used,
}) }
} }
} }
case SystemInfoAggregateModeMemoryUsagePercent: case SystemInfoAggregateModeMemoryUsagePercent:
for _, entry := range entries { for i, entry := range entries {
if entry.Memory != nil { if entry.Memory != nil {
aggregated = append(aggregated, map[string]any{ aggregated.Entries[i] = map[string]any{
"timestamp": entry.Timestamp, "timestamp": entry.Timestamp,
"memory_usage_percent": entry.Memory.UsedPercent, "memory_usage_percent": entry.Memory.UsedPercent,
}) }
} }
} }
case SystemInfoAggregateModeDisksReadSpeed: case SystemInfoAggregateModeDisksReadSpeed:
for _, entry := range entries { for i, entry := range entries {
if entry.DisksIO == nil { if entry.DisksIO == nil {
continue continue
} }
@@ -257,10 +264,10 @@ func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggre
m[name] = usage.ReadSpeed m[name] = usage.ReadSpeed
} }
m["timestamp"] = entry.Timestamp m["timestamp"] = entry.Timestamp
aggregated = append(aggregated, m) aggregated.Entries[i] = m
} }
case SystemInfoAggregateModeDisksWriteSpeed: case SystemInfoAggregateModeDisksWriteSpeed:
for _, entry := range entries { for i, entry := range entries {
if entry.DisksIO == nil { if entry.DisksIO == nil {
continue continue
} }
@@ -269,10 +276,10 @@ func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggre
m[name] = usage.WriteSpeed m[name] = usage.WriteSpeed
} }
m["timestamp"] = entry.Timestamp m["timestamp"] = entry.Timestamp
aggregated = append(aggregated, m) aggregated.Entries[i] = m
} }
case SystemInfoAggregateModeDisksIOPS: case SystemInfoAggregateModeDisksIOPS:
for _, entry := range entries { for i, entry := range entries {
if entry.DisksIO == nil { if entry.DisksIO == nil {
continue continue
} }
@@ -281,10 +288,10 @@ func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggre
m[name] = usage.Iops m[name] = usage.Iops
} }
m["timestamp"] = entry.Timestamp m["timestamp"] = entry.Timestamp
aggregated = append(aggregated, m) aggregated.Entries[i] = m
} }
case SystemInfoAggregateModeDiskUsage: case SystemInfoAggregateModeDiskUsage:
for _, entry := range entries { for i, entry := range entries {
if entry.Disks == nil { if entry.Disks == nil {
continue continue
} }
@@ -293,32 +300,32 @@ func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggre
m[name] = disk.Used m[name] = disk.Used
} }
m["timestamp"] = entry.Timestamp m["timestamp"] = entry.Timestamp
aggregated = append(aggregated, m) aggregated.Entries[i] = m
} }
case SystemInfoAggregateModeNetworkSpeed: case SystemInfoAggregateModeNetworkSpeed:
for _, entry := range entries { for i, entry := range entries {
if entry.Network == nil { if entry.Network == nil {
continue continue
} }
aggregated = append(aggregated, map[string]any{ aggregated.Entries[i] = map[string]any{
"timestamp": entry.Timestamp, "timestamp": entry.Timestamp,
"upload": entry.Network.UploadSpeed, "upload": entry.Network.UploadSpeed,
"download": entry.Network.DownloadSpeed, "download": entry.Network.DownloadSpeed,
}) }
} }
case SystemInfoAggregateModeNetworkTransfer: case SystemInfoAggregateModeNetworkTransfer:
for _, entry := range entries { for i, entry := range entries {
if entry.Network == nil { if entry.Network == nil {
continue continue
} }
aggregated = append(aggregated, map[string]any{ aggregated.Entries[i] = map[string]any{
"timestamp": entry.Timestamp, "timestamp": entry.Timestamp,
"upload": entry.Network.BytesSent, "upload": entry.Network.BytesSent,
"download": entry.Network.BytesRecv, "download": entry.Network.BytesRecv,
}) }
} }
case SystemInfoAggregateModeSensorTemperature: case SystemInfoAggregateModeSensorTemperature:
for _, entry := range entries { for i, entry := range entries {
if entry.Sensors == nil { if entry.Sensors == nil {
continue continue
} }
@@ -327,12 +334,12 @@ func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggre
m[sensor.SensorKey] = sensor.Temperature m[sensor.SensorKey] = sensor.Temperature
} }
m["timestamp"] = entry.Timestamp m["timestamp"] = entry.Timestamp
aggregated = append(aggregated, m) aggregated.Entries[i] = m
} }
default: default:
return -1, nil return -1, Aggregated{}
} }
return len(aggregated), aggregated return len(aggregated.Entries), aggregated
} }
func diff(x, y uint64) uint64 { func diff(x, y uint64) uint64 {