refactor: move some io, http and string utils to separate repo

This commit is contained in:
yusing
2025-09-27 12:46:41 +08:00
parent acecd827d6
commit 22bcf1201b
115 changed files with 125 additions and 3815 deletions

View File

@@ -1,210 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Modified from bufio.Writer by yusing <yusing@6uo.me>.
package utils
import (
"io"
"unicode/utf8"
)
// buffered output

// BufferedWriter implements buffering for an [io.Writer] object.
// If an error occurs writing to a [BufferedWriter], no more data will be
// accepted and all subsequent writes, and [BufferedWriter.Flush], will return the error.
// After all data has been written, the client should call the
// [BufferedWriter.Flush] method to guarantee all data has been forwarded to
// the underlying [io.Writer].
type BufferedWriter struct {
	err error     // sticky error: once set, every Write/Flush returns it
	buf []byte    // backing buffer, obtained from bytesPool
	n   int       // number of buffered bytes; buf[:n] is pending data
	wr  io.Writer // destination writer
}
// NewBufferedWriter returns a [BufferedWriter] whose buffer holds at least
// size bytes. When w is already a [BufferedWriter] with a large enough
// buffer, w itself is returned rather than wrapping it again.
func NewBufferedWriter(w io.Writer, size int) *BufferedWriter {
	if existing, ok := w.(*BufferedWriter); ok && len(existing.buf) >= size {
		return existing
	}
	return &BufferedWriter{
		wr:  w,
		buf: bytesPool.GetSized(size),
	}
}
// Size returns the size of the underlying buffer in bytes.
func (b *BufferedWriter) Size() int { return len(b.buf) }

// Resize flushes pending data, then makes the buffer exactly size bytes:
// reslicing in place when capacity allows, otherwise returning the old
// buffer to the pool and taking a fresh one. Returns the flush error, if any.
func (b *BufferedWriter) Resize(size int) error {
	err := b.Flush()
	if err != nil {
		return err
	}
	if cap(b.buf) >= size {
		b.buf = b.buf[:size]
	} else {
		b.Release()
		b.buf = bytesPool.GetSized(size)
	}
	b.err = nil
	b.n = 0
	return nil
}

// Release returns the buffer to the byte pool. The writer must not be used
// again afterwards unless buf is reassigned (as Resize does).
func (b *BufferedWriter) Release() {
	bytesPool.Put(b.buf)
}
// Flush writes any buffered data to the underlying [io.Writer].
func (b *BufferedWriter) Flush() error {
	switch {
	case b.err != nil:
		return b.err
	case b.n == 0:
		return nil
	}
	written, err := b.wr.Write(b.buf[:b.n])
	if err == nil && written < b.n {
		err = io.ErrShortWrite
	}
	if err == nil {
		b.n = 0
		return nil
	}
	// Keep any unwritten tail at the front of the buffer so the data is
	// not lost, then record the sticky error.
	if written > 0 && written < b.n {
		copy(b.buf[:b.n-written], b.buf[written:b.n])
	}
	b.n -= written
	b.err = err
	return err
}
// Available returns how many bytes are unused in the buffer.
func (b *BufferedWriter) Available() int { return len(b.buf) - b.n }

// AvailableBuffer returns an empty buffer with b.Available() capacity.
// This buffer is intended to be appended to and
// passed to an immediately succeeding [BufferedWriter.Write] call.
// The buffer is only valid until the next write operation on b.
func (b *BufferedWriter) AvailableBuffer() []byte {
	return b.buf[b.n:][:0]
}

// Buffered returns the number of bytes that have been written into the current buffer.
func (b *BufferedWriter) Buffered() int { return b.n }
// Write writes the contents of p into the buffer.
// It returns the number of bytes written.
// If nn < len(p), it also returns an error explaining
// why the write is short.
func (b *BufferedWriter) Write(p []byte) (nn int, err error) {
	// While p cannot fit: either bypass the buffer entirely (when it is
	// empty) or top it up and flush.
	for len(p) > b.Available() && b.err == nil {
		var n int
		if b.Buffered() == 0 {
			// Large write, empty buffer.
			// Write directly from p to avoid copy.
			n, b.err = b.wr.Write(p)
		} else {
			n = copy(b.buf[b.n:], p)
			b.n += n
			// Flush's error is deliberately not checked here: it is
			// recorded in b.err, which terminates the loop and is
			// returned below.
			b.Flush()
		}
		nn += n
		p = p[n:]
	}
	if b.err != nil {
		return nn, b.err
	}
	n := copy(b.buf[b.n:], p)
	b.n += n
	nn += n
	return nn, nil
}
// WriteByte appends a single byte to the buffer, flushing first when the
// buffer is full. It returns the sticky error, if any.
func (b *BufferedWriter) WriteByte(c byte) error {
	if b.err != nil {
		return b.err
	}
	if b.Available() <= 0 {
		if b.Flush() != nil {
			return b.err
		}
	}
	b.buf[b.n] = c
	b.n++
	return nil
}
// WriteRune writes a single Unicode code point, returning
// the number of bytes written and any error.
func (b *BufferedWriter) WriteRune(r rune) (size int, err error) {
	// Compare as uint32 to correctly handle negative runes.
	if uint32(r) < utf8.RuneSelf {
		// ASCII fast path: encodes as a single byte.
		err = b.WriteByte(byte(r))
		if err != nil {
			return 0, err
		}
		return 1, nil
	}
	if b.err != nil {
		return 0, b.err
	}
	n := b.Available()
	if n < utf8.UTFMax {
		// Not enough room for a maximal UTF-8 encoding; flush and re-check.
		if b.Flush(); b.err != nil {
			return 0, b.err
		}
		n = b.Available()
		if n < utf8.UTFMax {
			// Can only happen if buffer is silly small.
			return b.WriteString(string(r))
		}
	}
	size = utf8.EncodeRune(b.buf[b.n:], r)
	b.n += size
	return size, nil
}
// WriteString writes a string.
// It returns the number of bytes written.
// If the count is less than len(s), it also returns an error explaining
// why the write is short.
func (b *BufferedWriter) WriteString(s string) (int, error) {
	var sw io.StringWriter
	tryStringWriter := true

	nn := 0
	for len(s) > b.Available() && b.err == nil {
		var n int
		if b.Buffered() == 0 && sw == nil && tryStringWriter {
			// Check at most once whether b.wr is a StringWriter.
			sw, tryStringWriter = b.wr.(io.StringWriter)
		}
		if b.Buffered() == 0 && tryStringWriter {
			// Large write, empty buffer, and the underlying writer supports
			// WriteString: forward the write to the underlying StringWriter.
			// This avoids an extra copy.
			n, b.err = sw.WriteString(s)
		} else {
			n = copy(b.buf[b.n:], s)
			b.n += n
			// Flush error is captured in b.err and surfaced below.
			b.Flush()
		}
		nn += n
		s = s[n:]
	}
	if b.err != nil {
		return nn, b.err
	}
	n := copy(b.buf[b.n:], s)
	b.n += n
	nn += n
	return nn, nil
}

View File

@@ -6,8 +6,8 @@ require (
github.com/puzpuzpuz/xsync/v4 v4.2.0
github.com/rs/zerolog v1.34.0
github.com/stretchr/testify v1.11.1
github.com/yusing/goutils v0.2.1
go.uber.org/atomic v1.11.0
golang.org/x/text v0.29.0
)
require (
@@ -16,5 +16,6 @@ require (
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
golang.org/x/sys v0.36.0 // indirect
golang.org/x/text v0.29.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -19,6 +19,8 @@ github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/yusing/goutils v0.2.1 h1:KjoCrNO0otthaPCZPfQY+5GKsqs5+J77CxP+TNHYa/Y=
github.com/yusing/goutils v0.2.1/go.mod h1:v6RZsMRdzcts4udSg0vqUIFvaD0OaUMPTwYJZ4XnQYo=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@@ -1,259 +0,0 @@
package utils
import (
"context"
"errors"
"io"
"net/http"
"sync"
"syscall"
"github.com/yusing/godoxy/internal/utils/synk"
)
// TODO: move to "utils/io".
type (
	// FileReader identifies a file by its path.
	// NOTE(review): no methods are visible in this file — presumably
	// implemented elsewhere; confirm before relocating.
	FileReader struct {
		Path string
	}
	// ContextReader wraps an io.Reader; Read fails once ctx is done.
	ContextReader struct {
		ctx context.Context
		io.Reader
	}
	// ContextWriter wraps an io.Writer; Write fails once ctx is done.
	ContextWriter struct {
		ctx context.Context
		io.Writer
	}
	// Pipe copies from r to w until EOF, error, or context cancellation.
	Pipe struct {
		r ContextReader
		w ContextWriter
	}
	// BidirectionalPipe runs two Pipes in opposite directions concurrently.
	BidirectionalPipe struct {
		pSrcDst *Pipe
		pDstSrc *Pipe
	}
	// HookCloser wraps an io.ReadCloser and invokes hook when closed.
	HookCloser struct {
		c    io.ReadCloser
		hook func()
	}
)
// NewContextReader wraps r so that reads fail after ctx is done.
func NewContextReader(ctx context.Context, r io.Reader) *ContextReader {
	return &ContextReader{ctx: ctx, Reader: r}
}

// NewContextWriter wraps w so that writes fail after ctx is done.
func NewContextWriter(ctx context.Context, w io.Writer) *ContextWriter {
	return &ContextWriter{ctx: ctx, Writer: w}
}
// Read reads from the underlying reader unless the context is already done.
// Note: cancellation is only checked before the call — a Read that is
// already blocked in the underlying reader is not interrupted.
func (r *ContextReader) Read(p []byte) (int, error) {
	select {
	case <-r.ctx.Done():
		return 0, r.ctx.Err()
	default:
		return r.Reader.Read(p)
	}
}

// Write writes to the underlying writer unless the context is already done.
// As with Read, an in-flight Write is not interrupted by cancellation.
func (w *ContextWriter) Write(p []byte) (int, error) {
	select {
	case <-w.ctx.Done():
		return 0, w.ctx.Err()
	default:
		return w.Writer.Write(p)
	}
}
// NewPipe pairs r and w under the same context for a one-directional copy.
func NewPipe(ctx context.Context, r io.ReadCloser, w io.WriteCloser) *Pipe {
	return &Pipe{
		r: ContextReader{ctx: ctx, Reader: r},
		w: ContextWriter{ctx: ctx, Writer: w},
	}
}

// Start copies from p.r to p.w until EOF, error, or cancellation.
// Broken-pipe style errors are treated as a normal peer disconnect.
func (p *Pipe) Start() (err error) {
	err = CopyClose(&p.w, &p.r, 0)
	switch {
	case
		// NOTE: ignoring broken pipe and connection reset by peer
		errors.Is(err, syscall.EPIPE),
		errors.Is(err, syscall.ECONNRESET):
		return nil
	}
	return err
}
// NewBidirectionalPipe builds two pipes copying rw1<->rw2 in both directions.
func NewBidirectionalPipe(ctx context.Context, rw1 io.ReadWriteCloser, rw2 io.ReadWriteCloser) BidirectionalPipe {
	return BidirectionalPipe{
		pSrcDst: NewPipe(ctx, rw1, rw2),
		pDstSrc: NewPipe(ctx, rw2, rw1),
	}
}

// Start runs both directions concurrently and waits for both to finish,
// joining their errors. Uses sync.WaitGroup.Go (Go 1.25+).
func (p BidirectionalPipe) Start() error {
	var wg sync.WaitGroup
	var srcErr, dstErr error
	wg.Go(func() {
		srcErr = p.pSrcDst.Start()
	})
	wg.Go(func() {
		dstErr = p.pDstSrc.Start()
	})
	wg.Wait()
	return errors.Join(srcErr, dstErr)
}
// flushErrorInterface is satisfied by writers whose flush can fail
// (e.g. http.ResponseController-style flushers).
type flushErrorInterface interface {
	FlushError() error
}

// flusherWrapper adapts a plain http.Flusher to flushErrorInterface.
type flusherWrapper struct {
	rw http.Flusher
}

// rwUnwrapper is the conventional interface of ResponseWriter wrappers.
type rwUnwrapper interface {
	Unwrap() http.ResponseWriter
}

// FlushError flushes and always reports success (http.Flusher cannot fail).
func (f *flusherWrapper) FlushError() error {
	f.rw.Flush()
	return nil
}
// getHTTPFlusher returns a flush-capable view of dst, or nil when dst
// cannot flush. ResponseWriter wrappers are unwrapped once up front so the
// copy loop does not repeat these type checks on every chunk.
func getHTTPFlusher(dst io.Writer) flushErrorInterface {
	rw, ok := dst.(http.ResponseWriter)
	if !ok {
		return nil
	}
	for {
		switch w := rw.(type) {
		case flushErrorInterface:
			return w
		case http.Flusher:
			return &flusherWrapper{rw: w}
		case rwUnwrapper:
			rw = w.Unwrap()
		default:
			return nil
		}
	}
}
// copyBufSize caps the copy buffer at the pool's sized-allocation threshold.
const copyBufSize = synk.SizedPoolThreshold

// bytesPool is the shared byte-slice pool used throughout this package.
var bytesPool = synk.GetBytesPool()
// ReadAllBody reads the body of the response into a buffer and returns it and a function to release the buffer.
// When Content-Length is known, a pooled buffer of exactly that size is
// filled with io.ReadFull; otherwise the body is read with io.ReadAll and
// the resulting buffer is still handed to the pool on release (the pool
// accepts arbitrary slices). The caller must not use buf after release().
func ReadAllBody(resp *http.Response) (buf []byte, release func(), err error) {
	if contentLength := resp.ContentLength; contentLength > 0 {
		buf = bytesPool.GetSized(int(contentLength))
		_, err = io.ReadFull(resp.Body, buf)
		if err != nil {
			bytesPool.Put(buf)
			return nil, nil, err
		}
		return buf, func() { bytesPool.Put(buf) }, nil
	}
	buf, err = io.ReadAll(resp.Body)
	if err != nil {
		bytesPool.Put(buf)
		return nil, nil, err
	}
	return buf, func() { bytesPool.Put(buf) }, nil
}
// NewHookCloser wraps a io.ReadCloser and calls the hook function when the closer is closed.
func NewHookCloser(c io.ReadCloser, hook func()) *HookCloser {
	return &HookCloser{hook: hook, c: c}
}

// Close calls the hook function and closes the underlying reader.
// The hook runs before the close and on every Close call.
func (r *HookCloser) Close() error {
	r.hook()
	return r.c.Close()
}

// Read reads from the underlying reader.
func (r *HookCloser) Read(p []byte) (int, error) {
	return r.c.Read(p)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// This is a copy of io.Copy with context and HTTP flusher handling
// Author: yusing <yusing@6uo.me>.

// CopyClose streams src into dst with a pooled buffer, flushing HTTP
// response writers after every chunk. When either side is an io.Closer,
// both are closed as soon as either context is cancelled, which unblocks
// an in-flight Read/Write. io.EOF is treated as success.
func CopyClose(dst *ContextWriter, src *ContextReader, sizeHint int) (err error) {
	size := copyBufSize
	if l, ok := src.Reader.(*io.LimitedReader); ok {
		// Shrink the buffer to the reader's remaining budget (min 1),
		// mirroring io.Copy's LimitedReader handling.
		if int64(size) > l.N {
			if l.N < 1 {
				size = 1
			} else {
				size = int(l.N)
			}
		}
	} else if sizeHint > 0 {
		size = min(size, sizeHint)
	}
	buf := bytesPool.GetSized(size)
	defer bytesPool.Put(buf)
	// close both as soon as one of them is done
	wCloser, wCanClose := dst.Writer.(io.Closer)
	rCloser, rCanClose := src.Reader.(io.Closer)
	if wCanClose || rCanClose {
		go func() {
			select {
			case <-src.ctx.Done():
			case <-dst.ctx.Done():
			}
			if rCanClose {
				defer rCloser.Close()
			}
			if wCanClose {
				defer wCloser.Close()
			}
		}()
	}
	flusher := getHTTPFlusher(dst.Writer)
	for {
		nr, er := src.Reader.Read(buf)
		if nr > 0 {
			nw, ew := dst.Writer.Write(buf[0:nr])
			if nw < 0 || nr < nw {
				// Defensive: a writer reporting more bytes than given
				// (or a negative count) is broken.
				nw = 0
				if ew == nil {
					ew = errors.New("invalid write result")
				}
			}
			if ew != nil {
				err = ew
				return
			}
			if nr != nw {
				err = io.ErrShortWrite
				return
			}
			// Flush each chunk so streaming responses reach the client
			// immediately.
			if flusher != nil {
				err = flusher.FlushError()
				if err != nil {
					return err
				}
			}
		}
		if er != nil {
			// io.EOF means the source is exhausted — not an error.
			if er != io.EOF {
				err = er
			}
			return
		}
	}
}
// CopyCloseWithContext is a convenience wrapper around CopyClose that binds
// both ends of the copy to the same context.
func CopyCloseWithContext(ctx context.Context, dst io.Writer, src io.Reader, sizeHint int) (err error) {
	return CopyClose(NewContextWriter(ctx, dst), NewContextReader(ctx, src), sizeHint)
}

View File

@@ -3,7 +3,7 @@ package utils
import (
"reflect"
"github.com/yusing/godoxy/internal/utils/strutils"
strutils "github.com/yusing/goutils/strings"
)
func NearestField(input string, s any) string {

View File

@@ -1,47 +0,0 @@
package ansi
import (
"regexp"
)
// ansiRegexp matches ANSI SGR (color/style) escape sequences.
var ansiRegexp = regexp.MustCompile(`\x1b\[[0-9;]*m`)

// Basic SGR codes and the composed highlight styles built from them.
const (
	BrightRed    = "\x1b[91m"
	BrightGreen  = "\x1b[92m"
	BrightYellow = "\x1b[93m"
	BrightCyan   = "\x1b[96m"
	BrightWhite  = "\x1b[97m"
	Bold         = "\x1b[1m"
	Reset        = "\x1b[0m"

	HighlightRed    = BrightRed + Bold
	HighlightGreen  = BrightGreen + Bold
	HighlightYellow = BrightYellow + Bold
	HighlightCyan   = BrightCyan + Bold
	HighlightWhite  = BrightWhite + Bold
)

// WithANSI wraps s in the given ANSI style followed by a reset.
func WithANSI(s string, ansi string) string {
	return ansi + s + Reset
}

// Error styles s as a highlighted error (bold bright red).
func Error(s string) string { return WithANSI(s, HighlightRed) }

// Success styles s as a highlighted success (bold bright green).
func Success(s string) string { return WithANSI(s, HighlightGreen) }

// Warning styles s as a highlighted warning (bold bright yellow).
func Warning(s string) string { return WithANSI(s, HighlightYellow) }

// Info styles s as highlighted info (bold bright cyan).
func Info(s string) string { return WithANSI(s, HighlightCyan) }

// StripANSI removes all SGR escape sequences from s.
func StripANSI(s string) string {
	return ansiRegexp.ReplaceAllString(s, "")
}

View File

@@ -1,11 +0,0 @@
package strutils
import "strings"
// IsValidFilename checks if a filename is safe and doesn't contain path traversal attempts
// Returns true if the filename is valid, false otherwise
func IsValidFilename(filename string) bool {
	if strings.ContainsAny(filename, `/\`) {
		return false
	}
	return !strings.Contains(filename, "..")
}

View File

@@ -1,221 +0,0 @@
package strutils
import (
"fmt"
"math"
"strconv"
"time"
)
// AppendDuration appends a human-readable rendering of d to buf and
// returns the extended buffer:
//   - 1 ns
//   - 1 ms
//   - 45 seconds
//   - 1 minute and 1 second
//   - 1 hour, 1 minute and 1 second
//   - 1 day, 1 hour and 1 minute (seconds are dropped when days >= 1)
//
// Fix over the previous version: the offset of the last ", " separator was
// unconditionally taken as len(buf)-2 before each part, even when that part
// had no predecessor. With a non-empty caller-supplied prefix in buf this
// pointed into the prefix and the in-place " and " splice corrupted the
// output. The separator offset is now tracked only when a previous
// component was actually written.
func AppendDuration(d time.Duration, buf []byte) []byte {
	if d < 0 {
		buf = append(buf, '-')
		d = -d
	}
	if d == 0 {
		return append(buf, "0 Seconds"...)
	}
	switch {
	case d < time.Millisecond:
		buf = strconv.AppendInt(buf, d.Nanoseconds(), 10)
		return append(buf, " ns"...)
	case d < time.Second:
		buf = strconv.AppendInt(buf, d.Milliseconds(), 10)
		return append(buf, " ms"...)
	}
	// Get total seconds from duration
	totalSeconds := int64(d.Seconds())
	// Calculate days, hours, minutes, and seconds
	days := totalSeconds / (24 * 3600)
	hours := (totalSeconds % (24 * 3600)) / 3600
	minutes := (totalSeconds % 3600) / 60
	seconds := totalSeconds % 60

	// appendPart appends "<n> <unit>[s], " (plural for n > 1).
	appendPart := func(n int64, unit string) {
		buf = strconv.AppendInt(buf, n, 10)
		buf = append(buf, ' ')
		buf = append(buf, unit...)
		if n > 1 {
			buf = append(buf, 's')
		}
		buf = append(buf, ", "...)
	}

	// lastSep is the byte offset of the ", " written before the most
	// recently appended part, or -1 when that part had no predecessor.
	lastSep := -1
	wrote := false
	if days > 0 {
		appendPart(days, "day")
		wrote = true
	}
	if hours > 0 {
		if wrote {
			lastSep = len(buf) - 2
		}
		appendPart(hours, "hour")
		wrote = true
	}
	if minutes > 0 {
		if wrote {
			lastSep = len(buf) - 2
		}
		appendPart(minutes, "minute")
		wrote = true
	}
	if seconds > 0 && totalSeconds < 3600 {
		if wrote {
			lastSep = len(buf) - 2
		}
		appendPart(seconds, "second")
	}
	// remove last comma and space
	buf = buf[:len(buf)-2]
	if lastSep > 0 && lastSep < len(buf) {
		// replace last part ', ' with ' and ' in-place, alloc-free
		// ', ' is 2 bytes, ' and ' is 5 bytes, so we need to make room for 3 more bytes
		tailLen := len(buf) - (lastSep + 2)
		buf = append(buf, "000"...)                                  // append 3 bytes for ' and '
		copy(buf[lastSep+5:], buf[lastSep+2:lastSep+2+tailLen])      // shift tail right by 3
		copy(buf[lastSep:], " and ")                                 // overwrite ', ' with ' and '
	}
	return buf
}
// FormatDuration renders d via AppendDuration into a fresh string.
func FormatDuration(d time.Duration) string {
	return string(AppendDuration(d, nil))
}
// FormatLastSeen renders t relative to now, or "never" for the zero time.
func FormatLastSeen(t time.Time) string {
	if t.IsZero() {
		return "never"
	}
	return FormatTime(t)
}
// appendRound appends f rounded to the nearest integer, in base 10.
func appendRound(f float64, buf []byte) []byte {
	return strconv.AppendInt(buf, int64(math.Round(f)), 10)
}

// appendFloat appends f rounded to two decimal places.
// Note: a value that rounds to exactly 0 appends nothing at all.
func appendFloat(f float64, buf []byte) []byte {
	rounded := math.Round(f*100) / 100
	if rounded == 0 {
		return buf
	}
	return strconv.AppendFloat(buf, rounded, 'f', -1, 64)
}
// AppendTime appends t rendered relative to time.Now(), or "never" for the
// zero time.
func AppendTime(t time.Time, buf []byte) []byte {
	if t.IsZero() {
		return append(buf, []byte("never")...)
	}
	return AppendTimeWithReference(t, time.Now(), buf)
}

// FormatTime renders t relative to now as a fresh string.
func FormatTime(t time.Time) string {
	return string(AppendTime(t, nil))
}
// FormatUnixTime renders a Unix timestamp (seconds) relative to now.
func FormatUnixTime(t int64) string {
	return FormatTime(time.Unix(t, 0))
}

// FormatTimeWithReference renders t relative to the given reference time.
func FormatTimeWithReference(t, ref time.Time) string {
	return string(AppendTimeWithReference(t, ref, nil))
}
// AppendTimeWithReference appends a human-readable rendering of t relative
// to ref: "now"/"just now" for near times, "N seconds/minutes/hours ago" or
// "in N ..." within a day, a month-day timestamp within the same year, and
// a full date otherwise. The zero time renders as "never".
func AppendTimeWithReference(t, ref time.Time, buf []byte) []byte {
	if t.IsZero() {
		return append(buf, []byte("never")...)
	}
	diff := t.Sub(ref)
	absDiff := diff.Abs()
	switch {
	case absDiff < time.Second:
		return append(buf, []byte("now")...)
	case absDiff < 3*time.Second:
		// Only past times within 3s say "just now"; future times fall
		// through to the "in N seconds" branch below.
		if diff < 0 {
			return append(buf, []byte("just now")...)
		}
		fallthrough
	case absDiff < 60*time.Second:
		// NOTE(review): counts are rounded and not singularized, so
		// "in 1 seconds" is possible — confirm this is acceptable.
		if diff < 0 {
			buf = appendRound(absDiff.Seconds(), buf)
			buf = append(buf, []byte(" seconds ago")...)
		} else {
			buf = append(buf, []byte("in ")...)
			buf = appendRound(absDiff.Seconds(), buf)
			buf = append(buf, []byte(" seconds")...)
		}
		return buf
	case absDiff < 60*time.Minute:
		if diff < 0 {
			buf = appendRound(absDiff.Minutes(), buf)
			buf = append(buf, []byte(" minutes ago")...)
		} else {
			buf = append(buf, []byte("in ")...)
			buf = appendRound(absDiff.Minutes(), buf)
			buf = append(buf, []byte(" minutes")...)
		}
		return buf
	case absDiff < 24*time.Hour:
		if diff < 0 {
			buf = appendRound(absDiff.Hours(), buf)
			buf = append(buf, []byte(" hours ago")...)
		} else {
			buf = append(buf, []byte("in ")...)
			buf = appendRound(absDiff.Hours(), buf)
			buf = append(buf, []byte(" hours")...)
		}
		return buf
	case t.Year() == ref.Year():
		return t.AppendFormat(buf, "01-02 15:04:05")
	default:
		return t.AppendFormat(buf, "2006-01-02 15:04:05")
	}
}
// FormatByteSize renders size via AppendByteSize into a fresh string.
func FormatByteSize[T ~int | ~uint | ~int64 | ~uint64 | ~float64](size T) string {
	return string(AppendByteSize(size, nil))
}
// AppendByteSize appends size rendered with binary units (B..PiB), using up
// to two decimal places for fractional unit counts.
//
// Behavior notes preserved from the original: the type switch matches only
// the exact types int/int64/uint/uint64/float64 (a named ~int type below
// 1 KiB appends no digits), and a fractional value rounding to 0 appends
// nothing before the unit.
func AppendByteSize[T ~int | ~uint | ~int64 | ~uint64 | ~float64](size T, buf []byte) []byte {
	const (
		_ = (1 << (10 * iota))
		kb
		mb
		gb
		tb
		pb
	)
	// frac appends f rounded to two decimals, skipping values that round
	// to zero (inlined equivalent of appendFloat).
	frac := func(f float64) {
		f = math.Round(f*100) / 100
		if f != 0 {
			buf = strconv.AppendFloat(buf, f, 'f', -1, 64)
		}
	}
	switch {
	case size < kb:
		switch any(size).(type) {
		case int, int64:
			buf = strconv.AppendInt(buf, int64(size), 10)
		case uint, uint64:
			buf = strconv.AppendUint(buf, uint64(size), 10)
		case float64:
			frac(float64(size))
		}
		buf = append(buf, " B"...)
	case size < mb:
		frac(float64(size) / kb)
		buf = append(buf, " KiB"...)
	case size < gb:
		frac(float64(size) / mb)
		buf = append(buf, " MiB"...)
	case size < tb:
		frac(float64(size) / gb)
		buf = append(buf, " GiB"...)
	case size < pb:
		frac(float64(size/gb) / kb)
		buf = append(buf, " TiB"...)
	default:
		frac(float64(size/tb) / kb)
		buf = append(buf, " PiB"...)
	}
	return buf
}
// Pluralize returns the suffix "s" when n calls for a plural unit
// ("0 seconds", "2 seconds") and "" for exactly one ("1 second").
//
// Fix: the previous version returned "" for n == 0 and for negative
// counts, producing ungrammatical output such as "0 second"; standard
// English pluralizes everything except exactly 1.
func Pluralize(n int64) string {
	if n == 1 {
		return ""
	}
	return "s"
}

View File

@@ -1,302 +0,0 @@
package strutils_test
import (
"testing"
"time"
"github.com/stretchr/testify/require"
. "github.com/yusing/godoxy/internal/utils/strutils"
)
// mustParseTime parses value with the given layout and fails the test
// immediately on error. (Avoids shadowing the time package, unlike the
// previous version.)
func mustParseTime(t *testing.T, layout, value string) time.Time {
	t.Helper()
	parsed, err := time.Parse(layout, value)
	if err != nil {
		t.Fatalf("failed to parse time: %s", err)
	}
	return parsed
}
// TestFormatTime pins the relative-time buckets of FormatTimeWithReference
// against a fixed reference instant. Cases with expectedLength only check
// the rendered width (exact output depends on the date format).
func TestFormatTime(t *testing.T) {
	now := mustParseTime(t, time.RFC3339, "2021-06-15T12:30:30Z")
	tests := []struct {
		name           string
		time           time.Time
		expected       string
		expectedLength int
	}{
		{
			name:     "now",
			time:     now.Add(100 * time.Millisecond),
			expected: "now",
		},
		{
			name:     "just now (past within 3 seconds)",
			time:     now.Add(-1 * time.Second),
			expected: "just now",
		},
		{
			name:     "seconds ago",
			time:     now.Add(-10 * time.Second),
			expected: "10 seconds ago",
		},
		{
			name:     "in seconds",
			time:     now.Add(10 * time.Second),
			expected: "in 10 seconds",
		},
		{
			name:     "minutes ago",
			time:     now.Add(-10 * time.Minute),
			expected: "10 minutes ago",
		},
		{
			name:     "in minutes",
			time:     now.Add(10 * time.Minute),
			expected: "in 10 minutes",
		},
		{
			name:     "hours ago",
			time:     now.Add(-10 * time.Hour),
			expected: "10 hours ago",
		},
		{
			name:     "in hours",
			time:     now.Add(10 * time.Hour),
			expected: "in 10 hours",
		},
		{
			name:           "different day",
			time:           now.Add(-25 * time.Hour),
			expectedLength: len("01-01 15:04:05"),
		},
		{
			name:           "same year but different month",
			time:           now.Add(-30 * 24 * time.Hour),
			expectedLength: len("01-01 15:04:05"),
		},
		{
			name:     "different year",
			time:     time.Date(now.Year()-1, 1, 1, 10, 20, 30, 0, now.Location()),
			expected: time.Date(now.Year()-1, 1, 1, 10, 20, 30, 0, now.Location()).Format("2006-01-02 15:04:05"),
		},
		{
			name:     "zero time",
			time:     time.Time{},
			expected: "never",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := FormatTimeWithReference(tt.time, now)
			if tt.expectedLength > 0 {
				require.Len(t, result, tt.expectedLength)
			} else {
				require.Equal(t, tt.expected, result)
			}
		})
	}
}
// TestFormatDuration pins FormatDuration's unit selection, pluralization,
// ", "/" and " joining, and the rule that seconds are dropped once the
// duration reaches one hour.
func TestFormatDuration(t *testing.T) {
	tests := []struct {
		name     string
		duration time.Duration
		expected string
	}{
		{
			name:     "zero duration",
			duration: 0,
			expected: "0 Seconds",
		},
		{
			name:     "seconds only",
			duration: 45 * time.Second,
			expected: "45 seconds",
		},
		{
			name:     "one second",
			duration: 1 * time.Second,
			expected: "1 second",
		},
		{
			name:     "minutes only",
			duration: 5 * time.Minute,
			expected: "5 minutes",
		},
		{
			name:     "one minute",
			duration: 1 * time.Minute,
			expected: "1 minute",
		},
		{
			name:     "hours only",
			duration: 3 * time.Hour,
			expected: "3 hours",
		},
		{
			name:     "one hour",
			duration: 1 * time.Hour,
			expected: "1 hour",
		},
		{
			name:     "days only",
			duration: 2 * 24 * time.Hour,
			expected: "2 days",
		},
		{
			name:     "one day",
			duration: 24 * time.Hour,
			expected: "1 day",
		},
		{
			name:     "complex duration",
			duration: 2*24*time.Hour + 3*time.Hour + 45*time.Minute + 15*time.Second,
			expected: "2 days, 3 hours and 45 minutes",
		},
		{
			name:     "hours and minutes",
			duration: 2*time.Hour + 30*time.Minute,
			expected: "2 hours and 30 minutes",
		},
		{
			name:     "days and hours",
			duration: 1*24*time.Hour + 12*time.Hour,
			expected: "1 day and 12 hours",
		},
		{
			name:     "days and hours and minutes",
			duration: 1*24*time.Hour + 12*time.Hour + 30*time.Minute,
			expected: "1 day, 12 hours and 30 minutes",
		},
		{
			name:     "days and hours and minutes and seconds (ignore seconds)",
			duration: 1*24*time.Hour + 12*time.Hour + 30*time.Minute + 15*time.Second,
			expected: "1 day, 12 hours and 30 minutes",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := FormatDuration(tt.duration)
			require.Equal(t, tt.expected, result)
		})
	}
}
// TestFormatLastSeen checks that the zero time renders as "never" and that
// any non-zero time renders as something else; exact formatting of the
// non-zero case is covered by TestFormatTime.
func TestFormatLastSeen(t *testing.T) {
	now := time.Now()
	tests := []struct {
		name     string
		time     time.Time
		expected string
	}{
		{
			name:     "zero time",
			time:     time.Time{},
			expected: "never",
		},
		{
			name: "non-zero time",
			time: now.Add(-10 * time.Minute),
			// The actual result will be handled by FormatTime, which is tested separately
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := FormatLastSeen(tt.time)
			if tt.name == "zero time" {
				require.Equal(t, tt.expected, result)
			} else if result == "never" { // Just make sure it's not "never", the actual formatting is tested in TestFormatTime
				t.Errorf("Expected non-zero time to not return 'never', got %s", result)
			}
		})
	}
}
// TestFormatByteSize pins the unit boundaries of FormatByteSize for int64
// inputs, one case just below and at each power-of-1024 threshold.
func TestFormatByteSize(t *testing.T) {
	tests := []struct {
		name     string
		size     int64
		expected string
	}{
		{
			name:     "zero size",
			size:     0,
			expected: "0 B",
		},
		{
			name:     "one byte",
			size:     1,
			expected: "1 B",
		},
		{
			name:     "bytes (less than 1 KiB)",
			size:     1023,
			expected: "1023 B",
		},
		{
			name:     "1 KiB",
			size:     1024,
			expected: "1 KiB",
		},
		{
			name:     "KiB (less than 1 MiB)",
			size:     1024 * 1023,
			expected: "1023 KiB",
		},
		{
			name:     "1 MiB",
			size:     1024 * 1024,
			expected: "1 MiB",
		},
		{
			name:     "MiB (less than 1 GiB)",
			size:     1024 * 1024 * 1023,
			expected: "1023 MiB",
		},
		{
			name:     "1 GiB",
			size:     1024 * 1024 * 1024,
			expected: "1 GiB",
		},
		{
			name:     "GiB (less than 1 TiB)",
			size:     1024 * 1024 * 1024 * 1023,
			expected: "1023 GiB",
		},
		{
			name:     "1 TiB",
			size:     1024 * 1024 * 1024 * 1024,
			expected: "1 TiB",
		},
		{
			name:     "TiB (less than 1 PiB)",
			size:     1024 * 1024 * 1024 * 1024 * 1023,
			expected: "1023 TiB",
		},
		{
			name:     "1 PiB",
			size:     1024 * 1024 * 1024 * 1024 * 1024,
			expected: "1 PiB",
		},
		{
			name:     "PiB (large number)",
			size:     1024 * 1024 * 1024 * 1024 * 1024 * 1023,
			expected: "1023 PiB",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := FormatByteSize(tt.size)
			require.Equal(t, tt.expected, result)
		})
	}
}

View File

@@ -1,26 +0,0 @@
package strutils
import (
"reflect"
)
// Parser is implemented by types that can initialize themselves from a
// string representation.
type Parser interface {
	Parse(value string) error
}

// Parse builds a T from its string representation.
// When T is a pointer type, a fresh zero value is allocated first so the
// returned t is non-nil.
// NOTE(review): for a non-pointer T with a value-receiver Parse, mutations
// inside Parse are lost; and instantiating T with an interface type would
// make reflect.TypeOf(t) nil and panic — confirm T is always a concrete
// pointer type in practice.
func Parse[T Parser](from string) (t T, err error) {
	tt := reflect.TypeOf(t)
	if tt.Kind() == reflect.Ptr {
		t = reflect.New(tt.Elem()).Interface().(T)
	}
	err = t.Parse(from)
	return t, err
}

// MustParse is like Parse but panics on error.
func MustParse[T Parser](from string) T {
	t, err := Parse[T](from)
	if err != nil {
		panic("must failed: " + err.Error())
	}
	return t
}

View File

@@ -1,81 +0,0 @@
package strutils
import (
"math"
"strings"
)
// SplitRune is like strings.Split but takes a rune as separator.
// A zero separator splits s into its UTF-8 characters, mirroring
// strings.Split with an empty separator.
func SplitRune(s string, sep rune) []string {
	if sep == 0 {
		return strings.Split(s, "")
	}
	count := strings.Count(s, string(sep)) + 1
	if count > len(s)+1 {
		count = len(s) + 1
	}
	parts := make([]string, 0, count)
	for len(parts) < count-1 {
		idx := strings.IndexRune(s, sep)
		if idx < 0 {
			break
		}
		parts = append(parts, s[:idx])
		s = s[idx+1:]
	}
	// The remainder (possibly empty) is always the final element.
	return append(parts, s)
}
// SplitComma is a wrapper around SplitRune(s, ',').
func SplitComma(s string) []string {
	return SplitRune(s, ',')
}

// SplitLine is a wrapper around SplitRune(s, '\n').
func SplitLine(s string) []string {
	return SplitRune(s, '\n')
}

// SplitSpace is a wrapper around SplitRune(s, ' ').
func SplitSpace(s string) []string {
	return SplitRune(s, ' ')
}
// JoinRune is like strings.Join but takes a rune as separator.
// A zero separator joins with no separator at all, and the function panics
// when the combined length would overflow int (matching strings.Join).
func JoinRune(elems []string, sep rune) string {
	if len(elems) == 0 {
		return ""
	}
	if len(elems) == 1 {
		return elems[0]
	}
	if sep == 0 {
		return strings.Join(elems, "")
	}
	total := 0
	for _, elem := range elems {
		if len(elem) > math.MaxInt-total {
			panic("strings: Join output length overflow")
		}
		total += len(elem)
	}
	var sb strings.Builder
	sb.Grow(total)
	sb.WriteString(elems[0])
	for _, elem := range elems[1:] {
		sb.WriteRune(sep)
		sb.WriteString(elem)
	}
	return sb.String()
}
// JoinLines is a wrapper around JoinRune(elems, '\n').
func JoinLines(elems []string) string {
	return JoinRune(elems, '\n')
}

View File

@@ -1,62 +0,0 @@
package strutils_test
import (
"strings"
"testing"
"github.com/stretchr/testify/require"
. "github.com/yusing/godoxy/internal/utils/strutils"
)
// alphaNumeric is a comma-separated fixture like "aA,bB,...,zZ,0,...,9,".
// Note the trailing comma: splitting on ',' therefore yields a final empty
// element, which the round-trip tests rely on.
var alphaNumeric = func() string {
	var s strings.Builder
	for i := range 'z' - 'a' + 1 {
		s.WriteRune('a' + i)
		s.WriteRune('A' + i)
		s.WriteRune(',')
	}
	for i := range '9' - '0' + 1 {
		s.WriteRune('0' + i)
		s.WriteRune(',')
	}
	return s.String()
}()
// TestSplit checks SplitRune/JoinRune against strings.Split for an empty
// separator (the rune-0 path), a separator present in the fixture ('1'),
// and the fixture's own delimiter (','), including a full round trip.
func TestSplit(t *testing.T) {
	tests := map[string]rune{
		"":  0,
		"1": '1',
		",": ',',
	}
	for sep, rsep := range tests {
		t.Run(sep, func(t *testing.T) {
			expected := strings.Split(alphaNumeric, sep)
			require.Equal(t, expected, SplitRune(alphaNumeric, rsep))
			require.Equal(t, alphaNumeric, JoinRune(expected, rsep))
		})
	}
}
// BenchmarkSplitRune measures SplitRune against the stdlib equivalent below.
func BenchmarkSplitRune(b *testing.B) {
	for range b.N {
		SplitRune(alphaNumeric, ',')
	}
}

// BenchmarkSplitRuneStdlib is the strings.Split baseline for comparison.
func BenchmarkSplitRuneStdlib(b *testing.B) {
	for range b.N {
		strings.Split(alphaNumeric, ",")
	}
}

// BenchmarkJoinRune measures a split+join round trip via JoinRune.
func BenchmarkJoinRune(b *testing.B) {
	for range b.N {
		JoinRune(SplitRune(alphaNumeric, ','), ',')
	}
}

// BenchmarkJoinRuneStdlib is the strings.Join baseline for comparison.
func BenchmarkJoinRuneStdlib(b *testing.B) {
	for range b.N {
		strings.Join(SplitRune(alphaNumeric, ','), ",")
	}
}

View File

@@ -1,7 +0,0 @@
package strutils
import (
"strconv"
)
// Atoi is an alias of strconv.Atoi, re-exported for convenience.
var Atoi = strconv.Atoi

View File

@@ -1,98 +0,0 @@
package strutils
import (
"strings"
"golang.org/x/text/cases"
"golang.org/x/text/language"
)
// CommaSeperatedList returns a list of strings split by commas,
// then trim spaces from each element.
// An empty input yields an empty (non-nil) slice.
func CommaSeperatedList(s string) []string {
	if s == "" {
		return []string{}
	}
	parts := strings.Split(s, ",")
	for i := range parts {
		parts[i] = strings.TrimSpace(parts[i])
	}
	return parts
}
// caseTitle is a shared title caser for American English.
// NOTE(review): a cases.Caser may carry internal state — confirm Title is
// not called concurrently from multiple goroutines, or make this per-call.
var caseTitle = cases.Title(language.AmericanEnglish)

// Title returns s converted to title case using American English rules.
func Title(s string) string {
	return caseTitle.String(s)
}
// ContainsFold reports whether substr occurs in s, ignoring case.
func ContainsFold(s, substr string) bool {
	return strings.Contains(strings.ToLower(s), strings.ToLower(substr))
}
// IndexFold returns the index of the first case-insensitive occurrence of
// substr in s, or -1 if absent.
// NOTE(review): the index is computed on strings.ToLower(s); for characters
// whose lower-case form changes byte length (e.g. 'İ'), it may not be a
// valid index into the original s — confirm callers only use it as a
// found/not-found signal.
func IndexFold(s, substr string) int {
	return strings.Index(strings.ToLower(s), strings.ToLower(substr))
}
// ToLowerNoSnake lower-cases ASCII letters and drops every underscore.
// Non-ASCII runes are passed through unchanged.
func ToLowerNoSnake(s string) string {
	return strings.Map(func(r rune) rune {
		switch {
		case r == '_':
			return -1 // strings.Map drops runes mapped to a negative value
		case r >= 'A' && r <= 'Z':
			return r + ('a' - 'A')
		default:
			return r
		}
	}, s)
}
// LevenshteinDistance returns the minimum number of single-character edits
// (insertions, deletions, substitutions) needed to turn a into b, using the
// two-row dynamic-programming formulation. Distances are computed over
// bytes, so multi-byte UTF-8 characters count as several edits.
//
// Fix: the previous implementation minimized each cell with the min3 helper,
// whose strict comparisons returned the wrong (larger) value when its first
// two arguments tied below the third, e.g. LevenshteinDistance("aba", "bab")
// returned 3 instead of 2. The built-in min handles ties correctly.
//
//nolint:intrange
func LevenshteinDistance(a, b string) int {
	if a == b {
		return 0
	}
	if len(a) == 0 {
		return len(b)
	}
	if len(b) == 0 {
		return len(a)
	}
	v0 := make([]int, len(b)+1)
	v1 := make([]int, len(b)+1)
	for i := 0; i <= len(b); i++ {
		v0[i] = i
	}
	for i := 0; i < len(a); i++ {
		v1[0] = i + 1
		for j := 0; j < len(b); j++ {
			cost := 0
			if a[i] != b[j] {
				cost = 1
			}
			// deletion, insertion, substitution/match
			v1[j+1] = min(v1[j]+1, v0[j+1]+1, v0[j]+cost)
		}
		copy(v0, v1)
	}
	return v1[len(b)]
}
// min3 returns the smallest of a, b and c.
//
// Fix: the previous version used strict comparisons (a < b && a < c etc.),
// so whenever the two smallest values tied it fell through and returned c —
// e.g. min3(2, 2, 3) returned 3. Delegating to the built-in min (Go 1.21+)
// is correct for all tie combinations.
func min3(a, b, c int) int {
	return min(a, b, c)
}

View File

@@ -1,26 +0,0 @@
package strutils
import (
"path"
"strings"
)
// SanitizeURI sanitizes a URI reference to ensure it is safe
// It disallows URLs beginning with // or /\ as absolute URLs,
// cleans the URL path to remove any .. or . path elements,
// and ensures the URL starts with a / if it doesn't already
func SanitizeURI(uri string) string {
	switch {
	case uri == "":
		return "/"
	case strings.HasPrefix(uri, "http://"), strings.HasPrefix(uri, "https://"):
		return uri
	}
	if !strings.HasPrefix(uri, "/") {
		uri = "/" + uri
	}
	// reject protocol-relative ("//...") and backslash ("/\...") forms
	if len(uri) > 1 && (uri[1] == '/' || uri[1] == '\\') {
		return "/"
	}
	return path.Clean(uri)
}

View File

@@ -1,63 +0,0 @@
package strutils
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestSanitizeURI pins SanitizeURI's handling of empty input, relative
// paths, dot segments, duplicate slashes, and the rejected protocol-relative
// ("//") and backslash ("/\") prefixes.
func TestSanitizeURI(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "empty string",
			input:    "",
			expected: "/",
		},
		{
			name:     "single slash",
			input:    "/",
			expected: "/",
		},
		{
			name:     "normal path",
			input:    "/path/to/resource",
			expected: "/path/to/resource",
		},
		{
			name:     "path without leading slash",
			input:    "path/to/resource",
			expected: "/path/to/resource",
		},
		{
			name:     "path with dot segments",
			input:    "/path/./to/../resource",
			expected: "/path/resource",
		},
		{
			name:     "double slash prefix",
			input:    "//path/to/resource",
			expected: "/",
		},
		{
			name:     "backslash prefix",
			input:    "/\\path/to/resource",
			expected: "/",
		},
		{
			name:     "path with multiple slashes",
			input:    "/path//to///resource",
			expected: "/path/to/resource",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := SanitizeURI(tt.input)
			require.Equal(t, tt.expected, result)
		})
	}
}

View File

@@ -1,238 +0,0 @@
package synk
import (
"sync/atomic"
"unsafe"
"weak"
)
// weakBuf is a weak reference to a pooled byte slice, which lets the GC
// reclaim idle pooled buffers under memory pressure.
type weakBuf = weak.Pointer[[]byte]

// makeWeak creates a weak pointer to b.
func makeWeak(b *[]byte) weakBuf {
	return weak.Make(b)
}

// getBufFromWeak resolves w, returning nil when the underlying buffer has
// already been garbage-collected.
func getBufFromWeak(w weakBuf) []byte {
	ptr := w.Value()
	if ptr != nil {
		return *ptr
	}
	return nil
}
// BytesPool recycles byte slices via weak references in two buckets:
// sizedPool for large GetSized allocations and unsizedPool for small
// general-purpose buffers.
type BytesPool struct {
	sizedPool   chan weakBuf
	unsizedPool chan weakBuf
	initSize    int // capacity of freshly allocated unsized buffers
}

// BytesPoolWithMemory is a pool view that remembers the largest capacity
// recently returned to it and allocates new buffers at that size, shrinking
// after repeated smaller Puts.
type BytesPoolWithMemory struct {
	maxAllocatedSize atomic.Uint32
	numShouldShrink  atomic.Int32
	pool             chan weakBuf
}

// sliceInternal mirrors the runtime slice header (ptr/len/cap).
type sliceInternal struct {
	ptr unsafe.Pointer
	len int
	cap int
}
// sliceStruct reinterprets b as a raw slice header.
// NOTE(review): this relies on the runtime's slice header layout, which is
// not guaranteed by the Go spec — keep all such casts confined here.
func sliceStruct(b *[]byte) *sliceInternal {
	return (*sliceInternal)(unsafe.Pointer(b))
}

// underlyingPtr returns the data pointer of b's backing array.
func underlyingPtr(b []byte) unsafe.Pointer {
	return sliceStruct(&b).ptr
}

// setCap overwrites b's capacity in place (no bounds checking).
func setCap(b *[]byte, cap int) {
	sliceStruct(b).cap = cap
}

// setLen overwrites b's length in place (no bounds checking).
func setLen(b *[]byte, len int) {
	sliceStruct(b).len = len
}
const (
	kb = 1024
	mb = 1024 * kb
)

const (
	// InPoolLimit bounds the total bytes the pools aim to retain.
	InPoolLimit = 32 * mb

	// UnsizedAvg is the assumed average size of an unsized buffer.
	UnsizedAvg = 4 * kb

	// SizedPoolThreshold is the minimum capacity routed to the sized pool.
	SizedPoolThreshold = 256 * kb

	// DropThreshold: buffers larger than this are never pooled on Put.
	DropThreshold = 4 * mb

	// SizedPoolSize / UnsizedPoolSize split InPoolLimit 80/20 between the
	// two buckets, expressed in slot counts.
	SizedPoolSize   = InPoolLimit * 8 / 10 / SizedPoolThreshold
	UnsizedPoolSize = InPoolLimit * 2 / 10 / UnsizedAvg

	// ShouldShrinkThreshold: consecutive smaller Puts before
	// BytesPoolWithMemory shrinks its remembered size.
	ShouldShrinkThreshold = 10
)
// bytesPool is the package-wide shared pool instance.
var bytesPool = &BytesPool{
	sizedPool:   make(chan weakBuf, SizedPoolSize),
	unsizedPool: make(chan weakBuf, UnsizedPoolSize),
	initSize:    UnsizedAvg,
}

// bytesPoolWithMemory is the backing channel shared by all
// BytesPoolWithMemory views.
var bytesPoolWithMemory = make(chan weakBuf, UnsizedPoolSize)

// GetBytesPool returns the shared BytesPool.
func GetBytesPool() *BytesPool {
	return bytesPool
}

// GetBytesPoolWithUniqueMemory returns a pool view with its own size
// memory but sharing the global backing channel with other views.
func GetBytesPoolWithUniqueMemory() *BytesPoolWithMemory {
	return &BytesPoolWithMemory{
		pool: bytesPoolWithMemory,
	}
}
// Get returns a zero-length buffer from the unsized pool, retrying past
// entries whose weak reference was already collected, or allocates a fresh
// one of initSize capacity when the pool is empty.
// addReused/addNonPooled are stats hooks defined elsewhere in the package.
func (p *BytesPool) Get() []byte {
	for {
		select {
		case bWeak := <-p.unsizedPool:
			bPtr := getBufFromWeak(bWeak)
			if bPtr == nil {
				// Buffer was garbage-collected; try the next entry.
				continue
			}
			addReused(cap(bPtr))
			return bPtr
		default:
			addNonPooled(p.initSize)
			return make([]byte, 0, p.initSize)
		}
	}
}
// Get returns a zero-length buffer from the shared pool, or allocates one
// sized to the largest capacity this view has recently seen (zero before
// the first Put).
func (p *BytesPoolWithMemory) Get() []byte {
	for {
		size := int(p.maxAllocatedSize.Load())
		select {
		case bWeak := <-p.pool:
			bPtr := getBufFromWeak(bWeak)
			if bPtr == nil {
				// Collected entry; retry.
				continue
			}
			addReused(cap(bPtr))
			return bPtr
		default:
			addNonPooled(size)
			return make([]byte, 0, size)
		}
	}
}
func (p *BytesPool) GetSized(size int) []byte {
for {
select {
case bWeak := <-p.sizedPool:
b := getBufFromWeak(bWeak)
if b == nil {
continue
}
capB := cap(b)
remainingSize := capB - size
if remainingSize == 0 {
addReused(capB)
return b[:size]
}
if remainingSize > 0 { // capB > size (buffer larger than requested)
addReused(size)
p.Put(b[size:capB])
// return the first part and limit the capacity to the requested size
ret := b[:size]
setLen(&ret, size)
setCap(&ret, size)
return ret
}
// size is not enough
select {
case p.sizedPool <- bWeak:
default:
addDropped(cap(b))
// just drop it
}
default:
}
addNonPooled(size)
return make([]byte, size)
}
}
func (p *BytesPool) Put(b []byte) {
size := cap(b)
if size > DropThreshold {
addDropped(size)
return
}
b = b[:0]
if size >= SizedPoolThreshold {
p.put(size, makeWeak(&b), p.sizedPool)
} else {
p.put(size, makeWeak(&b), p.unsizedPool)
}
}
func (p *BytesPoolWithMemory) Put(b []byte) {
capB := uint32(cap(b))
for {
current := p.maxAllocatedSize.Load()
if capB < current {
// Potential shrink case
if p.numShouldShrink.Add(1) > ShouldShrinkThreshold {
if p.maxAllocatedSize.CompareAndSwap(current, capB) {
p.numShouldShrink.Store(0) // reset counter
break
}
p.numShouldShrink.Add(-1) // undo if CAS failed
}
break
} else if capB > current {
// Growing case
if p.maxAllocatedSize.CompareAndSwap(current, capB) {
break
}
// retry if CAS failed
} else {
// equal case - no change needed
break
}
}
if capB > DropThreshold {
addDropped(int(capB))
return
}
b = b[:0]
w := makeWeak(&b)
select {
case p.pool <- w:
default:
addDropped(int(capB))
// just drop it
}
}
//go:inline
func (p *BytesPool) put(size int, w weakBuf, pool chan weakBuf) {
select {
case pool <- w:
default:
addDropped(size)
// just drop it
}
}
func init() {
initPoolStats()
}

View File

@@ -1,69 +0,0 @@
package synk
import (
"slices"
"testing"
)
var sizes = []int{1024, 4096, 16384, 65536, 32 * 1024, 128 * 1024, 512 * 1024, 1024 * 1024, 2 * 1024 * 1024}
func BenchmarkBytesPool_GetSmall(b *testing.B) {
for b.Loop() {
bytesPool.Put(bytesPool.GetSized(1024))
}
}
func BenchmarkBytesPool_MakeSmall(b *testing.B) {
for b.Loop() {
_ = make([]byte, 1024)
}
}
func BenchmarkBytesPool_GetLarge(b *testing.B) {
for b.Loop() {
buf := bytesPool.GetSized(DropThreshold / 2)
buf[0] = 1
bytesPool.Put(buf)
}
}
func BenchmarkBytesPool_GetLargeUnsized(b *testing.B) {
for b.Loop() {
buf := slices.Grow(bytesPool.Get(), DropThreshold/2)
buf = append(buf, 1)
bytesPool.Put(buf)
}
}
func BenchmarkBytesPool_MakeLarge(b *testing.B) {
for b.Loop() {
buf := make([]byte, DropThreshold/2)
buf[0] = 1
_ = buf
}
}
func BenchmarkBytesPool_GetAll(b *testing.B) {
for i := range b.N {
bytesPool.Put(bytesPool.GetSized(sizes[i%len(sizes)]))
}
}
func BenchmarkBytesPool_GetAllUnsized(b *testing.B) {
for i := range b.N {
bytesPool.Put(slices.Grow(bytesPool.Get(), sizes[i%len(sizes)]))
}
}
func BenchmarkBytesPool_MakeAll(b *testing.B) {
for i := range b.N {
_ = make([]byte, sizes[i%len(sizes)])
}
}
func BenchmarkBytesPoolWithMemory(b *testing.B) {
pool := GetBytesPoolWithUniqueMemory()
for i := range b.N {
pool.Put(slices.Grow(pool.Get(), sizes[i%len(sizes)]))
}
}

View File

@@ -1,64 +0,0 @@
//go:build !production
package synk
import (
"os"
"os/signal"
"sync/atomic"
"time"
"github.com/rs/zerolog/log"
"github.com/yusing/godoxy/internal/utils/strutils"
)
type poolCounters struct {
num atomic.Uint64
size atomic.Uint64
}
var (
nonPooled poolCounters
dropped poolCounters
reused poolCounters
)
func addNonPooled(size int) {
nonPooled.num.Add(1)
nonPooled.size.Add(uint64(size))
}
func addReused(size int) {
reused.num.Add(1)
reused.size.Add(uint64(size))
}
func addDropped(size int) {
dropped.num.Add(1)
dropped.size.Add(uint64(size))
}
func initPoolStats() {
go func() {
statsTicker := time.NewTicker(5 * time.Second)
defer statsTicker.Stop()
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
for {
select {
case <-sig:
return
case <-statsTicker.C:
log.Info().
Uint64("numReused", reused.num.Load()).
Str("sizeReused", strutils.FormatByteSize(reused.size.Load())).
Uint64("numDropped", dropped.num.Load()).
Str("sizeDropped", strutils.FormatByteSize(dropped.size.Load())).
Uint64("numNonPooled", nonPooled.num.Load()).
Str("sizeNonPooled", strutils.FormatByteSize(nonPooled.size.Load())).
Msg("bytes pool stats")
}
}
}()
}

View File

@@ -1,8 +0,0 @@
//go:build production
package synk
func addNonPooled(size int) {}
func addDropped(size int) {}
func addReused(size int) {}
func initPoolStats() {}

View File

@@ -1,263 +0,0 @@
package synk
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSized(t *testing.T) {
b := bytesPool.GetSized(2 * SizedPoolThreshold)
assert.Equal(t, cap(b), 2*SizedPoolThreshold)
bytesPool.Put(b)
assert.Equal(t, underlyingPtr(b), underlyingPtr(bytesPool.GetSized(SizedPoolThreshold)))
}
func TestUnsized(t *testing.T) {
b := bytesPool.Get()
assert.Equal(t, cap(b), UnsizedAvg)
bytesPool.Put(b)
assert.Equal(t, underlyingPtr(b), underlyingPtr(bytesPool.Get()))
}
func TestGetSizedExactMatch(t *testing.T) {
// Test exact size match reuse
size := SizedPoolThreshold
b1 := bytesPool.GetSized(size)
assert.Equal(t, size, len(b1))
assert.Equal(t, size, cap(b1))
// Put back into pool
bytesPool.Put(b1)
// Get same size - should reuse the same buffer
b2 := bytesPool.GetSized(size)
assert.Equal(t, size, len(b2))
assert.Equal(t, size, cap(b2))
assert.Equal(t, underlyingPtr(b1), underlyingPtr(b2))
}
func TestGetSizedBufferSplit(t *testing.T) {
// Test buffer splitting when capacity > requested size
largeSize := 2 * SizedPoolThreshold
requestedSize := SizedPoolThreshold
// Create a large buffer and put it in pool
b1 := bytesPool.GetSized(largeSize)
assert.Equal(t, largeSize, len(b1))
assert.Equal(t, largeSize, cap(b1))
bytesPool.Put(b1)
// Request smaller size - should split the buffer
b2 := bytesPool.GetSized(requestedSize)
assert.Equal(t, requestedSize, len(b2))
assert.Equal(t, requestedSize, cap(b2)) // capacity should remain the original
assert.Equal(t, underlyingPtr(b1), underlyingPtr(b2))
// The remaining part should be put back in pool
// Request the remaining size to verify
remainingSize := largeSize - requestedSize
b3 := bytesPool.GetSized(remainingSize)
assert.Equal(t, remainingSize, len(b3))
assert.Equal(t, remainingSize, cap(b3))
// Verify the remaining buffer points to the correct memory location
originalPtr := underlyingPtr(b1)
remainingPtr := underlyingPtr(b3)
// The remaining buffer should start at original + requestedSize
expectedOffset := uintptr(originalPtr) + uintptr(requestedSize)
actualOffset := uintptr(remainingPtr)
assert.Equal(t, expectedOffset, actualOffset, "Remaining buffer should point to correct offset")
}
func TestGetSizedSmallRemainder(t *testing.T) {
// Test when remaining size is smaller than SizedPoolThreshold
poolSize := SizedPoolThreshold + 100 // Just slightly larger than threshold
requestedSize := SizedPoolThreshold
// Create buffer and put in pool
b1 := bytesPool.GetSized(poolSize)
bytesPool.Put(b1)
// Request size that leaves small remainder
b2 := bytesPool.GetSized(requestedSize)
assert.Equal(t, requestedSize, len(b2))
assert.Equal(t, requestedSize, cap(b2))
// The small remainder (100 bytes) should NOT be put back in sized pool
// Try to get the remainder size - should create new buffer
b3 := bytesPool.GetSized(100)
assert.Equal(t, 100, len(b3))
assert.Equal(t, 100, cap(b3))
assert.NotEqual(t, underlyingPtr(b2), underlyingPtr(b3))
}
func TestGetSizedSmallBufferBypass(t *testing.T) {
// Test that small buffers (< SizedPoolThreshold) don't use sized pool
smallSize := SizedPoolThreshold - 1
b1 := bytesPool.GetSized(smallSize)
assert.Equal(t, smallSize, len(b1))
assert.Equal(t, smallSize, cap(b1))
b2 := bytesPool.GetSized(smallSize)
assert.Equal(t, smallSize, len(b2))
assert.Equal(t, smallSize, cap(b2))
// Should be different buffers (not pooled)
assert.NotEqual(t, underlyingPtr(b1), underlyingPtr(b2))
}
func TestGetSizedBufferTooSmall(t *testing.T) {
// Test when pool buffer is smaller than requested size
smallSize := SizedPoolThreshold
largeSize := 2 * SizedPoolThreshold
// Put small buffer in pool
b1 := bytesPool.GetSized(smallSize)
bytesPool.Put(b1)
// Request larger size - should create new buffer, not reuse small one
b2 := bytesPool.GetSized(largeSize)
assert.Equal(t, largeSize, len(b2))
assert.Equal(t, largeSize, cap(b2))
assert.NotEqual(t, underlyingPtr(b1), underlyingPtr(b2))
// The small buffer should still be in pool
b3 := bytesPool.GetSized(smallSize)
assert.Equal(t, underlyingPtr(b1), underlyingPtr(b3))
}
func TestGetSizedMultipleSplits(t *testing.T) {
// Test multiple sequential splits of the same buffer
hugeSize := 4 * SizedPoolThreshold
splitSize := SizedPoolThreshold
// Create huge buffer
b1 := bytesPool.GetSized(hugeSize)
originalPtr := underlyingPtr(b1)
bytesPool.Put(b1)
// Split it into smaller pieces
pieces := make([][]byte, 0, 4)
for i := range 4 {
piece := bytesPool.GetSized(splitSize)
pieces = append(pieces, piece)
// Each piece should point to the correct offset
expectedOffset := uintptr(originalPtr) + uintptr(i*splitSize)
actualOffset := uintptr(underlyingPtr(piece))
assert.Equal(t, expectedOffset, actualOffset, "Piece %d should point to correct offset", i)
assert.Equal(t, splitSize, len(piece))
assert.Equal(t, splitSize, cap(piece))
}
// All pieces should have the same underlying capacity
for i, piece := range pieces {
assert.Equal(t, splitSize, cap(piece), "Piece %d should have correct capacity", i)
}
}
func TestGetSizedMemorySafety(t *testing.T) {
// Test that split buffers don't interfere with each other
totalSize := 3 * SizedPoolThreshold
firstSize := SizedPoolThreshold
// Create buffer and split it
b1 := bytesPool.GetSized(totalSize)
// Fill with test data
for i := range len(b1) {
b1[i] = byte(i % 256)
}
bytesPool.Put(b1)
// Get first part
first := bytesPool.GetSized(firstSize)
assert.Equal(t, firstSize, len(first))
// Verify data integrity
for i := range len(first) {
assert.Equal(t, byte(i%256), first[i], "Data should be preserved after split")
}
// Get remaining part
remainingSize := totalSize - firstSize
remaining := bytesPool.GetSized(remainingSize)
assert.Equal(t, remainingSize, len(remaining))
// Verify remaining data
for i := range len(remaining) {
expected := byte((i + firstSize) % 256)
assert.Equal(t, expected, remaining[i], "Remaining data should be preserved")
}
}
func TestGetSizedCapacityLimiting(t *testing.T) {
// Test that returned buffers have limited capacity to prevent overwrites
largeSize := 2 * SizedPoolThreshold
requestedSize := SizedPoolThreshold
// Create large buffer and put in pool
b1 := bytesPool.GetSized(largeSize)
bytesPool.Put(b1)
// Get smaller buffer from the split
b2 := bytesPool.GetSized(requestedSize)
assert.Equal(t, requestedSize, len(b2))
assert.Equal(t, requestedSize, cap(b2), "Returned buffer should have limited capacity")
// Try to append data - should not be able to overwrite beyond capacity
original := make([]byte, len(b2))
copy(original, b2)
// This append should force a new allocation since capacity is limited
b2 = append(b2, 1, 2, 3, 4, 5)
assert.Greater(t, len(b2), requestedSize, "Buffer should have grown")
// Get the remaining buffer to verify it wasn't affected
remainingSize := largeSize - requestedSize
b3 := bytesPool.GetSized(remainingSize)
assert.Equal(t, remainingSize, len(b3))
assert.Equal(t, remainingSize, cap(b3), "Remaining buffer should have limited capacity")
}
func TestGetSizedAppendSafety(t *testing.T) {
// Test that appending to returned buffer doesn't affect remaining buffer
totalSize := 4 * SizedPoolThreshold
firstSize := SizedPoolThreshold
// Create buffer with specific pattern
b1 := bytesPool.GetSized(totalSize)
for i := range len(b1) {
b1[i] = byte(100 + i%100)
}
bytesPool.Put(b1)
// Get first part
first := bytesPool.GetSized(firstSize)
assert.Equal(t, firstSize, cap(first), "First part should have limited capacity")
// Store original first part content
originalFirst := make([]byte, len(first))
copy(originalFirst, first)
// Get remaining part to establish its state
remaining := bytesPool.GetSized(SizedPoolThreshold)
// Store original remaining content
originalRemaining := make([]byte, len(remaining))
copy(originalRemaining, remaining)
// Now try to append to first - this should not affect remaining buffers
// since capacity is limited
first = append(first, make([]byte, 1000)...)
// Verify remaining buffer content is unchanged
for i := range len(originalRemaining) {
assert.Equal(t, originalRemaining[i], remaining[i],
"Remaining buffer should be unaffected by append to first buffer")
}
}

View File

@@ -1,49 +0,0 @@
package trie
import (
"sync/atomic"
)
// AnyValue is a wrapper of atomic.Value
// It is used to store values in trie nodes
// And allowed to assign to empty struct value when node
// is not an end node anymore
type AnyValue struct {
v atomic.Value
}
type zeroValue struct{}
var zero zeroValue
func (av *AnyValue) Store(v any) {
if v == nil {
av.v.Store(zero)
return
}
defer panicInvalidAssignment()
av.v.Store(v)
}
func (av *AnyValue) Swap(v any) any {
defer panicInvalidAssignment()
return av.v.Swap(v)
}
func (av *AnyValue) Load() any {
switch v := av.v.Load().(type) {
case zeroValue:
return nil
default:
return v
}
}
func (av *AnyValue) IsNil() bool {
switch v := av.v.Load().(type) {
case zeroValue:
return true // assigned nil manually
default:
return v == nil // uninitialized
}
}

View File

@@ -1,13 +0,0 @@
//go:build debug
package trie
import "fmt"
func panicInvalidAssignment() {
// assigned anything after manually assigning nil
// will panic because of type mismatch (zeroValue and v.(type))
if r := recover(); r != nil {
panic(fmt.Errorf("attempt to assign non-nil value on edge node or assigning mismatched type: %v", r))
}
}

View File

@@ -1,7 +0,0 @@
//go:build !debug
package trie
func panicInvalidAssignment() {
// no-op
}

View File

@@ -1,16 +0,0 @@
package trie
import (
"testing"
)
func TestStoreNil(t *testing.T) {
var v AnyValue
v.Store(nil)
if v.Load() != nil {
t.Fatal("expected nil")
}
if !v.IsNil() {
t.Fatal("expected true")
}
}

View File

@@ -1,21 +0,0 @@
package trie
import (
"encoding/json"
"maps"
)
func (r *Root) MarshalJSON() ([]byte, error) {
return json.Marshal(maps.Collect(r.Walk))
}
func (r *Root) UnmarshalJSON(data []byte) error {
var m map[string]any
if err := json.Unmarshal(data, &m); err != nil {
return err
}
for k, v := range m {
r.Store(NewKey(k), v)
}
return nil
}

View File

@@ -1,36 +0,0 @@
package trie
import (
"encoding/json"
"testing"
)
func TestMarshalUnmarshalJSON(t *testing.T) {
trie := NewTrie()
data := map[string]any{
"foo.bar": 42.12,
"foo.baz": "hello",
"qwe.rt.yu.io": 123.45,
}
for k, v := range data {
trie.Store(NewKey(k), v)
}
// MarshalJSON
bytesFromTrie, err := json.Marshal(trie)
if err != nil {
t.Fatalf("json.Marshal error: %v", err)
}
// UnmarshalJSON
newTrie := NewTrie()
if err := json.Unmarshal(bytesFromTrie, newTrie); err != nil {
t.Fatalf("UnmarshalJSON error: %v", err)
}
for k, v := range data {
got, ok := newTrie.Get(NewKey(k))
if !ok || got != v {
t.Errorf("UnmarshalJSON: key %q got %v, want %v", k, got, v)
}
}
}

View File

@@ -1,80 +0,0 @@
package trie
import (
"slices"
"strings"
"github.com/yusing/godoxy/internal/utils/strutils"
)
type Key struct {
segments []string // escaped segments
full string // unescaped original key
hasWildcard bool
}
func Namespace(ns string) *Key {
return &Key{
segments: []string{ns},
full: ns,
hasWildcard: false,
}
}
func NewKey(keyStr string) *Key {
key := &Key{
segments: strutils.SplitRune(keyStr, '.'),
full: keyStr,
}
for _, seg := range key.segments {
if seg == "*" || seg == "**" {
key.hasWildcard = true
}
}
return key
}
func EscapeSegment(seg string) string {
var sb strings.Builder
for _, r := range seg {
switch r {
case '.', '*':
sb.WriteString("__")
default:
sb.WriteRune(r)
}
}
return sb.String()
}
func (ns Key) With(segment string) *Key {
ns.segments = append(ns.segments, segment)
ns.full = ns.full + "." + segment
ns.hasWildcard = ns.hasWildcard || segment == "*" || segment == "**"
return &ns
}
func (ns Key) WithEscaped(segment string) *Key {
ns.segments = append(ns.segments, EscapeSegment(segment))
ns.full = ns.full + "." + segment
return &ns
}
func (ns *Key) NumSegments() int {
return len(ns.segments)
}
func (ns *Key) HasWildcard() bool {
return ns.hasWildcard
}
func (ns *Key) String() string {
return ns.full
}
func (ns *Key) Clone() *Key {
clone := *ns
clone.segments = slices.Clone(ns.segments)
clone.full = strings.Clone(ns.full)
return &clone
}

View File

@@ -1,86 +0,0 @@
package trie
import (
"reflect"
"testing"
)
func TestNamespace(t *testing.T) {
k := Namespace("foo")
if k.String() != "foo" {
t.Errorf("Namespace.String() = %q, want %q", k.String(), "foo")
}
if k.NumSegments() != 1 {
t.Errorf("Namespace.NumSegments() = %d, want 1", k.NumSegments())
}
if k.HasWildcard() {
t.Error("Namespace.HasWildcard() = true, want false")
}
}
func TestNewKey(t *testing.T) {
k := NewKey("a.b.c")
if !reflect.DeepEqual(k.segments, []string{"a", "b", "c"}) {
t.Errorf("NewKey.segments = %v, want [a b c]", k.segments)
}
if k.String() != "a.b.c" {
t.Errorf("NewKey.String() = %q, want %q", k.String(), "a.b.c")
}
if k.NumSegments() != 3 {
t.Errorf("NewKey.NumSegments() = %d, want 3", k.NumSegments())
}
if k.HasWildcard() {
t.Error("NewKey.HasWildcard() = true, want false")
}
kw := NewKey("foo.*.bar")
if !kw.HasWildcard() {
t.Error("NewKey.HasWildcard() = false, want true for wildcard")
}
}
func TestWithAndWithEscaped(t *testing.T) {
k := Namespace("foo")
k2 := k.Clone().With("bar")
if k2.String() != "foo.bar" {
t.Errorf("With.String() = %q, want %q", k2.String(), "foo.bar")
}
if k2.NumSegments() != 2 {
t.Errorf("With.NumSegments() = %d, want 2", k2.NumSegments())
}
k3 := Namespace("foo").WithEscaped("b.r*")
esc := EscapeSegment("b.r*")
if k3.segments[1] != esc {
t.Errorf("WithEscaped.segment = %q, want %q", k3.segments[1], esc)
}
}
func TestEscapeSegment(t *testing.T) {
cases := map[string]string{
"foo": "foo",
"f.o": "f__o",
"*": "__",
"a*b.c": "a__b__c",
}
for in, want := range cases {
if got := EscapeSegment(in); got != want {
t.Errorf("EscapeSegment(%q) = %q, want %q", in, got, want)
}
}
}
func TestClone(t *testing.T) {
k := NewKey("x.y.z")
cl := k.Clone()
if !reflect.DeepEqual(k, cl) {
t.Errorf("Clone() = %v, want %v", cl, k)
}
cl = cl.With("new")
if cl == k {
t.Error("Clone() returns same pointer")
}
if reflect.DeepEqual(k.segments, cl.segments) {
t.Error("Clone is not deep copy: segments slice is shared")
}
}

View File

@@ -1,54 +0,0 @@
package trie
import (
"github.com/puzpuzpuz/xsync/v4"
)
type Node struct {
key string
children *xsync.Map[string, *Node] // lock-free map which allows concurrent access
value AnyValue // only end nodes have values
}
func mayPrefix(key, part string) string {
if key == "" {
return part
}
return key + "." + part
}
func (node *Node) newChild(part string) *Node {
return &Node{
key: mayPrefix(node.key, part),
children: xsync.NewMap[string, *Node](),
}
}
func (node *Node) Get(key *Key) (any, bool) {
for _, seg := range key.segments {
child, ok := node.children.Load(seg)
if !ok {
return nil, false
}
node = child
}
v := node.value.Load()
if v == nil {
return nil, false
}
return v, true
}
func (node *Node) loadOrStore(key *Key, newFunc func() any) (*Node, bool) {
for i, seg := range key.segments {
child, _ := node.children.LoadOrCompute(seg, func() (*Node, bool) {
newNode := node.newChild(seg)
if i == len(key.segments)-1 {
newNode.value.Store(newFunc())
}
return newNode, false
})
node = child
}
return node, false
}

View File

@@ -1,44 +0,0 @@
package trie
import "github.com/puzpuzpuz/xsync/v4"
type Root struct {
*Node
cached *xsync.Map[string, *Node]
}
func NewTrie() *Root {
return &Root{
Node: &Node{
children: xsync.NewMap[string, *Node](),
},
cached: xsync.NewMap[string, *Node](),
}
}
func (r *Root) getNode(key *Key, newFunc func() any) *Node {
if key.hasWildcard {
panic("should not call Load or Store on a key with any wildcard: " + key.full)
}
node, _ := r.cached.LoadOrCompute(key.full, func() (*Node, bool) {
return r.Node.loadOrStore(key, newFunc)
})
return node
}
// LoadOrStore loads or stores the value for the key
// Returns the value loaded/stored
func (r *Root) LoadOrStore(key *Key, newFunc func() any) any {
return r.getNode(key, newFunc).value.Load()
}
// LoadAndStore loads or stores the value for the key
// Returns the old value if exists, nil otherwise
func (r *Root) LoadAndStore(key *Key, val any) any {
return r.getNode(key, func() any { return val }).value.Swap(val)
}
// Store stores the value for the key
func (r *Root) Store(key *Key, val any) {
r.getNode(key, func() any { return val }).value.Store(val)
}

View File

@@ -1,35 +0,0 @@
package trie
import "testing"
var nsCPU = Namespace("cpu")
// Test functions
func TestLoadOrStore(t *testing.T) {
trie := NewTrie()
ptr := trie.LoadOrStore(nsCPU, func() any {
return new(int)
})
if ptr == nil {
t.Fatal("expected pointer to be created")
}
if ptr != trie.LoadOrStore(nsCPU, func() any {
return new(int)
}) {
t.Fatal("expected same pointer to be returned")
}
got, ok := trie.Get(nsCPU)
if !ok || got != ptr {
t.Fatal("expected same pointer to be returned")
}
}
func TestStore(t *testing.T) {
trie := NewTrie()
ptr := new(int)
trie.Store(nsCPU, ptr)
got, ok := trie.Get(nsCPU)
if !ok || got != ptr {
t.Fatal("expected same pointer to be returned")
}
}

View File

@@ -1,109 +0,0 @@
package trie
import (
"maps"
"slices"
)
type (
YieldFunc = func(part string, value any) bool
YieldKeyFunc = func(key string) bool
Iterator = func(YieldFunc)
KeyIterator = func(YieldKeyFunc)
)
// WalkAll walks all nodes in the trie, yields full key and series
func (node *Node) Walk(yield YieldFunc) {
node.walkAll(yield)
}
func (node *Node) walkAll(yield YieldFunc) bool {
if !node.value.IsNil() {
return yield(node.key, node.value.Load())
}
for _, v := range node.children.Range {
if !v.walkAll(yield) {
return false
}
}
return true
}
func (node *Node) WalkKeys(yield YieldKeyFunc) {
node.walkKeys(yield)
}
func (node *Node) walkKeys(yield YieldKeyFunc) bool {
if !node.value.IsNil() {
return !yield(node.key)
}
for _, v := range node.children.Range {
if !v.walkKeys(yield) {
return false
}
}
return true
}
func (node *Node) Keys() []string {
return slices.Collect(node.WalkKeys)
}
func (node *Node) Map() map[string]any {
return maps.Collect(node.Walk)
}
func (tree Root) Query(key *Key) Iterator {
if !key.hasWildcard {
return func(yield YieldFunc) {
if v, ok := tree.Get(key); ok {
yield(key.full, v)
}
}
}
return func(yield YieldFunc) {
tree.walkQuery(key.segments, tree.Node, yield, false)
}
}
func (tree Root) walkQuery(patternParts []string, node *Node, yield YieldFunc, recursive bool) bool {
if len(patternParts) == 0 {
if !node.value.IsNil() { // end
if !yield(node.key, node.value.Load()) {
return true
}
} else if recursive {
return tree.walkAll(yield)
}
return true
}
pat := patternParts[0]
switch pat {
case "**":
// ** matches zero or more segments
// Option 1: ** matches zero segment, move to next pattern part
if !tree.walkQuery(patternParts[1:], node, yield, false) {
return false
}
// Option 2: ** matches one or more segments
for _, child := range node.children.Range {
if !tree.walkQuery(patternParts, child, yield, true) {
return false
}
}
case "*":
// * matches any single segment
for _, child := range node.children.Range {
if !tree.walkQuery(patternParts[1:], child, yield, false) {
return false
}
}
default:
// Exact match
if child, ok := node.children.Load(pat); ok {
return tree.walkQuery(patternParts[1:], child, yield, false)
}
}
return true
}

View File

@@ -1,113 +0,0 @@
package trie_test
import (
"maps"
"slices"
"testing"
. "github.com/yusing/godoxy/internal/utils/trie"
)
// Test data for trie tests
var (
testData = map[string]any{
"routes.route1": new(int),
"routes.route2": new(int),
"routes.route3": new(int),
"system.cpu_average": new(int),
"system.mem.used": new(int),
"system.mem.percentage_used": new(int),
"system.disks.disk0.used": new(int),
"system.disks.disk0.percentage_used": new(int),
"system.disks.disk1.used": new(int),
"system.disks.disk1.percentage_used": new(int),
}
testWalkDisksWants = []string{
"system.disks.disk0.used",
"system.disks.disk0.percentage_used",
"system.disks.disk1.used",
"system.disks.disk1.percentage_used",
}
testWalkDisksUsedWants = []string{
"system.disks.disk0.used",
"system.disks.disk1.used",
}
testUsedWants = []string{
"system.mem.used",
"system.disks.disk0.used",
"system.disks.disk1.used",
}
)
// Helper functions
func keys(m map[string]any) []string {
return slices.Sorted(maps.Keys(m))
}
func keysEqual(m map[string]any, want []string) bool {
slices.Sort(want)
return slices.Equal(keys(m), want)
}
func TestWalkAll(t *testing.T) {
trie := NewTrie()
for key, series := range testData {
trie.Store(NewKey(key), series)
}
walked := maps.Collect(trie.Walk)
for k, v := range testData {
if _, ok := walked[k]; !ok {
t.Fatalf("expected key %s not found", k)
}
if v != walked[k] {
t.Fatalf("key %s expected %v, got %v", k, v, walked[k])
}
}
}
func TestWalk(t *testing.T) {
trie := NewTrie()
for key, series := range testData {
trie.Store(NewKey(key), series)
}
tests := []struct {
query string
want []string
wantEmpty bool
}{
{"system.disks.*.used", testWalkDisksUsedWants, false},
{"system.*.*.used", testWalkDisksUsedWants, false},
{"*.disks.*.used", testWalkDisksUsedWants, false},
{"*.*.*.used", testWalkDisksUsedWants, false},
{"system.disks.**", testWalkDisksWants, false}, // note: original code uses '*' not '**'
{"system.disks", nil, true},
{"**.used", testUsedWants, false},
}
for _, tc := range tests {
t.Run(tc.query, func(t *testing.T) {
got := maps.Collect(trie.Query(NewKey(tc.query)))
if tc.wantEmpty {
if len(got) != 0 {
t.Fatalf("expected empty, got %v", keys(got))
}
return
}
if !keysEqual(got, tc.want) {
t.Fatalf("expected %v, got %v", tc.want, keys(got))
}
for _, k := range tc.want {
want, ok := testData[k]
if !ok {
t.Fatalf("expected key %s not found", k)
}
if got[k] != want {
t.Fatalf("key %s expected %v, got %v", k, want, got[k])
}
}
})
}
}