Mirror of https://github.com/yusing/godoxy.git (synced 2026-01-15 23:03:50 +01:00)

Compare commits (10 commits)
| SHA1 |
|---|
| c72f66d64b |
| 59bc342a40 |
| e11579df10 |
| 6a8f6fb4b5 |
| 8f20bd3840 |
| f1abb745fe |
| cb2990f6e8 |
| fb2f850311 |
| 2b9c0f09ee |
| efe3eb4ce7 |
.env.example
@@ -1,3 +1,6 @@
+# docker image tag (latest, nightly)
+TAG=latest
+
 # set timezone to get correct log timestamp
 TZ=ETC/UTC
 
@@ -47,8 +50,12 @@ GODOXY_API_ADDR=127.0.0.1:8888
 # Frontend listening port
 GODOXY_FRONTEND_PORT=3000
 
-# Prometheus Metrics
-GODOXY_PROMETHEUS_ENABLED=true
+# Frontend aliases (subdomains / FQDNs, e.g. godoxy, godoxy.domain.com)
+GODOXY_FRONTEND_ALIASES=godoxy
 
+# Docker socket
+# /var/run/podman/podman.sock for podman
+DOCKER_SOCKET=/var/run/docker.sock
+
 # Debug mode
 GODOXY_DEBUG=false
@@ -1,7 +1,7 @@
 ---
 services:
   frontend:
-    image: ghcr.io/yusing/godoxy-frontend:latest
+    image: ghcr.io/yusing/godoxy-frontend:${TAG:-latest}
     container_name: godoxy-frontend
     restart: unless-stopped
     network_mode: host # do not change this
@@ -13,7 +13,7 @@ services:
 
     # modify below to fit your needs
     labels:
-      proxy.aliases: godoxy
+      proxy.aliases: ${GODOXY_FRONTEND_ALIASES:-godoxy}
       proxy.godoxy.port: ${GODOXY_FRONTEND_PORT:-3000}
      # proxy.godoxy.middlewares.cidr_whitelist: |
      #   status: 403
@@ -24,13 +24,13 @@ services:
      #     - 192.168.0.0/16
      #     - 172.16.0.0/12
   app:
-    image: ghcr.io/yusing/godoxy:latest
+    image: ghcr.io/yusing/godoxy:${TAG:-latest}
     container_name: godoxy
     restart: always
     network_mode: host # do not change this
     env_file: .env
     volumes:
-      - /var/run/docker.sock:/var/run/docker.sock
+      - ${DOCKER_SOCKET:-/var/run/docker.sock}:/var/run/docker.sock
       - ./config:/app/config
       - ./logs:/app/logs
       - ./error_pages:/app/error_pages
@@ -3,11 +3,9 @@ package acl
 import (
     "github.com/puzpuzpuz/xsync/v3"
     acl "github.com/yusing/go-proxy/internal/acl/types"
-    "go.uber.org/atomic"
 )
 
 var cityCache = xsync.NewMapOf[string, *acl.City]()
-var numCachedLookup atomic.Uint64
 
 func (cfg *MaxMindConfig) lookupCity(ip *acl.IPInfo) (*acl.City, bool) {
     if ip.City != nil {
@@ -20,7 +18,7 @@ func (cfg *MaxMindConfig) lookupCity(ip *acl.IPInfo) (*acl.City, bool) {
 
     city, ok := cityCache.Load(ip.Str)
     if ok {
-        numCachedLookup.Inc()
+        ip.City = city
         return city, true
     }
 
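The cached branch now writes the city back onto the IPInfo and drops the atomic hit counter; the cache itself is a concurrent map keyed by the IP string. A standalone sketch of that memoization pattern, using the standard library's sync.Map in place of the xsync.MapOf shown in the diff (types and the resolve callback are illustrative):

```go
package main

import (
    "fmt"
    "sync"
)

type City struct{ Name string }

type IPInfo struct {
    Str  string
    City *City
}

var cityCache sync.Map // map[string]*City

// lookupCity consults the cache first and attaches the result to the IPInfo
// so later callers can reuse it without another database lookup.
func lookupCity(ip *IPInfo, resolve func(string) *City) (*City, bool) {
    if ip.City != nil {
        return ip.City, true
    }
    if cached, ok := cityCache.Load(ip.Str); ok {
        ip.City = cached.(*City)
        return ip.City, true
    }
    city := resolve(ip.Str) // e.g. a MaxMind database lookup
    if city == nil {
        return nil, false
    }
    cityCache.Store(ip.Str, city)
    ip.City = city
    return city, true
}

func main() {
    resolver := func(string) *City { return &City{Name: "Example"} }
    info := &IPInfo{Str: "203.0.113.7"}
    city, ok := lookupCity(info, resolver)
    fmt.Println(city.Name, ok) // Example true
}
```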
@@ -9,6 +9,7 @@ import (
     "github.com/puzpuzpuz/xsync/v3"
     "github.com/rs/zerolog"
     acl "github.com/yusing/go-proxy/internal/acl/types"
+    "github.com/yusing/go-proxy/internal/common"
     "github.com/yusing/go-proxy/internal/gperr"
     "github.com/yusing/go-proxy/internal/logging"
     "github.com/yusing/go-proxy/internal/logging/accesslog"
@@ -63,7 +64,7 @@ type checkCache struct {
 const cacheTTL = 1 * time.Minute
 
 func (c *checkCache) Expired() bool {
-    return c.created.Add(cacheTTL).After(utils.TimeNow())
+    return c.created.Add(cacheTTL).Before(utils.TimeNow())
 }
 
 //TODO: add stats
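The Expired fix flips the comparison: an entry is stale only once its creation time plus the TTL lies before the current time, whereas the old After check reported fresh entries as expired. A minimal standalone sketch of that semantics (not the repository's checkCache type):

```go
package main

import (
    "fmt"
    "time"
)

const cacheTTL = 1 * time.Minute

type entry struct{ created time.Time }

// Expired reports whether the entry is older than cacheTTL.
// created.Add(cacheTTL).Before(now) is true only after the TTL has elapsed;
// the previous After(...) comparison was true for fresh entries instead.
func (e entry) Expired() bool {
    return e.created.Add(cacheTTL).Before(time.Now())
}

func main() {
    fresh := entry{created: time.Now()}
    stale := entry{created: time.Now().Add(-2 * time.Minute)}
    fmt.Println(fresh.Expired()) // false
    fmt.Println(stale.Expired()) // true
}
```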
@@ -153,7 +154,10 @@ func (c *Config) Start(parent *task.Task) gperr.Error {
     return nil
 }
 
-func (c *config) cacheRecord(info *acl.IPInfo, allow bool) {
+func (c *Config) cacheRecord(info *acl.IPInfo, allow bool) {
+    if common.ForceResolveCountry && info.City == nil {
+        c.MaxMind.lookupCity(info)
+    }
     c.ipCache.Store(info.Str, &checkCache{
         IPInfo: info,
         allow:  allow,
@@ -175,7 +179,7 @@ func (c *Config) IPAllowed(ip net.IP) bool {
         return false
     }
 
-    // always allow private and loopback
+    // always allow loopback
     // loopback is not logged
     if ip.IsLoopback() {
         return true
@@ -54,7 +54,7 @@ func (cfg *MaxMindConfig) LoadMaxMindDB(parent task.Parent) gperr.Error {
 
     path := dbPath(cfg.Database)
     reader, err := maxmindDBOpen(path)
-    exists := true
+    valid := true
     if err != nil {
         switch {
         case errors.Is(err, os.ErrNotExist):
@@ -65,20 +65,19 @@ func (cfg *MaxMindConfig) LoadMaxMindDB(parent task.Parent) gperr.Error {
                 return gperr.Wrap(err)
             }
         }
-        exists = false
+        valid = false
     }
 
-    if !exists {
+    if !valid {
         cfg.logger.Info().Msg("MaxMind DB not found/invalid, downloading...")
-        reader, err = cfg.download()
-        if err != nil {
+        if err = cfg.download(); err != nil {
             return ErrDownloadFailure.With(err)
         }
     } else {
-        cfg.logger.Info().Msg("MaxMind DB loaded")
+        cfg.db.Reader = reader
+        go cfg.scheduleUpdate(parent)
     }
+    cfg.logger.Info().Msg("MaxMind DB loaded")
 
-    cfg.db.Reader = reader
-    go cfg.scheduleUpdate(parent)
     return nil
 }
@@ -137,17 +136,10 @@ func (cfg *MaxMindConfig) update() {
         Time("latest", remoteLastModified.Local()).
         Time("current", cfg.lastUpdate).
         Msg("MaxMind DB update available")
-    reader, err := cfg.download()
-    if err != nil {
+    if err = cfg.download(); err != nil {
         cfg.logger.Err(err).Msg("failed to update MaxMind DB")
         return
     }
-    cfg.db.Lock()
-    cfg.db.Close()
-    cfg.db.Reader = reader
-    cfg.setLastUpdate(*remoteLastModified)
-    cfg.db.Unlock()
 
     cfg.logger.Info().Msg("MaxMind DB updated")
 }
@@ -190,57 +182,87 @@ func (cfg *MaxMindConfig) checkLastest() (lastModifiedT *time.Time, err error) {
     return &lastModifiedTime, nil
 }
 
-func (cfg *MaxMindConfig) download() (*maxminddb.Reader, error) {
+func (cfg *MaxMindConfig) download() error {
     resp, err := newReq(cfg, http.MethodGet)
     if err != nil {
-        return nil, err
+        return err
     }
     defer resp.Body.Close()
 
     if resp.StatusCode != http.StatusOK {
-        return nil, fmt.Errorf("%w: %d", ErrResponseNotOK, resp.StatusCode)
+        return fmt.Errorf("%w: %d", ErrResponseNotOK, resp.StatusCode)
     }
 
-    path := dbPath(cfg.Database)
-    tmpPath := path + "-tmp.tar.gz"
-    file, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_WRONLY, 0o644)
-    if err != nil {
-        return nil, err
-    }
+    dbFile := dbPath(cfg.Database)
+    tmpGZPath := dbFile + "-tmp.tar.gz"
+    tmpDBPath := dbFile + "-tmp"
 
-    cfg.logger.Info().Msg("MaxMind DB downloading...")
-
-    _, err = io.Copy(file, resp.Body)
-    if err != nil {
-        file.Close()
-        return nil, err
-    }
-
-    file.Close()
-
-    // extract .tar.gz and move only the dbFilename to path
-    err = extractFileFromTarGz(tmpPath, dbFilename(cfg.Database), path)
-    if err != nil {
-        return nil, gperr.New("failed to extract database from archive").With(err)
-    }
-    // cleanup the tar.gz file
-    _ = os.Remove(tmpPath)
-
-    db, err := maxmindDBOpen(path)
-    if err != nil {
-        return nil, err
-    }
-    return db, nil
-}
-
-func extractFileFromTarGz(tarGzPath, targetFilename, destPath string) error {
-    f, err := os.Open(tarGzPath)
+    tmpGZFile, err := os.OpenFile(tmpGZPath, os.O_CREATE|os.O_RDWR, 0o644)
     if err != nil {
         return err
     }
-    defer f.Close()
 
-    gzr, err := gzip.NewReader(f)
+    // cleanup the tar.gz file
+    defer func() {
+        _ = tmpGZFile.Close()
+        _ = os.Remove(tmpGZPath)
+    }()
+
+    cfg.logger.Info().Msg("MaxMind DB downloading...")
+
+    _, err = io.Copy(tmpGZFile, resp.Body)
+    if err != nil {
+        return err
+    }
+
+    if _, err := tmpGZFile.Seek(0, io.SeekStart); err != nil {
+        return err
+    }
+
+    // extract .tar.gz and to database
+    err = extractFileFromTarGz(tmpGZFile, dbFilename(cfg.Database), tmpDBPath)
+
+    if err != nil {
+        return gperr.New("failed to extract database from archive").With(err)
+    }
+
+    // test if the downloaded database is valid
+    db, err := maxmindDBOpen(tmpDBPath)
+    if err != nil {
+        _ = os.Remove(tmpDBPath)
+        return err
+    }
+
+    db.Close()
+    err = os.Rename(tmpDBPath, dbFile)
+    if err != nil {
+        return err
+    }
+
+    cfg.db.Lock()
+    defer cfg.db.Unlock()
+    if cfg.db.Reader != nil {
+        cfg.db.Reader.Close()
+    }
+    cfg.db.Reader, err = maxmindDBOpen(dbFile)
+    if err != nil {
+        return err
+    }
+
+    lastModifiedStr := resp.Header.Get("Last-Modified")
+    lastModifiedTime, err := time.Parse(http.TimeFormat, lastModifiedStr)
+    if err == nil {
+        cfg.setLastUpdate(lastModifiedTime)
+    }
+
+    cfg.logger.Info().Msg("MaxMind DB downloaded")
+    return nil
+}
+
+func extractFileFromTarGz(tarGzFile *os.File, targetFilename, destPath string) error {
+    defer tarGzFile.Close()
+
+    gzr, err := gzip.NewReader(tarGzFile)
     if err != nil {
         return err
     }
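The reworked download flow writes the archive to a temporary file, seeks back to the start, extracts only the database entry, validates it, renames it into place, and only then swaps the reader under the lock. The body of extractFileFromTarGz is only partially visible above; a self-contained sketch of pulling a single named file out of a tar.gz stream with the standard library (function and path names here are illustrative, not the repository's) could look like this:

```go
package main

import (
    "archive/tar"
    "compress/gzip"
    "errors"
    "fmt"
    "io"
    "os"
)

// extractOne copies the entry named target from an already-open .tar.gz file
// into destPath. Real archives (e.g. MaxMind's) may nest the file under a
// directory, in which case matching on the base name would be needed instead.
func extractOne(archive *os.File, target, destPath string) error {
    gzr, err := gzip.NewReader(archive)
    if err != nil {
        return err
    }
    defer gzr.Close()

    tr := tar.NewReader(gzr)
    for {
        hdr, err := tr.Next()
        if errors.Is(err, io.EOF) {
            return fmt.Errorf("%s not found in archive", target)
        }
        if err != nil {
            return err
        }
        if hdr.Name != target {
            continue // skip everything except the wanted entry
        }
        out, err := os.Create(destPath)
        if err != nil {
            return err
        }
        defer out.Close()
        _, err = io.Copy(out, tr) // stream the entry straight to disk
        return err
    }
}
```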
@@ -1,6 +1,8 @@
 package acl
 
 import (
+    "archive/tar"
+    "compress/gzip"
     "io"
     "net/http"
     "net/http/httptest"
@@ -144,9 +146,17 @@ func Test_MaxMindConfig_download(t *testing.T) {
         logger: zerolog.Nop(),
     }
     server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-        io.Copy(w, strings.NewReader("FAKEMMDB"))
+        gz := gzip.NewWriter(w)
+        t := tar.NewWriter(gz)
+        t.WriteHeader(&tar.Header{
+            Name: dbFilename(MaxMindGeoLite),
+        })
+        t.Write([]byte("1234"))
+        t.Close()
+        gz.Close()
     }))
     defer server.Close()
 
     oldURL := dbURL
     dbURL = func(MaxMindDatabaseType) string { return server.URL }
     defer func() { dbURL = oldURL }()
@@ -163,26 +173,26 @@
     }
     defer func() { maxmindDBOpen = origOpen }()
 
-    rw := &fakeReadCloser{}
+    req, err := http.NewRequest(http.MethodGet, server.URL, nil)
+    if err != nil {
+        t.Fatalf("newReq() error = %v", err)
+    }
+
+    rw := httptest.NewRecorder()
     oldNewReq := newReq
     newReq = func(cfg *MaxMindConfig, method string) (*http.Response, error) {
-        return &http.Response{
-            StatusCode: http.StatusOK,
-            Body:       rw,
-        }, nil
+        server.Config.Handler.ServeHTTP(rw, req)
+        return rw.Result(), nil
     }
     defer func() { newReq = oldNewReq }()
 
-    db, err := cfg.download()
+    err = cfg.download()
     if err != nil {
         t.Fatalf("download() error = %v", err)
     }
-    if db == nil {
+    if cfg.db.Reader == nil {
         t.Error("expected db instance")
     }
-    if !rw.closed {
-        t.Error("expected rw to be closed")
-    }
 }
 
 func Test_MaxMindConfig_loadMaxMindDB(t *testing.T) {
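Instead of handing download() a fake ReadCloser, the test now replays the request against the httptest server's handler and returns the recorded response through the newReq seam. A standalone sketch of that stubbing pattern (the doRequest variable and fetchBody helper are illustrative, not from the repository):

```go
package main

import (
    "fmt"
    "io"
    "net/http"
    "net/http/httptest"
)

// doRequest is the seam a test can override, mirroring how the diff swaps newReq.
var doRequest = func(req *http.Request) (*http.Response, error) {
    return http.DefaultClient.Do(req)
}

func fetchBody(url string) (string, error) {
    req, err := http.NewRequest(http.MethodGet, url, nil)
    if err != nil {
        return "", err
    }
    resp, err := doRequest(req)
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()
    b, err := io.ReadAll(resp.Body)
    return string(b), err
}

func main() {
    handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprint(w, "hello")
    })

    // Stub the HTTP seam: run the handler directly and return the recorded response.
    doRequest = func(req *http.Request) (*http.Response, error) {
        rw := httptest.NewRecorder()
        handler.ServeHTTP(rw, req)
        return rw.Result(), nil
    }

    body, err := fetchBody("http://example.invalid/")
    fmt.Println(body, err) // hello <nil>
}
```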
@@ -1,7 +1,9 @@
 package acl
 
 import (
+    "io"
     "net"
+    "time"
 )
 
 type TCPListener struct {
@@ -9,6 +11,17 @@ type TCPListener struct {
     lis net.Listener
 }
 
+type noConn struct{}
+
+func (noConn) Read(b []byte) (int, error)         { return 0, io.EOF }
+func (noConn) Write(b []byte) (int, error)        { return 0, io.EOF }
+func (noConn) Close() error                       { return nil }
+func (noConn) LocalAddr() net.Addr                { return nil }
+func (noConn) RemoteAddr() net.Addr               { return nil }
+func (noConn) SetDeadline(t time.Time) error      { return nil }
+func (noConn) SetReadDeadline(t time.Time) error  { return nil }
+func (noConn) SetWriteDeadline(t time.Time) error { return nil }
+
 func (cfg *Config) WrapTCP(lis net.Listener) net.Listener {
     if cfg == nil {
         return lis
@@ -32,11 +45,11 @@ func (s *TCPListener) Accept() (net.Conn, error) {
     if !ok {
         // Not a TCPAddr, drop
         c.Close()
-        return nil, nil
+        return noConn{}, nil
     }
     if !s.acl.IPAllowed(addr.IP) {
         c.Close()
-        return nil, nil
+        return noConn{}, nil
     }
     return c, nil
 }
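Returning nil, nil from Accept is problematic because callers such as http.Serve assume a non-nil connection whenever the error is nil; the diff substitutes a no-op net.Conn for dropped peers instead. A minimal standalone sketch of the same idea (the types below are illustrative, not the repository's ACL listener):

```go
package main

import (
    "io"
    "net"
    "time"
)

// nopConn satisfies net.Conn but immediately reports EOF, so a caller just
// sees a connection that closed right away.
type nopConn struct{}

func (nopConn) Read(b []byte) (int, error)       { return 0, io.EOF }
func (nopConn) Write(b []byte) (int, error)      { return 0, io.EOF }
func (nopConn) Close() error                     { return nil }
func (nopConn) LocalAddr() net.Addr              { return nil }
func (nopConn) RemoteAddr() net.Addr             { return nil }
func (nopConn) SetDeadline(time.Time) error      { return nil }
func (nopConn) SetReadDeadline(time.Time) error  { return nil }
func (nopConn) SetWriteDeadline(time.Time) error { return nil }

// filteringListener drops disallowed peers without ever returning (nil, nil).
type filteringListener struct {
    net.Listener
    allowed func(net.IP) bool
}

func (l filteringListener) Accept() (net.Conn, error) {
    c, err := l.Listener.Accept()
    if err != nil {
        return nil, err
    }
    addr, ok := c.RemoteAddr().(*net.TCPAddr)
    if !ok || !l.allowed(addr.IP) {
        c.Close()
        return nopConn{}, nil // never nil, nil: callers expect a usable conn
    }
    return c, nil
}

func main() {
    lis, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(err)
    }
    defer lis.Close()
    wrapped := filteringListener{Listener: lis, allowed: func(ip net.IP) bool { return ip.IsLoopback() }}
    _ = wrapped // pass to http.Serve or a manual Accept loop
}
```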
@@ -53,7 +53,7 @@ func RequireAuth(next http.HandlerFunc) http.HandlerFunc {
 
 func AuthCheckHandler(w http.ResponseWriter, r *http.Request) {
     if err := defaultAuth.CheckToken(r); err != nil {
-        http.Redirect(w, r, "/v1/auth/login", http.StatusFound)
+        defaultAuth.LoginHandler(w, r)
     } else {
         w.WriteHeader(http.StatusOK)
     }
@@ -82,10 +82,6 @@ func (cfg *Config) Validate() gperr.Error {
 }
 
 func (cfg *Config) GetLegoConfig() (*User, *lego.Config, gperr.Error) {
-    if cfg == nil {
-        cfg = new(Config)
-    }
-
     if err := cfg.Validate(); err != nil {
         return nil, nil, err
     }
@@ -76,7 +76,7 @@ func (p *Provider) ObtainCert() error {
         return nil
     }
 
-    if p.cfg.Provider == ProviderLocal {
+    if p.cfg.Provider == ProviderPseudo {
         t := time.NewTicker(1000 * time.Millisecond)
         defer t.Stop()
         logging.Info().Msg("init client for pseudo provider")
@@ -58,6 +58,8 @@ var (
     MetricsDisableDisk    = GetEnvBool("METRICS_DISABLE_DISK", false)
     MetricsDisableNetwork = GetEnvBool("METRICS_DISABLE_NETWORK", false)
     MetricsDisableSensors = GetEnvBool("METRICS_DISABLE_SENSORS", false)
+
+    ForceResolveCountry = GetEnvBool("FORCE_RESOLVE_COUNTRY", false)
 )
 
 func GetEnv[T any](key string, defaultValue T, parser func(string) (T, error)) T {
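FORCE_RESOLVE_COUNTRY is read through the package's generic GetEnv helper; its signature is visible above but its body is not part of this diff. A sketch of what such a helper typically looks like, assuming the usual lookup-and-parse-with-fallback behaviour (this is an assumption, not the repository's implementation):

```go
package main

import (
    "fmt"
    "os"
    "strconv"
)

// GetEnv returns the parsed value of key, falling back to defaultValue when
// the variable is unset, empty, or fails to parse.
func GetEnv[T any](key string, defaultValue T, parser func(string) (T, error)) T {
    raw, ok := os.LookupEnv(key)
    if !ok || raw == "" {
        return defaultValue
    }
    v, err := parser(raw)
    if err != nil {
        return defaultValue
    }
    return v
}

// GetEnvBool specializes GetEnv for booleans, matching the GetEnvBool calls above.
func GetEnvBool(key string, defaultValue bool) bool {
    return GetEnv(key, defaultValue, strconv.ParseBool)
}

func main() {
    os.Setenv("FORCE_RESOLVE_COUNTRY", "true")
    fmt.Println(GetEnvBool("FORCE_RESOLVE_COUNTRY", false)) // true
    fmt.Println(GetEnvBool("MISSING_FLAG", false))          // false
}
```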
@@ -265,6 +265,10 @@ func (cfg *Config) initAutoCert(autocertCfg *autocert.Config) gperr.Error {
         return nil
     }
 
+    if autocertCfg == nil {
+        autocertCfg = new(autocert.Config)
+    }
+
     user, legoCfg, err := autocertCfg.GetLegoConfig()
     if err != nil {
         return err
@@ -4,6 +4,7 @@ import (
     "bufio"
     "io"
     "net/http"
+    "os"
     "sync"
     "time"
 
@@ -53,6 +54,7 @@ type (
 )
 
 const (
+    StdoutbufSize = 64
     MinBufferSize = 4 * kilobyte
     MaxBufferSize = 1 * megabyte
 )
@@ -79,6 +81,22 @@ func NewMockAccessLogger(parent task.Parent, cfg *RequestLoggerConfig) *AccessLogger {
     return NewAccessLoggerWithIO(parent, NewMockFile(), cfg)
 }
 
+func unwrap[Writer any](w io.Writer) []Writer {
+    var result []Writer
+    if unwrapped, ok := w.(MultiWriterInterface); ok {
+        for _, w := range unwrapped.Unwrap() {
+            if unwrapped, ok := w.(Writer); ok {
+                result = append(result, unwrapped)
+            }
+        }
+        return result
+    }
+    if unwrapped, ok := w.(Writer); ok {
+        return []Writer{unwrapped}
+    }
+    return nil
+}
+
 func NewAccessLoggerWithIO(parent task.Parent, writer WriterWithName, anyCfg AnyConfig) *AccessLogger {
     cfg := anyCfg.ToConfig()
     if cfg.BufferSize == 0 {
@@ -90,6 +108,10 @@ func NewAccessLoggerWithIO(parent task.Parent, writer WriterWithName, anyCfg AnyConfig) *AccessLogger {
     if cfg.BufferSize > MaxBufferSize {
         cfg.BufferSize = MaxBufferSize
     }
+    if _, ok := writer.(*os.File); ok {
+        cfg.BufferSize = StdoutbufSize
+    }
+
     l := &AccessLogger{
         task: parent.Subtask("accesslog."+writer.Name(), true),
         cfg:  cfg,
@@ -99,23 +121,8 @@ func NewAccessLoggerWithIO(parent task.Parent, writer WriterWithName, anyCfg AnyConfig) *AccessLogger {
         logger: logging.With().Str("file", writer.Name()).Logger(),
     }
 
-    if unwrapped, ok := writer.(MultiWriterInterface); ok {
-        for _, w := range unwrapped.Unwrap() {
-            if sr, ok := w.(supportRotate); ok {
-                l.supportRotate = append(l.supportRotate, sr)
-            }
-            if closer, ok := w.(io.Closer); ok {
-                l.closer = append(l.closer, closer)
-            }
-        }
-    } else {
-        if sr, ok := writer.(supportRotate); ok {
-            l.supportRotate = append(l.supportRotate, sr)
-        }
-        if closer, ok := writer.(io.Closer); ok {
-            l.closer = append(l.closer, closer)
-        }
-    }
+    l.supportRotate = unwrap[supportRotate](writer)
+    l.closer = unwrap[io.Closer](writer)
 
     if cfg.req != nil {
         fmt := CommonFormatter{cfg: &cfg.req.Fields}
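The duplicated type switches are replaced by the generic unwrap helper added earlier in this diff, which collects every wrapped writer that satisfies a given interface. A standalone sketch of the same pattern (the multiWriter type below is illustrative, not the repository's accesslog types):

```go
package main

import (
    "bytes"
    "fmt"
    "io"
    "os"
)

// multiWriter mimics a writer that fans out to several underlying writers.
type multiWriter struct{ writers []io.Writer }

func (m multiWriter) Write(p []byte) (int, error) {
    for _, w := range m.writers {
        if _, err := w.Write(p); err != nil {
            return 0, err
        }
    }
    return len(p), nil
}

func (m multiWriter) Unwrap() []io.Writer { return m.writers }

type unwrapper interface{ Unwrap() []io.Writer }

// unwrap returns every underlying writer that implements T, or the writer
// itself when it is not a multi-writer.
func unwrap[T any](w io.Writer) []T {
    if mw, ok := w.(unwrapper); ok {
        var result []T
        for _, inner := range mw.Unwrap() {
            if t, ok := inner.(T); ok {
                result = append(result, t)
            }
        }
        return result
    }
    if t, ok := w.(T); ok {
        return []T{t}
    }
    return nil
}

func main() {
    w := multiWriter{writers: []io.Writer{os.Stdout, &bytes.Buffer{}}}
    closers := unwrap[io.Closer](w) // only os.Stdout implements io.Closer
    fmt.Println(len(closers))       // 1
}
```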
@@ -83,9 +83,6 @@ func (disp *Dispatcher) start() {
 }
 
 func (disp *Dispatcher) dispatch(msg *LogMessage) {
-    if true {
-        return
-    }
     task := disp.task.Subtask("dispatcher")
     defer task.Finish("notif dispatched")
 
@@ -60,7 +60,7 @@ func Ver(major, minor, patch int) Version {
 }
 
 func (v Version) String() string {
-    return fmt.Sprintf("%d.%d.%d", v.Generation, v.Major, v.Minor)
+    return fmt.Sprintf("v%d.%d.%d", v.Generation, v.Major, v.Minor)
 }
 
 func (v Version) MarshalText() ([]byte, error) {
@@ -101,17 +101,17 @@ func ParseVersion(v string) (ver Version) {
     if len(parts) != 3 {
         return
     }
-    major, err := strconv.Atoi(parts[0])
+    gen, err := strconv.Atoi(parts[0])
     if err != nil {
         return
     }
-    minor, err := strconv.Atoi(parts[1])
+    major, err := strconv.Atoi(parts[1])
     if err != nil {
         return
     }
-    patch, err := strconv.Atoi(parts[2])
+    minor, err := strconv.Atoi(parts[2])
     if err != nil {
         return
     }
-    return Ver(major, minor, patch)
+    return Ver(gen, major, minor)
 }
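ParseVersion now maps the three dot-separated fields onto Generation, Major and Minor (the Ver constructor's parameters are still named major, minor, patch), and String gained the leading "v". A standalone sketch of the corrected round trip under those assumptions (this mirrors, but is not, the repository's Version type):

```go
package main

import (
    "fmt"
    "strconv"
    "strings"
)

type Version struct{ Generation, Major, Minor int }

func (v Version) String() string {
    return fmt.Sprintf("v%d.%d.%d", v.Generation, v.Major, v.Minor)
}

// ParseVersion reads "v<generation>.<major>.<minor>" (the leading "v" is
// optional) and returns the zero Version on any malformed input.
func ParseVersion(s string) (ver Version) {
    parts := strings.Split(strings.TrimPrefix(s, "v"), ".")
    if len(parts) != 3 {
        return
    }
    nums := make([]int, 3)
    for i, p := range parts {
        n, err := strconv.Atoi(p)
        if err != nil {
            return
        }
        nums[i] = n
    }
    return Version{Generation: nums[0], Major: nums[1], Minor: nums[2]}
}

func main() {
    v := ParseVersion("v0.16.3")
    fmt.Println(v.String()) // v0.16.3
}
```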