diff --git a/config/config.go b/config/config.go index 06f477031..e508839b8 100644 --- a/config/config.go +++ b/config/config.go @@ -103,11 +103,12 @@ type Common struct { MemoryReturnInterval time.Duration `toml:"memory-return-interval" json:"memory-return-interval" comment:"daemon will return the freed memory to the OS when it>0"` HeadersToLog []string `toml:"headers-to-log" json:"headers-to-log" comment:"additional request headers to log"` - BaseWeight int `toml:"base_weight" json:"base_weight" comment:"service discovery base weight (on idle)"` - SDType SDType `toml:"service-discovery-type" json:"service-discovery-type" comment:"service discovery type"` - SD string `toml:"service-discovery" json:"service-discovery" comment:"service discovery address (consul)"` - SDNamespace string `toml:"service-discovery-ns" json:"service-discovery-ns" comment:"service discovery namespace (graphite by default)"` - SDDc []string `toml:"service-discovery-ds" json:"service-discovery-ds" comment:"service discovery datacenters (first - is primary, in other register as backup)"` + BaseWeight int `toml:"base_weight" json:"base_weight" comment:"service discovery base weight (on idle)"` + SDType SDType `toml:"service-discovery-type" json:"service-discovery-type" comment:"service discovery type"` + SD string `toml:"service-discovery" json:"service-discovery" comment:"service discovery address (consul)"` + SDNamespace string `toml:"service-discovery-ns" json:"service-discovery-ns" comment:"service discovery namespace (graphite by default)"` + SDDc []string `toml:"service-discovery-ds" json:"service-discovery-ds" comment:"service discovery datacenters (first - is primary, in other register as backup)"` + SDExpire time.Duration `toml:"service-discovery-expire" json:"service-discovery-expire" comment:"service discovery expire duration for cleanup (minimum is 24h, if enabled)"` FindCacheConfig CacheConfig `toml:"find-cache" json:"find-cache" comment:"find/tags cache config"` @@ -278,11 +279,11 @@ type Carbonlink struct { // Prometheus configuration type Prometheus struct { - Listen string `toml:"listen" json:"listen" comment:"listen addr for prometheus ui and api"` - ExternalURLRaw string `toml:"external-url" json:"external-url" comment:"allows to set URL for redirect manually"` - ExternalURL *url.URL `toml:"-" json:"-"` - PageTitle string `toml:"page-title" json:"page-title"` - LookbackDelta time.Duration `toml:"lookback-delta" json:"lookback-delta"` + Listen string `toml:"listen" json:"listen" comment:"listen addr for prometheus ui and api"` + ExternalURLRaw string `toml:"external-url" json:"external-url" comment:"allows to set URL for redirect manually"` + ExternalURL *url.URL `toml:"-" json:"-"` + PageTitle string `toml:"page-title" json:"page-title"` + LookbackDelta time.Duration `toml:"lookback-delta" json:"lookback-delta"` RemoteReadConcurrencyLimit int `toml:"remote-read-concurrency-limit" json:"remote-read-concurrency-limit" comment:"concurrently handled remote read requests"` } @@ -393,10 +394,10 @@ func New() *Config { TotalTimeout: 500 * time.Millisecond, }, Prometheus: Prometheus{ - ExternalURLRaw: "", - PageTitle: "Prometheus Time Series Collection and Processing Server", - Listen: ":9092", - LookbackDelta: 5 * time.Minute, + ExternalURLRaw: "", + PageTitle: "Prometheus Time Series Collection and Processing Server", + Listen: ":9092", + LookbackDelta: 5 * time.Minute, RemoteReadConcurrencyLimit: 10, }, Debug: Debug{ @@ -722,6 +723,9 @@ func (c *Config) NeedLoadAvgColect() bool { if c.Common.SDNamespace 
== "" { c.Common.SDNamespace = "graphite" } + if c.Common.SDExpire < 24*time.Hour { + c.Common.SDExpire = 24 * time.Hour + } return true } if c.ClickHouse.RenderAdaptiveQueries > 0 { diff --git a/doc/config.md b/doc/config.md index 52dfa0fe9..ca4b85b84 100644 --- a/doc/config.md +++ b/doc/config.md @@ -202,6 +202,8 @@ Only one tag used as filter for index field Tag1, see graphite_tagged table [str service-discovery-ns = "" # service discovery datacenters (first - is primary, in other register as backup) service-discovery-ds = [] + # service discovery expire duration for cleanup (minimum is 24h, if enabled) + service-discovery-expire = "0s" # find/tags cache config [common.find-cache] diff --git a/go.mod b/go.mod index a38f2f317..0d53058f4 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/lomik/zapwriter v0.0.0-20210624082824-c1161d1eb463 github.com/msaf1980/go-expirecache v0.0.2 github.com/msaf1980/go-metrics v0.0.14 - github.com/msaf1980/go-stringutils v0.1.4 + github.com/msaf1980/go-stringutils v0.1.6 github.com/msaf1980/go-syncutils v0.0.3 github.com/msaf1980/go-timeutils v0.0.3 github.com/pelletier/go-toml v1.9.5 diff --git a/go.sum b/go.sum index c650fb765..1bb9392bd 100644 --- a/go.sum +++ b/go.sum @@ -585,8 +585,8 @@ github.com/msaf1980/go-metrics v0.0.11/go.mod h1:8VcR8MdyvIJpcVLOVFKbhb27+60tXy0 github.com/msaf1980/go-metrics v0.0.14 h1:gD0kCG5MDbon33Nkz49yW6kz3yu0DHzDN0SxjGTWlTA= github.com/msaf1980/go-metrics v0.0.14/go.mod h1:8VcR8MdyvIJpcVLOVFKbhb27+60tXy0M+zq7Ag8a6Pw= github.com/msaf1980/go-stringutils v0.1.2/go.mod h1:AxmV/6JuQUAtZJg5XmYATB5ZwCWgtpruVHY03dswRf8= -github.com/msaf1980/go-stringutils v0.1.4 h1:UwsIT0hplHVucqbknk3CoNqKkmIuSHhsbBldXxyld5U= -github.com/msaf1980/go-stringutils v0.1.4/go.mod h1:AxmV/6JuQUAtZJg5XmYATB5ZwCWgtpruVHY03dswRf8= +github.com/msaf1980/go-stringutils v0.1.6 h1:qri8o+4XLJCJYemHcvJY6xJhrGTmllUoPwayKEj4NSg= +github.com/msaf1980/go-stringutils v0.1.6/go.mod h1:xpicaTIpLAVzL0gUQkciB1zjypDGKsOCI25cKQbRQYA= github.com/msaf1980/go-syncutils v0.0.3 h1:bd6+yTSB8/CmpG7M6j1gq5sJMyPqecjJcBf19s2Y6u4= github.com/msaf1980/go-syncutils v0.0.3/go.mod h1:zoZwQNkDATcfKq5lQPK6dmJT7Z01COxw/vd8bcJyC9w= github.com/msaf1980/go-timeutils v0.0.3 h1:c0NIpJBcU6KoMeMCPdnbGFcaP4sm7VCwoW1cdgsmUkU= diff --git a/graphite-clickhouse.go b/graphite-clickhouse.go index 3f89efb2f..8ef80d24c 100644 --- a/graphite-clickhouse.go +++ b/graphite-clickhouse.go @@ -14,6 +14,7 @@ import ( "os/signal" "runtime" "runtime/debug" + "strings" "sync" "syscall" "time" @@ -33,7 +34,6 @@ import ( "github.com/lomik/graphite-clickhouse/prometheus" "github.com/lomik/graphite-clickhouse/render" "github.com/lomik/graphite-clickhouse/sd" - "github.com/lomik/graphite-clickhouse/sd/nginx" "github.com/lomik/graphite-clickhouse/tagger" ) @@ -108,7 +108,10 @@ func main() { ) sdList := flag.Bool("sd-list", false, "List registered nodes in SD") - sdClean := flag.Bool("sd-clean", false, "Cleanup registered nodes in SD") + sdDelete := flag.Bool("sd-delete", false, "Delete registered nodes for this hostname in SD") + sdEvict := flag.String("sd-evict", "", "Delete registered nodes for hostname in SD") + sdClean := flag.Bool("sd-clean", false, "Cleanup expired registered nodes in SD") + sdExpired := flag.Bool("sd-expired", false, "List expired registered nodes in SD") printVersion := flag.Bool("version", false, "Print version") verbose := flag.Bool("verbose", false, "Verbose (print config on startup)") @@ -137,35 +140,61 @@ func main() { return } - if *sdList || *sdClean { + if *sdEvict != "" { 
if cfg.Common.SD != "" && cfg.NeedLoadAvgColect() { - var sd sd.SD + var s sd.SD logger := zapwriter.Default() - switch cfg.Common.SDType { - case config.SDNginx: - sd = nginx.New(cfg.Common.SD, cfg.Common.SDNamespace, "", logger) - default: - panic(fmt.Errorf("service discovery type %q can be registered", cfg.Common.SDType.String())) + if s, err = sd.New(&cfg.Common, *sdEvict, logger); err != nil { + fmt.Fprintf(os.Stderr, "service discovery type %q is not registered\n", cfg.Common.SDType.String()) + os.Exit(1) } - ts := time.Now().Unix() - 3600 - if nodes, err := sd.Nodes(); err == nil { - for _, node := range nodes { - if *sdClean && node.Flags > 0 { - if ts > node.Flags { - fmt.Printf("%s: %s (%s), deleted\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) - // sd.Delete(node.Key, node.Value) - } else { - fmt.Printf("%s: %s (%s)\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) - } - } else { - fmt.Printf("%s: %s (%s)\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) - } + err = s.Clear("", "") + } + return + } else if *sdList || *sdDelete || *sdExpired || *sdClean { + if cfg.Common.SD != "" && cfg.NeedLoadAvgColect() { + var s sd.SD + logger := zapwriter.Default() + if s, err = sd.New(&cfg.Common, "", logger); err != nil { + fmt.Fprintf(os.Stderr, "service discovery type %q is not registered\n", cfg.Common.SDType.String()) + os.Exit(1) + } + + // sdList := flag.Bool("sd-list", false, "List registered nodes in SD") + // sdDelete := flag.Bool("sd-delete", false, "Delete registered nodes for this hostname in SD") + // sdEvict := flag.String("sd-evict", "", "Delete registered nodes for hostname in SD") + // sdClean := flag.Bool("sd-clean", false, "Cleanup expired registered nodes in SD") + + if *sdDelete { + hostname, _ := os.Hostname() + hostname, _, _ = strings.Cut(hostname, ".") + if err = s.Clear("", ""); err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) + } + } else if *sdExpired { + if err = sd.Cleanup(&cfg.Common, s, true); err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) + } + } else if *sdClean { + if err = sd.Cleanup(&cfg.Common, s, false); err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) } } else { - log.Fatal(err) + if nodes, err := s.Nodes(); err == nil { + for _, node := range nodes { + fmt.Printf("%s/%s: %s (%s)\n", s.Namespace(), node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) + } + } else { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) + } } } else { fmt.Fprintln(os.Stderr, "SD not enabled") + os.Exit(1) } return } @@ -288,29 +317,30 @@ func main() { } }() + if cfg.Common.SD != "" && cfg.NeedLoadAvgColect() { + go func() { + time.Sleep(time.Millisecond * 100) + sdLogger := localManager.Logger("service discovery") + sd.Register(&cfg.Common, sdLogger) + }() + } + go func() { stop := make(chan os.Signal, 1) signal.Notify(stop, syscall.SIGTERM, syscall.SIGINT) <-stop logger.Info("stoping graphite-clickhouse") - if cfg.Common.SDType != config.SDNone { + if cfg.Common.SD != "" { // unregister SD sd.Stop() time.Sleep(10 * time.Second) } // initiating the shutdown - ctx, _ := context.WithTimeout(context.Background(), time.Second*10) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) srv.Shutdown(ctx) + cancel() }() - if cfg.Common.SD != "" { - go func() { - time.Sleep(time.Millisecond * 100) - sdLogger := localManager.Logger("service discovery") - sd.Register(cfg, sdLogger) - }() -
} - exitWait.Wait() logger.Info("stop graphite-clickhouse") diff --git a/load_avg/load_avg.go b/load_avg/load_avg.go index c3cf8118a..43b6080b4 100644 --- a/load_avg/load_avg.go +++ b/load_avg/load_avg.go @@ -19,15 +19,21 @@ func Store(f float64) { } func Weight(n int, l float64) int64 { + if n <= 0 || l >= 2.0 { + return 1 + } // (1 / normalized_load_avg - 1) l = math.Round(10*l) / 10 if l == 0 { return 2 * int64(n) } + if l > 1.0 { + l *= 4 + } l = math.Log10(l) w := int64(n) - int64(float64(n)*l) - if w < 0 { - return 0 + if w <= 0 { + return 1 } return w } diff --git a/load_avg/load_avg_test.go b/load_avg/load_avg_test.go index b569a162c..8c5324a34 100644 --- a/load_avg/load_avg_test.go +++ b/load_avg/load_avg_test.go @@ -19,12 +19,12 @@ func TestWeight(t *testing.T) { {n: 100, l: 0.5, want: 130}, {n: 100, l: 0.9, want: 104}, {n: 100, l: 1, want: 100}, - {n: 100, l: 1.1, want: 96}, - {n: 100, l: 2, want: 70}, - {n: 100, l: 4, want: 40}, - {n: 100, l: 9, want: 5}, - {n: 100, l: 10, want: 0}, - {n: 100, l: 20, want: 0}, + {n: 100, l: 1.1, want: 36}, + {n: 100, l: 1.9, want: 12}, + {n: 100, l: 2, want: 1}, + {n: 100, l: 9, want: 1}, + {n: 100, l: 10, want: 1}, + {n: 100, l: 20, want: 1}, // n : 1000 {n: 1000, l: 0, want: 2000}, {n: 1000, l: 0.1, want: 1999}, @@ -33,12 +33,14 @@ func TestWeight(t *testing.T) { {n: 1000, l: 0.5, want: 1301}, {n: 1000, l: 0.9, want: 1045}, {n: 1000, l: 1, want: 1000}, - {n: 1000, l: 1.1, want: 959}, - {n: 1000, l: 2, want: 699}, - {n: 1000, l: 4, want: 398}, - {n: 1000, l: 9, want: 46}, - {n: 1000, l: 10, want: 0}, - {n: 1000, l: 20, want: 0}, + {n: 1000, l: 1.1, want: 357}, + {n: 1000, l: 1.9, want: 120}, + {n: 1000, l: 2, want: 1}, + {n: 1000, l: 3, want: 1}, + {n: 1000, l: 4, want: 1}, + {n: 1000, l: 9, want: 1}, + {n: 1000, l: 10, want: 1}, + {n: 1000, l: 20, want: 1}, } for _, tt := range tests { t.Run(fmt.Sprintf("%d#%f", tt.n, tt.l), func(t *testing.T) { diff --git a/sd/nginx/nginx.go b/sd/nginx/nginx.go index 4729d2298..09d4d7077 100644 --- a/sd/nginx/nginx.go +++ b/sd/nginx/nginx.go @@ -15,14 +15,12 @@ import ( ) type ErrInvalidKey struct { + key string val string } func (e ErrInvalidKey) Error() string { - if e.val == "" { - return "list key is invalid" - } - return "list key is invalid: '" + e.val + "'" + return "list key '" + e.key + "' is invalid: '" + e.val + "'" } var ( @@ -50,6 +48,7 @@ func splitNode(node string) (dc, host, listen string, ok bool) { type Nginx struct { weight int64 hostname string + namespace string body []byte backupBody []byte url stringutils.Builder @@ -60,14 +59,18 @@ type Nginx struct { } func New(url, namespace, hostname string, logger *zap.Logger) *Nginx { + if namespace == "" { + namespace = "graphite" + } sd := &Nginx{ logger: logger, body: make([]byte, 128), backupBody: []byte(`{"backup":1,"max_fails":0}`), nsEnd: "upstreams/" + namespace + "/", hostname: hostname, + namespace: namespace, } - sd.setWeight(0) + sd.setWeight(1) sd.url.WriteString(url) sd.url.WriteByte('/') @@ -81,6 +84,9 @@ func New(url, namespace, hostname string, logger *zap.Logger) *Nginx { } func (sd *Nginx) setWeight(weight int64) { + if weight <= 0 { + weight = 1 + } if sd.weight != weight { sd.weight = weight sd.body = sd.body[:0] @@ -90,6 +96,10 @@ func (sd *Nginx) setWeight(weight int64) { } } +func (sd *Nginx) Namespace() string { + return sd.namespace +} + func (sd *Nginx) List() (nodes []string, err error) { sd.url.Truncate(sd.pos) sd.url.WriteString("?recurse") @@ -115,7 +125,7 @@ func (sd *Nginx) List() (nodes []string, err error) { 
nodes = append(nodes, s) } } else { - return nil, ErrInvalidKey{s} + return nil, ErrInvalidKey{key: sd.nsEnd, val: s} } } else { return nil, ErrNoKey @@ -166,7 +176,7 @@ func (sd *Nginx) ListMap() (nodes map[string]string, err error) { } } } else { - return nil, ErrInvalidKey{s} + return nil, ErrInvalidKey{key: sd.nsEnd, val: s} } } else { return nil, ErrNoKey @@ -222,7 +232,7 @@ func (sd *Nginx) Nodes() (nodes []utils.KV, err error) { } nodes = append(nodes, kv) } else { - return nil, ErrInvalidKey{s} + return nil, ErrInvalidKey{key: sd.nsEnd, val: s} } } else { return nil, ErrNoKey @@ -304,6 +314,17 @@ func (sd *Nginx) Update(ip, port string, dc []string, weight int64) error { return sd.update(ip, port, dc) } +func (sd *Nginx) DeleteNode(node string) (err error) { + sd.url.Truncate(sd.pos) + sd.url.WriteString(node) + + if err = utils.HttpDelete(sd.url.String()); err != nil { + sd.logger.Error("delete", zap.String("address", sd.url.String()[sd.pos:]), zap.Error(err)) + } + + return +} + func (sd *Nginx) Delete(ip, port string, dc []string) (err error) { if len(dc) == 0 { sd.url.Truncate(sd.pos) diff --git a/sd/nginx/nginx_test.go b/sd/nginx/nginx_test.go index 076c4736e..bc8eba35b 100644 --- a/sd/nginx/nginx_test.go +++ b/sd/nginx/nginx_test.go @@ -35,7 +35,7 @@ func TestNginx(t *testing.T) { logger := zapwriter.Default() sd1 := New("http://127.0.0.1:8500/v1/kv/upstreams", "graphite", hostname1, logger) - sd2 := New("http://127.0.0.1:8500/v1/kv/upstreams", "graphite", hostname2, logger) + sd2 := New("http://127.0.0.1:8500/v1/kv/upstreams", "", hostname2, logger) err := sd1.Clear("", "") require.True(t, err == nil || err == utils.ErrNotFound, err) @@ -170,7 +170,7 @@ func TestNginxDC(t *testing.T) { logger := zapwriter.Default() - sd1 := New("http://127.0.0.1:8500/v1/kv/upstreams", "graphite", hostname1, logger) + sd1 := New("http://127.0.0.1:8500/v1/kv/upstreams", "", hostname1, logger) sd2 := New("http://127.0.0.1:8500/v1/kv/upstreams", "graphite", hostname2, logger) err := sd1.Clear("", "") diff --git a/sd/nginx/tests/nginx_cleanup_test.go b/sd/nginx/tests/nginx_cleanup_test.go new file mode 100644 index 000000000..b8f95c246 --- /dev/null +++ b/sd/nginx/tests/nginx_cleanup_test.go @@ -0,0 +1,171 @@ +//go:build test_sd +// +build test_sd + +package nginx_test + +import ( + "testing" + "time" + + "github.com/lomik/graphite-clickhouse/config" + "github.com/lomik/graphite-clickhouse/sd" + "github.com/lomik/graphite-clickhouse/sd/utils" + "github.com/lomik/zapwriter" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + hostname1 = "test_host1" + ip1 = "192.168.0.1" + hostname2 = "test_host2" + ip2 = "192.168.1.25" + port = ":9090" + dc1 = []string{"dc1", "dc2", "dc3"} + dc2 = []string{"dc2", "dc1", "dc3"} + hostname3 = "test_host3" + + nilStringSlice []string +) + +func cleanup(nodes []utils.KV, start, end int64) { + for i := range nodes { + if nodes[i].Flags >= start && nodes[i].Flags <= end { + nodes[i].Flags = start + } + } +} + +func TestNginxExpire(t *testing.T) { + logger := zapwriter.Default() + + cfg := &config.Common{ + SDType: config.SDNginx, + SD: "http://127.0.0.1:8500/v1/kv/upstreams", + SDNamespace: "graphite", //default + SDExpire: time.Second * 5, + } + + sd1, _ := sd.New(cfg, hostname1, logger) + sd2, _ := sd.New(cfg, hostname2, logger) + + err := sd1.Clear("", "") + require.True(t, err == nil || err == utils.ErrNotFound, err) + err = sd2.Clear("", "") + require.True(t, err == nil || err == utils.ErrNotFound, err) + + nodes, err := 
sd1.List() + require.True(t, err == nil || err == utils.ErrNotFound, err) + assert.Equal(t, nilStringSlice, nodes) + nodes, err = sd2.List() + require.True(t, err == nil || err == utils.ErrNotFound, err) + assert.Equal(t, nilStringSlice, nodes) + + // check cleanup expired + start := time.Now().Unix() + require.NoError(t, sd1.Update(ip1, port, nil, 10)) + time.Sleep(cfg.SDExpire + time.Second) + require.NoError(t, sd2.Update(ip2, port, nil, 10)) + nodesV, err := sd1.Nodes() + end := time.Now().Unix() + require.NoError(t, err) + // reset timestamp for compare + cleanup(nodesV, start, end) + assert.Equal( + t, + []utils.KV{ + {Key: "_/test_host1/192.168.0.1:9090", Value: "{\"weight\":10,\"max_fails\":0}", Flags: start}, + {Key: "_/test_host2/192.168.1.25:9090", Value: "{\"weight\":10,\"max_fails\":0}", Flags: start}, + }, + nodesV, + "start = %d, end = %d", start, end, + ) + + sd.Cleanup(cfg, sd1, false) + nodesV, err = sd1.Nodes() + require.NoError(t, err) + // reset timestamp for compare + cleanup(nodesV, start, end) + assert.Equal( + t, + []utils.KV{ + {Key: "_/test_host2/192.168.1.25:9090", Value: "{\"weight\":10,\"max_fails\":0}", Flags: start}, + }, + nodesV, + "start = %d, end = %d", start, end, + ) +} + +func TestNginxExpireDC(t *testing.T) { + logger := zapwriter.Default() + + cfg1 := &config.Common{ + SDType: config.SDNginx, + SD: "http://127.0.0.1:8500/v1/kv/upstreams", + SDNamespace: "graphite", //default + SDDc: dc1, + SDExpire: time.Second * 5, + } + sd1, _ := sd.New(cfg1, hostname1, logger) + + cfg2 := &config.Common{ + SDType: config.SDNginx, + SD: "http://127.0.0.1:8500/v1/kv/upstreams", + SDNamespace: "", //default + SDDc: dc2, + SDExpire: time.Second * 5, + } + sd2, _ := sd.New(cfg2, hostname2, logger) + + err := sd1.Clear("", "") + require.True(t, err == nil || err == utils.ErrNotFound, err) + err = sd2.Clear("", "") + require.True(t, err == nil || err == utils.ErrNotFound, err) + + nodes, err := sd1.List() + require.True(t, err == nil || err == utils.ErrNotFound, err) + assert.Equal(t, nilStringSlice, nodes) + nodes, err = sd2.List() + require.True(t, err == nil || err == utils.ErrNotFound, err) + assert.Equal(t, nilStringSlice, nodes) + + // check cleanup expired + start := time.Now().Unix() + require.NoError(t, sd1.Update(ip1, port, dc1, 10)) + time.Sleep(cfg1.SDExpire + time.Second) + require.NoError(t, sd2.Update(ip2, port, dc2, 10)) + nodesV, err := sd1.Nodes() + end := time.Now().Unix() + require.NoError(t, err) + // reset timestamp for compare + cleanup(nodesV, start, end) + assert.Equal( + t, + []utils.KV{ + {Key: "dc1/test_host1/192.168.0.1:9090", Value: "{\"weight\":10,\"max_fails\":0}", Flags: start}, + {Key: "dc1/test_host2/192.168.1.25:9090", Value: "{\"backup\":1,\"max_fails\":0}", Flags: start}, + {Key: "dc2/test_host1/192.168.0.1:9090", Value: "{\"backup\":1,\"max_fails\":0}", Flags: start}, + {Key: "dc2/test_host2/192.168.1.25:9090", Value: "{\"weight\":10,\"max_fails\":0}", Flags: start}, + {Key: "dc3/test_host1/192.168.0.1:9090", Value: "{\"backup\":1,\"max_fails\":0}", Flags: start}, + {Key: "dc3/test_host2/192.168.1.25:9090", Value: "{\"backup\":1,\"max_fails\":0}", Flags: start}, + }, + nodesV, + "start = %d, end = %d", start, end, + ) + + sd.Cleanup(cfg1, sd1, false) + nodesV, err = sd1.Nodes() + require.NoError(t, err) + // reset timestamp for compare + cleanup(nodesV, start, end) + assert.Equal( + t, + []utils.KV{ + {Key: "dc1/test_host2/192.168.1.25:9090", Value: "{\"backup\":1,\"max_fails\":0}", Flags: start}, + {Key: 
"dc2/test_host2/192.168.1.25:9090", Value: "{\"weight\":10,\"max_fails\":0}", Flags: start}, + {Key: "dc3/test_host2/192.168.1.25:9090", Value: "{\"backup\":1,\"max_fails\":0}", Flags: start}, + }, + nodesV, + "start = %d, end = %d", start, end, + ) +} diff --git a/sd/register.go b/sd/register.go index fea1cab85..a793bd799 100644 --- a/sd/register.go +++ b/sd/register.go @@ -1,6 +1,8 @@ package sd import ( + "errors" + "fmt" "os" "strings" "time" @@ -22,15 +24,31 @@ var ( type SD interface { // Update update node record Update(listenIP, listenPort string, dc []string, weight int64) error - // Delete delete node record (with previous IP/port) - Delete(ip, port string, dc []string) error + // Delete delete node record (with ip/port/dcs) + Delete(ip, port string, dcs []string) error + // Delete delete node record + DeleteNode(node string) (err error) // Clear clear node record (all except with current listen IP/port) Clear(listenIP, listenPort string) error - // Nodes return all registered nodes + // Nodes return all registered nodes (for all hostnames in namespace) Nodes() (nodes []utils.KV, err error) + // List return all registered nodes for hostname + List() (nodes []string, err error) + // Namespace return namespace + Namespace() string } -func Register(cfg *config.Config, logger *zap.Logger) { +func New(cfg *config.Common, hostname string, logger *zap.Logger) (SD, error) { + switch cfg.SDType { + case config.SDNginx: + sd := nginx.New(cfg.SD, cfg.SDNamespace, hostname, logger) + return sd, nil + default: + return nil, errors.New("serive discovery type not registered") + } +} + +func Register(cfg *config.Common, logger *zap.Logger) { var ( listenIP string prevIP string @@ -40,8 +58,8 @@ func Register(cfg *config.Config, logger *zap.Logger) { load float64 w int64 ) - if cfg.Common.SD != "" { - if strings.HasPrefix(cfg.Common.Listen, ":") { + if cfg.SD != "" { + if strings.HasPrefix(cfg.Listen, ":") { registerFirst = true listenIP = utils.GetLocalIP() prevIP = listenIP @@ -49,10 +67,8 @@ func Register(cfg *config.Config, logger *zap.Logger) { hostname, _ = os.Hostname() hostname, _, _ = strings.Cut(hostname, ".") - switch cfg.Common.SDType { - case config.SDNginx: - sd = nginx.New(cfg.Common.SD, cfg.Common.SDNamespace, hostname, logger) - default: + sd, err = New(cfg, hostname, logger) + if err != nil { panic("serive discovery type not registered") } load, err = load_avg.Normalized() @@ -64,9 +80,9 @@ func Register(cfg *config.Config, logger *zap.Logger) { zap.String("hostname", hostname), ) - w = load_avg.Weight(cfg.Common.BaseWeight, load) - sd.Update(listenIP, cfg.Common.Listen, cfg.Common.SDDc, w) - sd.Clear(listenIP, cfg.Common.Listen) + w = load_avg.Weight(cfg.BaseWeight, load) + sd.Update(listenIP, cfg.Listen, cfg.SDDc, w) + sd.Clear(listenIP, cfg.Listen) } LOOP: for { @@ -75,17 +91,17 @@ LOOP: load_avg.Store(load) } if sd != nil { - w = load_avg.Weight(cfg.Common.BaseWeight, load) + w = load_avg.Weight(cfg.BaseWeight, load) if registerFirst { // if listen on all ip, try to register with first ip listenIP = utils.GetLocalIP() } - sd.Update(listenIP, cfg.Common.Listen, cfg.Common.SDDc, w) + sd.Update(listenIP, cfg.Listen, cfg.SDDc, w) if prevIP != listenIP { - sd.Delete(prevIP, cfg.Common.Listen, cfg.Common.SDDc) + sd.Delete(prevIP, cfg.Listen, cfg.SDDc) prevIP = listenIP } } @@ -115,3 +131,30 @@ LOOP: func Stop() { stop <- struct{}{} } + +func Cleanup(cfg *config.Common, sd SD, checkOnly bool) error { + if cfg.SD != "" && cfg.SDExpire > 0 { + ts := time.Now().Unix() - 
int64(cfg.SDExpire.Seconds()) + if nodes, err := sd.Nodes(); err == nil { + for _, node := range nodes { + if node.Flags > 0 { + if ts > node.Flags { + if checkOnly { + fmt.Printf("%s: %s (%s), expired\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) + } else { + if err = sd.DeleteNode(node.Key); err != nil { + return err + } + fmt.Printf("%s: %s (%s), deleted\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) + } + } + } else { + fmt.Printf("%s: %s (%s)\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) + } + } + } else { + return err + } + } + return nil +} diff --git a/sd/utils/utils.go b/sd/utils/utils.go index defe3d907..8b0a8bd1f 100644 --- a/sd/utils/utils.go +++ b/sd/utils/utils.go @@ -33,7 +33,7 @@ func HttpGet(url string) ([]byte, error) { if resp.StatusCode == http.StatusNotFound { return nil, ErrNotFound } - if resp.StatusCode >= 300 { + if resp.StatusCode != 200 { return nil, errs.NewErrorWithCode(string(data), resp.StatusCode) } return data, err @@ -54,7 +54,7 @@ func HttpPut(url string, body []byte) error { if resp.StatusCode == http.StatusNotFound { return ErrNotFound } - if resp.StatusCode >= 300 { + if resp.StatusCode != 200 { data, _ := io.ReadAll(resp.Body) return errs.NewErrorWithCode(string(data), resp.StatusCode) } @@ -75,7 +75,7 @@ func HttpDelete(url string) error { if resp.StatusCode == http.StatusNotFound { return ErrNotFound } - if resp.StatusCode >= 300 { + if resp.StatusCode != 200 { data, _ := io.ReadAll(resp.Body) return errs.NewErrorWithCode(string(data), resp.StatusCode) } diff --git a/vendor/github.com/msaf1980/go-stringutils/bytes.go b/vendor/github.com/msaf1980/go-stringutils/bytes.go new file mode 100644 index 000000000..4ff9b556d --- /dev/null +++ b/vendor/github.com/msaf1980/go-stringutils/bytes.go @@ -0,0 +1,65 @@ +package stringutils + +// ToLowerBytes converts ascii slice to lower-case in-place. +func ToLowerBytes(b []byte) []byte { + for i := 0; i < len(b); i++ { + b[i] = toLowerTable[b[i]] + } + return b +} + +// ToUpperBytes converts ascii slice to upper-case in-place. 
+func ToUpperBytes(b []byte) []byte { + for i := 0; i < len(b); i++ { + b[i] = toUpperTable[b[i]] + } + return b +} + +// TrimRightBytes is the equivalent of bytes.TrimRight +func TrimRightBytes(b []byte, cutset byte) []byte { + lenStr := len(b) + for lenStr > 0 && b[lenStr-1] == cutset { + lenStr-- + } + return b[:lenStr] +} + +// TrimLeftBytes is the equivalent of bytes.TrimLeft +func TrimLeftBytes(b []byte, cutset byte) []byte { + lenStr, start := len(b), 0 + for start < lenStr && b[start] == cutset { + start++ + } + return b[start:] +} + +// TrimBytes is the equivalent of bytes.Trim +func TrimBytes(b []byte, cutset byte) []byte { + i, j := 0, len(b)-1 + for ; i <= j; i++ { + if b[i] != cutset { + break + } + } + for ; i < j; j-- { + if b[j] != cutset { + break + } + } + + return b[i : j+1] +} + +// EqualFoldBytes tests ascii slices for equality case-insensitively +func EqualFoldBytes(b, s []byte) bool { + if len(b) != len(s) { + return false + } + for i := len(b) - 1; i >= 0; i-- { + if toUpperTable[b[i]] != toUpperTable[s[i]] { + return false + } + } + return true +} diff --git a/vendor/github.com/msaf1980/go-stringutils/case.go b/vendor/github.com/msaf1980/go-stringutils/case.go index ec56c642b..3af629238 100644 --- a/vendor/github.com/msaf1980/go-stringutils/case.go +++ b/vendor/github.com/msaf1980/go-stringutils/case.go @@ -129,3 +129,43 @@ func (sb *Builder) WriteStringLower(s string) { sb.Map(unicode.ToLower, s) } } + +const ( + toLowerTable = "\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@abcdefghijklmnopqrstuvwxyz[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\u007f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + toUpperTable = "\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`ABCDEFGHIJKLMNOPQRSTUVWXYZ{|}~\u007f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" +) + +// ToLower converts ascii string to lower-case +func ToLower(b string) string { + res := make([]byte, len(b)) + copy(res, b) + for i := 0; i < len(res); i++ { + res[i] = toLowerTable[res[i]] + } + + return UnsafeString(res) +} + +// ToUpper converts ascii string to upper-case +func ToUpper(b string) string { + res := make([]byte, len(b)) + copy(res, b) + for i := 0; i < len(res); i++ { + res[i] = toUpperTable[res[i]] + } + + return UnsafeString(res) +} + +// EqualFold tests ascii strings for equality case-insensitively +func EqualFold(b, s string) bool { + if len(b) != len(s) { + 
return false + } + for i := len(b) - 1; i >= 0; i-- { + if toUpperTable[b[i]] != toUpperTable[s[i]] { + return false + } + } + return true +} diff --git a/vendor/github.com/msaf1980/go-stringutils/clone.go b/vendor/github.com/msaf1980/go-stringutils/clone.go index 085059c16..e7ceb7987 100644 --- a/vendor/github.com/msaf1980/go-stringutils/clone.go +++ b/vendor/github.com/msaf1980/go-stringutils/clone.go @@ -18,3 +18,22 @@ func Clone(s string) string { copy(b, s) return UnsafeString(b) } + +// Clone returns a fresh copy of s. +// It guarantees to make a copy of s into a new allocation, +// which can be important when retaining only a small substring +// of a much larger string. Using Clone can help such programs +// use less memory. Of course, since using Clone makes a copy, +// overuse of Clone can make programs use more memory. +// Clone should typically be used only rarely, and only when +// profiling indicates that it is needed. +// For strings of length zero the string "" will be returned +// and no allocation is made. +func CloneBytes(b []byte) []byte { + if len(b) == 0 { + return nil + } + out := make([]byte, len(b)) + copy(out, b) + return out +} diff --git a/vendor/github.com/msaf1980/go-stringutils/stringbuilder.go b/vendor/github.com/msaf1980/go-stringutils/stringbuilder.go index 75078ebcf..9a08248e6 100644 --- a/vendor/github.com/msaf1980/go-stringutils/stringbuilder.go +++ b/vendor/github.com/msaf1980/go-stringutils/stringbuilder.go @@ -8,8 +8,7 @@ import ( // It minimizes memory copying. The zero value is ready to use. // Do not copy a non-zero Builder. type Builder struct { - data []byte - length int + data []byte } // grow scale factor for needed resize @@ -17,7 +16,7 @@ const scaleFactor = 2 // Len returns the number of accumulated bytes; b.Len() == len(b.String()). func (sb *Builder) Len() int { - return sb.length + return len(sb.data) } // Cap returns the capacity of the builder's underlying byte slice. It is the @@ -29,15 +28,15 @@ func (sb *Builder) Cap() int { // Bytes returns the accumulated bytes. func (sb *Builder) Bytes() []byte { - return sb.data[0:sb.length] + return sb.data } // String returns the accumulated string. func (sb *Builder) String() string { - if sb.length == 0 { + if len(sb.data) == 0 { return "" } - return UnsafeStringFromPtr(&sb.data[0], sb.length) + return UnsafeStringFromPtr(&sb.data[0], len(sb.data)) } // Grow grows b's capacity, if necessary, to guarantee space for @@ -46,22 +45,23 @@ func (sb *Builder) String() string { func (sb *Builder) Grow(capacity int) { if capacity > sb.Cap() { b := make([]byte, capacity) - copy(b, sb.data[:sb.length]) - sb.data = b + copy(b, sb.data) + length := len(sb.data) + sb.data = b[:length] } } // Reset resets the Builder to be empty. func (sb *Builder) Reset() { - if sb.length > 0 { - sb.length = 0 + if len(sb.data) > 0 { + sb.data = sb.data[:0] } } // Truncate descrease the Builder length (dangerouse for partually truncated UTF strings). func (sb *Builder) Truncate(length int) { - if sb.length > length { - sb.length = length + if len(sb.data) > length { + sb.data = sb.data[:length] } } @@ -78,17 +78,7 @@ func (sb *Builder) Write(bytes []byte) (int, error) { if len(bytes) == 0 { return 0, nil } - newlen := sb.length + len(bytes) - if newlen > cap(sb.data) { - scaled := sb.length * scaleFactor - if newlen > scaled { - sb.Grow(newlen) - } else { - sb.Grow(scaled) - } - } - copy(sb.data[sb.length:], bytes) - sb.length += len(bytes) + sb.data = append(sb.data, bytes...) 
return len(bytes), nil } @@ -98,17 +88,7 @@ func (sb *Builder) WriteBytes(bytes []byte) { if len(bytes) == 0 { return } - newlen := sb.length + len(bytes) - if newlen > cap(sb.data) { - scaled := sb.length * scaleFactor - if newlen > scaled { - sb.Grow(newlen) - } else { - sb.Grow(scaled) - } - } - copy(sb.data[sb.length:], bytes) - sb.length += len(bytes) + sb.data = append(sb.data, bytes...) } // WriteString appends the contents of s to b's buffer. @@ -116,32 +96,21 @@ func (sb *Builder) WriteString(s string) (int, error) { if len(s) == 0 { return 0, nil } - newlen := sb.length + len(s) - if newlen > cap(sb.data) { - scaled := sb.length * scaleFactor - if newlen > scaled { - sb.Grow(newlen) - } else { - sb.Grow(scaled) - } - } - copy(sb.data[sb.length:], s) - sb.length += len(s) + sb.data = append(sb.data, s...) return len(s), nil } // WriteByte appends the byte c to b's buffer. func (sb *Builder) WriteByte(c byte) error { - if sb.length == cap(sb.data) { - if sb.length == 0 { - sb.Grow(2 * scaleFactor) - } else { - sb.Grow(sb.length * scaleFactor) - } + if len(sb.data) == 0 { + sb.Grow(2 * scaleFactor) + } else if len(sb.data) == cap(sb.data) { + sb.Grow(len(sb.data) * scaleFactor) } - sb.data[sb.length] = c - sb.length++ + length := len(sb.data) + sb.data = sb.data[:length+1] + sb.data[length] = c return nil } @@ -153,15 +122,18 @@ func (sb *Builder) WriteRune(r rune) (int, error) { return 1, nil } else { - if sb.length+utf8.UTFMax > cap(sb.data) { - if sb.length > 2*utf8.UTFMax { - sb.Grow(sb.length * scaleFactor) + length := len(sb.data) + n := length + utf8.UTFMax + if n > cap(sb.data) { + if length > 2*utf8.UTFMax { + sb.Grow(length * scaleFactor) } else { - sb.Grow(sb.length + utf8.UTFMax*scaleFactor) + sb.Grow(length + utf8.UTFMax*scaleFactor) } } - length := utf8.EncodeRune(sb.data[sb.length:sb.length+utf8.UTFMax], r) - sb.length += length + sb.data = sb.data[:n] + n = utf8.EncodeRune(sb.data[length:], r) + sb.data = sb.data[:length+n] return length, nil } diff --git a/vendor/github.com/msaf1980/go-stringutils/stringbuilder_append.go b/vendor/github.com/msaf1980/go-stringutils/stringbuilder_append.go index d8c86275a..ad919d461 100644 --- a/vendor/github.com/msaf1980/go-stringutils/stringbuilder_append.go +++ b/vendor/github.com/msaf1980/go-stringutils/stringbuilder_append.go @@ -4,71 +4,51 @@ import "strconv" // WriteInt appends the string form of the integer i, as generated by FormatInt. func (sb *Builder) WriteInt(i int64, base int) { - sb.data = strconv.AppendInt(sb.data[:sb.length], i, base) - sb.length = len(sb.data) - sb.data = sb.data[:cap(sb.data)] + sb.data = strconv.AppendInt(sb.data, i, base) } // WriteUint appends the string form of the unsigned integer i, as generated by FormatUint. func (sb *Builder) WriteUint(i uint64, base int) { - sb.data = strconv.AppendUint(sb.data[:sb.length], i, base) - sb.length = len(sb.data) - sb.data = sb.data[:cap(sb.data)] + sb.data = strconv.AppendUint(sb.data, i, base) } // WriteFloat appends the string form of the floating-point number f, // as generated by FormatFloat. func (sb *Builder) WriteFloat(f float64, fmt byte, prec, bitSize int) { - sb.data = strconv.AppendFloat(sb.data[:sb.length], f, fmt, prec, bitSize) - sb.length = len(sb.data) - sb.data = sb.data[:cap(sb.data)] + sb.data = strconv.AppendFloat(sb.data, f, fmt, prec, bitSize) } // WriteBool appends the string form of the bool v, as generated by FormatBool. 
func (sb *Builder) WriteBool(v bool) { - sb.data = strconv.AppendBool(sb.data[:sb.length], v) - sb.length = len(sb.data) - sb.data = sb.data[:cap(sb.data)] + sb.data = strconv.AppendBool(sb.data, v) } // WriteQuote appends the string form of the quoted string s, as generated by Quote. func (sb *Builder) WriteQuote(s string) { - sb.data = strconv.AppendQuote(sb.data[:sb.length], s) - sb.length = len(sb.data) - sb.data = sb.data[:cap(sb.data)] + sb.data = strconv.AppendQuote(sb.data, s) } // WriteQuoteRune appends the string form of the quoted rune r, as generated by QuoteRune. func (sb *Builder) WriteQuoteRune(r rune) { - sb.data = strconv.AppendQuoteRune(sb.data[:sb.length], r) - sb.length = len(sb.data) - sb.data = sb.data[:cap(sb.data)] + sb.data = strconv.AppendQuoteRune(sb.data, r) } // WriteQuoteRuneToASCII appends the string form of the single-quoted rune r, as generated by QuoteRuneToASCII. func (sb *Builder) WriteQuoteRuneToASCII(r rune) { - sb.data = strconv.AppendQuoteRuneToASCII(sb.data[:sb.length], r) - sb.length = len(sb.data) - sb.data = sb.data[:cap(sb.data)] + sb.data = strconv.AppendQuoteRuneToASCII(sb.data, r) } // WriteQuoteToASCII appends the string form of the single-quoted string s, as generated by QuoteToASCII. func (sb *Builder) WriteQuoteToASCII(s string) { - sb.data = strconv.AppendQuoteToASCII(sb.data[:sb.length], s) - sb.length = len(sb.data) - sb.data = sb.data[:cap(sb.data)] + sb.data = strconv.AppendQuoteToASCII(sb.data, s) } // WriteQuoteRuneToGraphic appends the string form of the quoted rune r, as generated by QuoteRuneToGraphic. func (sb *Builder) WriteQuoteRuneToGraphic(r rune) { - sb.data = strconv.AppendQuoteRuneToGraphic(sb.data[:sb.length], r) - sb.length = len(sb.data) - sb.data = sb.data[:cap(sb.data)] + sb.data = strconv.AppendQuoteRuneToGraphic(sb.data, r) } // WriteQuoteToGraphic appends the string form of the quoted string s, as generated by QuoteToGraphic. 
func (sb *Builder) WriteQuoteToGraphic(s string) { - sb.data = strconv.AppendQuoteToGraphic(sb.data[:sb.length], s) - sb.length = len(sb.data) - sb.data = sb.data[:cap(sb.data)] + sb.data = strconv.AppendQuoteToGraphic(sb.data, s) } diff --git a/vendor/github.com/msaf1980/go-stringutils/trim.go b/vendor/github.com/msaf1980/go-stringutils/trim.go new file mode 100644 index 000000000..5bdd65fff --- /dev/null +++ b/vendor/github.com/msaf1980/go-stringutils/trim.go @@ -0,0 +1,36 @@ +package stringutils + +// TrimLeft is the equivalent of strings.TrimLeft +func TrimLeft(s string, cutset byte) string { + lenStr, start := len(s), 0 + for start < lenStr && s[start] == cutset { + start++ + } + return s[start:] +} + +// Trim is the equivalent of strings.Trim +func Trim(s string, cutset byte) string { + i, j := 0, len(s)-1 + for ; i <= j; i++ { + if s[i] != cutset { + break + } + } + for ; i < j; j-- { + if s[j] != cutset { + break + } + } + + return s[i : j+1] +} + +// TrimRight is the equivalent of strings.TrimRight +func TrimRight(s string, cutset byte) string { + lenStr := len(s) + for lenStr > 0 && s[lenStr-1] == cutset { + lenStr-- + } + return s[:lenStr] +} diff --git a/vendor/github.com/msaf1980/go-stringutils/unsafe.go b/vendor/github.com/msaf1980/go-stringutils/unsafe.go index bb96b877d..481b5ac89 100644 --- a/vendor/github.com/msaf1980/go-stringutils/unsafe.go +++ b/vendor/github.com/msaf1980/go-stringutils/unsafe.go @@ -15,11 +15,23 @@ func UnsafeStringFromPtr(ptr *byte, length int) (s string) { str := (*reflect.StringHeader)(unsafe.Pointer(&s)) str.Data = uintptr(unsafe.Pointer(ptr)) str.Len = length - - return s + return } // UnsafeStringBytes returns the string bytes func UnsafeStringBytes(s *string) []byte { return *(*[]byte)(unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(s)))) } + +// UnsafeStringBytePtr returns the string byte ptr +func UnsafeStringBytePtr(s string) *byte { + return &(*(*[]byte)(unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(&s)))))[0] +} + +func UnsafeBytes(ptr *byte, length, cap int) (b []byte) { + bs := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bs.Data = uintptr(unsafe.Pointer(ptr)) + bs.Len = length + bs.Cap = cap + return *(*[]byte)(unsafe.Pointer(&b)) +} diff --git a/vendor/github.com/msaf1980/go-stringutils/unsafe_1.go b/vendor/github.com/msaf1980/go-stringutils/unsafe_1.go new file mode 100644 index 000000000..3ec1bc976 --- /dev/null +++ b/vendor/github.com/msaf1980/go-stringutils/unsafe_1.go @@ -0,0 +1,7 @@ +package stringutils + +// compability with Golang 1.20 proposal https://github.com/golang/go/issues/53003 +var ( + String func(ptr *byte, length int) string = UnsafeStringFromPtr + StringData func(str string) *byte = UnsafeStringBytePtr +) diff --git a/vendor/github.com/msaf1980/go-stringutils/uuid.go b/vendor/github.com/msaf1980/go-stringutils/uuid.go new file mode 100644 index 000000000..c3963a115 --- /dev/null +++ b/vendor/github.com/msaf1980/go-stringutils/uuid.go @@ -0,0 +1,55 @@ +package stringutils + +import ( + "crypto/rand" + "encoding/binary" + "encoding/hex" + "sync/atomic" +) + +const ( + emptyUUID = "00000000-0000-0000-0000-000000000000" +) + +var ( + uuidSeed [24]byte + uuidCounter uint64 +) + +func init() { + // Setup seed & counter once + if _, err := rand.Read(uuidSeed[:]); err != nil { + return + } + uuidCounter = binary.LittleEndian.Uint64(uuidSeed[:8]) +} + +// UUID generates an universally unique identifier (UUID) +func UUID() string { + if atomic.LoadUint64(&uuidCounter) <= 0 { + return emptyUUID + } + // first 8 
bytes differ, taking a slice of the first 16 bytes + x := atomic.AddUint64(&uuidCounter, 1) + uuid := uuidSeed + binary.LittleEndian.PutUint64(uuid[:8], x) + uuid[6], uuid[9] = uuid[9], uuid[6] + + // RFC4122 v4 + uuid[6] = (uuid[6] & 0x0f) | 0x40 + uuid[8] = uuid[8]&0x3f | 0x80 + + // create UUID representation of the first 128 bits + b := make([]byte, 36) + hex.Encode(b[0:8], uuid[0:4]) + b[8] = '-' + hex.Encode(b[9:13], uuid[4:6]) + b[13] = '-' + hex.Encode(b[14:18], uuid[6:8]) + b[18] = '-' + hex.Encode(b[19:23], uuid[8:10]) + b[23] = '-' + hex.Encode(b[24:], uuid[10:16]) + + return UnsafeString(b) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7b98bcfdc..f79f55915 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -215,7 +215,7 @@ github.com/msaf1980/go-expirecache ## explicit; go 1.16 github.com/msaf1980/go-metrics github.com/msaf1980/go-metrics/graphite -# github.com/msaf1980/go-stringutils v0.1.4 +# github.com/msaf1980/go-stringutils v0.1.6 ## explicit; go 1.16 github.com/msaf1980/go-stringutils # github.com/msaf1980/go-syncutils v0.0.3
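Note on the revised weight curve: the sketch below is not part of the patch; it is a minimal standalone illustration that mirrors the updated `load_avg.Weight` logic from `load_avg/load_avg.go` above and reproduces the `n = 100` expectations in `load_avg_test.go`. The local `weight` helper is a copy for demonstration only, not the package API; the key behavioral change is that non-positive bases and normalized loads at or above 2.0 now clamp to a floor of 1 (instead of 0), and loads just above 1.0 are scaled by 4 before the log10 penalty, so overloaded backends lose weight much faster.

```go
package main

import (
	"fmt"
	"math"
)

// weight mirrors the patched load_avg.Weight (illustrative copy):
// - n <= 0 or load >= 2.0 returns the new floor of 1,
// - load 0 keeps the 2*n idle bonus,
// - loads above 1.0 are multiplied by 4 before the log10 penalty.
func weight(n int, l float64) int64 {
	if n <= 0 || l >= 2.0 {
		return 1
	}
	l = math.Round(10*l) / 10
	if l == 0 {
		return 2 * int64(n)
	}
	if l > 1.0 {
		l *= 4
	}
	l = math.Log10(l)
	w := int64(n) - int64(float64(n)*l)
	if w <= 0 {
		return 1
	}
	return w
}

func main() {
	// Matches the n=100 rows in load_avg_test.go:
	// 0 -> 200, 0.5 -> 130, 1 -> 100, 1.1 -> 36, 1.9 -> 12, 2 -> 1, 10 -> 1
	for _, l := range []float64{0, 0.5, 1, 1.1, 1.9, 2, 10} {
		fmt.Printf("base 100, load %.1f -> weight %d\n", l, weight(100, l))
	}
}
```

Together with the `setWeight` floor in `sd/nginx/nginx.go`, this keeps an overloaded node registered in the upstream with a minimal weight of 1 rather than dropping it to 0.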