diff --git a/go.mod b/go.mod index d0167312e7..9ac8314d47 100644 --- a/go.mod +++ b/go.mod @@ -6,18 +6,18 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 github.com/CortexFoundation/inference v1.0.2-0.20230307032835-9197d586a4e8 github.com/CortexFoundation/statik v0.0.0-20210315012922-8bb8a7b5dc66 - github.com/CortexFoundation/torrentfs v1.0.56-0.20231216192420-89d0e1363db7 + github.com/CortexFoundation/torrentfs v1.0.56-0.20231221154219-46968989e786 github.com/VictoriaMetrics/fastcache v1.12.2 github.com/arsham/figurine v1.3.0 github.com/aws/aws-sdk-go-v2 v1.24.0 - github.com/aws/aws-sdk-go-v2/config v1.26.1 - github.com/aws/aws-sdk-go-v2/credentials v1.16.12 - github.com/aws/aws-sdk-go-v2/service/route53 v1.35.5 + github.com/aws/aws-sdk-go-v2/config v1.26.2 + github.com/aws/aws-sdk-go-v2/credentials v1.16.13 + github.com/aws/aws-sdk-go-v2/service/route53 v1.36.0 github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cespare/cp v1.1.1 github.com/charmbracelet/bubbletea v0.25.0 github.com/cloudflare/cloudflare-go v0.57.1 - github.com/cockroachdb/pebble v0.0.0-20231214172447-ab4952c5f87b + github.com/cockroachdb/pebble v0.0.0-20231220182916-5be92739e7bb github.com/consensys/gnark-crypto v0.12.1 github.com/crate-crypto/go-kzg-4844 v0.7.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc @@ -61,8 +61,8 @@ require ( github.com/ucwong/color v1.10.1-0.20200624105241-fba1e010fe1e github.com/urfave/cli/v2 v2.26.0 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.16.0 - golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 + golang.org/x/crypto v0.17.0 + golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f golang.org/x/sync v0.5.0 golang.org/x/sys v0.15.0 @@ -81,9 +81,9 @@ require ( github.com/CortexFoundation/cvm-runtime v0.0.0-20221117094012-b5a251885572 // indirect github.com/CortexFoundation/merkletree v0.0.0-20230724124840-b6e80265a137 // indirect github.com/CortexFoundation/robot v1.0.7-0.20231001204802-6299a9ffd845 // indirect - github.com/CortexFoundation/wormhole v0.0.2-0.20231018202213-693acd0cc941 // indirect + github.com/CortexFoundation/wormhole v0.0.2-0.20231221153655-0321e1fe971c // indirect github.com/DataDog/zstd v1.5.6-0.20230622172052-ea68dcab66c0 // indirect - github.com/RoaringBitmap/roaring v1.6.0 // indirect + github.com/RoaringBitmap/roaring v1.7.0 // indirect github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 // indirect github.com/alecthomas/atomic v0.1.0-alpha2 // indirect github.com/anacrolix/chansync v0.3.0 // indirect @@ -114,7 +114,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.26.6 // indirect github.com/aws/smithy-go v1.19.0 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect @@ -134,7 +134,7 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/crate-crypto/go-ipa v0.0.0-20231205143816-408dbffb2041 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect - github.com/dgraph-io/badger/v4 v4.2.1-0.20231013074411-fb1b00959581 // indirect + github.com/dgraph-io/badger/v4 v4.2.1-0.20231218065111-7b5baa11879c // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect 
github.com/dlclark/regexp2 v1.10.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -145,7 +145,7 @@ require ( github.com/getsentry/sentry-go v0.25.0 // indirect github.com/go-llsqlite/adapter v0.1.0 // indirect github.com/go-llsqlite/crawshaw v0.5.0 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-resty/resty/v2 v2.10.0 // indirect @@ -178,7 +178,7 @@ require ( github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/nutsdb/nutsdb v1.0.3-0.20231216104420-099901a09ea1 // indirect + github.com/nutsdb/nutsdb v1.0.3-0.20231219150513-49211584c107 // indirect github.com/nxadm/tail v1.4.11 // indirect github.com/oapi-codegen/runtime v1.1.0 // indirect github.com/otiai10/copy v1.14.0 // indirect @@ -221,7 +221,7 @@ require ( github.com/tklauser/numcpus v0.7.0 // indirect github.com/ucwong/filecache v1.0.6-0.20230405163841-810d53ced4bd // indirect github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb // indirect - github.com/ucwong/golang-kv v1.0.23-0.20231216115725-4f38a0fd08a6 // indirect + github.com/ucwong/golang-kv v1.0.23-0.20231220222728-54b1adf96ed4 // indirect github.com/ucwong/shard v1.0.1-0.20230924231639-2ac2d8ab288c // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e // indirect @@ -238,7 +238,7 @@ require ( golang.org/x/net v0.19.0 // indirect golang.org/x/term v0.15.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - modernc.org/libc v1.37.6 // indirect + modernc.org/libc v1.38.0 // indirect modernc.org/mathutil v1.6.0 // indirect modernc.org/memory v1.7.2 // indirect modernc.org/sqlite v1.28.0 // indirect diff --git a/go.sum b/go.sum index 6fe405dc0d..454ec3e4d9 100644 --- a/go.sum +++ b/go.sum @@ -66,10 +66,10 @@ github.com/CortexFoundation/statik v0.0.0-20210315012922-8bb8a7b5dc66/go.mod h1: github.com/CortexFoundation/torrentfs v1.0.13-0.20200623060705-ce027f43f2f8/go.mod h1:Ma+tGhPPvz4CEZHaqEJQMOEGOfHeQBiAoNd1zyc/w3Q= github.com/CortexFoundation/torrentfs v1.0.14-0.20200703071639-3fcabcabf274/go.mod h1:qnb3YlIJmuetVBtC6Lsejr0Xru+1DNmDCdTqnwy7lhk= github.com/CortexFoundation/torrentfs v1.0.20-0.20200810031954-d36d26f82fcc/go.mod h1:N5BsicP5ynjXIi/Npl/SRzlJ630n1PJV2sRj0Z0t2HA= -github.com/CortexFoundation/torrentfs v1.0.56-0.20231216192420-89d0e1363db7 h1:bqinS9WJI64fMMXtZD2MrwpY4pxBBzbnPi3Li/UVN6E= -github.com/CortexFoundation/torrentfs v1.0.56-0.20231216192420-89d0e1363db7/go.mod h1:sxfHgfgCN+diMIlZ/P6aDT6Gee0M3RfrwJPx8DRxmUg= -github.com/CortexFoundation/wormhole v0.0.2-0.20231018202213-693acd0cc941 h1:2lhbBGb0Q/v2Uk2P1qGWK//jmrZUGROZzME8psO9QNA= -github.com/CortexFoundation/wormhole v0.0.2-0.20231018202213-693acd0cc941/go.mod h1:ipzmPabDgzYKUbXkGVe2gTkBEp+MsDx6pXGiuYzmP6s= +github.com/CortexFoundation/torrentfs v1.0.56-0.20231221154219-46968989e786 h1:hjJ6rTF5DrzEKIEApjMVoqyWACz1woM+Uuv/gRiIPr0= +github.com/CortexFoundation/torrentfs v1.0.56-0.20231221154219-46968989e786/go.mod h1:wzPTFeyx5Selwk+xKxtTkZyOLbbQM8gme6fQ9fCGIWI= +github.com/CortexFoundation/wormhole v0.0.2-0.20231221153655-0321e1fe971c h1:YrwMnXM72z0F+jxPXPX3gnP2pMCsLBXqqHSvt5hergg= +github.com/CortexFoundation/wormhole v0.0.2-0.20231221153655-0321e1fe971c/go.mod h1:ipzmPabDgzYKUbXkGVe2gTkBEp+MsDx6pXGiuYzmP6s= 
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/zstd v1.5.6-0.20230622172052-ea68dcab66c0 h1:ye3LRgDs6Og7SKC1wBQH8oMaGczhCRpPpnU74l4rma8= github.com/DataDog/zstd v1.5.6-0.20230622172052-ea68dcab66c0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= @@ -82,8 +82,8 @@ github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrX github.com/RoaringBitmap/roaring v0.4.18/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/RoaringBitmap/roaring v1.6.0 h1:dc7kRiroETgJcHhWX6BerXkZz2b3JgLGg9nTURJL/og= -github.com/RoaringBitmap/roaring v1.6.0/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE= +github.com/RoaringBitmap/roaring v1.7.0 h1:OZF303tJCER1Tj3x+aArx/S5X7hrT186ri6JjrGvG68= +github.com/RoaringBitmap/roaring v1.7.0/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= @@ -255,10 +255,10 @@ github.com/aws/aws-sdk-go v1.31.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2/config v1.26.1 h1:z6DqMxclFGL3Zfo+4Q0rLnAZ6yVkzCRxhRMsiRQnD1o= -github.com/aws/aws-sdk-go-v2/config v1.26.1/go.mod h1:ZB+CuKHRbb5v5F0oJtGdhFTelmrxd4iWO1lf0rQwSAg= -github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuTegM+K8nncFkr1usU= -github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ= +github.com/aws/aws-sdk-go-v2/config v1.26.2 h1:+RWLEIWQIGgrz2pBPAUoGgNGs1TOyF4Hml7hCnYj2jc= +github.com/aws/aws-sdk-go-v2/config v1.26.2/go.mod h1:l6xqvUxt0Oj7PI/SUXYLNyZ9T/yBPn3YTQcJLLOdtR8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.13 h1:WLABQ4Cp4vXtXfOWOS3MEZKr6AAYUpMczLhgKtAjQ/8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.13/go.mod h1:Qg6x82FXwW0sJHzYruxGiuApNo31UEtJvXVSZAXeWiw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs= @@ -271,14 +271,14 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJR8CSImIVCONRi4g0Su3J+TSTbS7G0pUeMU= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0= -github.com/aws/aws-sdk-go-v2/service/route53 v1.35.5 h1:WVQIKVwv56JY+I0b2fFeRGCTSi/Xupa87z7y8HZ6l5g= -github.com/aws/aws-sdk-go-v2/service/route53 v1.35.5/go.mod h1:F9El48+5Tf+TkYJB/6M9H7oqXw9Mr9eVetwJ6SUql7g= 
+github.com/aws/aws-sdk-go-v2/service/route53 v1.36.0 h1:7wh6KdJnej4T7sE/xfnZf5T+GQzp6GfoZi+5r6ZPlW8= +github.com/aws/aws-sdk-go-v2/service/route53 v1.36.0/go.mod h1:F9El48+5Tf+TkYJB/6M9H7oqXw9Mr9eVetwJ6SUql7g= github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM= github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 h1:5UYvv8JUvllZsRnfrcMQ+hJ9jNICmcgKPAO1CER25Wg= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.5/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.6 h1:HJeiuZ2fldpd0WqngyMR6KW7ofkXNLyOaHwEIGm39Cs= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.6/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= @@ -294,7 +294,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.1-0.20200416141419-39a59b1b2866/go.mod h1:bXegrmTNBg3jTbSwV0BSBcSSfHHctupCgavZr/gX5fo= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA= github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= @@ -355,8 +354,8 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= -github.com/cockroachdb/pebble v0.0.0-20231214172447-ab4952c5f87b h1:r3BhKev3k3GhdEGl9PMSO1MBZu5P1D+JXAcXI4O21UY= -github.com/cockroachdb/pebble v0.0.0-20231214172447-ab4952c5f87b/go.mod h1:BHuaMa/lK7fUe75BlsteiiTu8ptIG+qSAuDtGMArP18= +github.com/cockroachdb/pebble v0.0.0-20231220182916-5be92739e7bb h1:Qg5wrfcdOmkb7EshjGTtrZEhG2W/sfDdZF/bAYGv0GY= +github.com/cockroachdb/pebble v0.0.0-20231220182916-5be92739e7bb/go.mod h1:BHuaMa/lK7fUe75BlsteiiTu8ptIG+qSAuDtGMArP18= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= @@ -402,8 +401,8 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5il github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 
h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/dgraph-io/badger/v4 v4.2.1-0.20231013074411-fb1b00959581 h1:yy45brf1ktmnkTCZlHynP1gRlVwZ9g19oz5D9wG81v4= -github.com/dgraph-io/badger/v4 v4.2.1-0.20231013074411-fb1b00959581/go.mod h1:T/uWAYxrXdaXw64ihI++9RMbKTCpKd/yE9+saARew7k= +github.com/dgraph-io/badger/v4 v4.2.1-0.20231218065111-7b5baa11879c h1:djS/nBZaITLbddF4hf2GoF/xr9Ed2po5Emx3GLV0eE0= +github.com/dgraph-io/badger/v4 v4.2.1-0.20231218065111-7b5baa11879c/go.mod h1:T/uWAYxrXdaXw64ihI++9RMbKTCpKd/yE9+saARew7k= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -515,8 +514,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.0 h1:wx+BduGRXjIL6VPeeb7DRX+ii7sR/ch8DlRifHR589o= +github.com/go-logr/logr v1.4.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= @@ -895,8 +894,8 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/nutsdb/nutsdb v1.0.3-0.20231216104420-099901a09ea1 h1:FTSPo9cWs+wp+ezHZsMhUJ+FT32SplsIixkR27UriGA= -github.com/nutsdb/nutsdb v1.0.3-0.20231216104420-099901a09ea1/go.mod h1:jIbbpBXajzTMZ0o33Yn5zoYIo3v0Dz4WstkVce+sYuQ= +github.com/nutsdb/nutsdb v1.0.3-0.20231219150513-49211584c107 h1:BEfl/s5yDPmmZLjstgLDxQuOiq8tBszBZiT+1+TNsnk= +github.com/nutsdb/nutsdb v1.0.3-0.20231219150513-49211584c107/go.mod h1:jIbbpBXajzTMZ0o33Yn5zoYIo3v0Dz4WstkVce+sYuQ= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= @@ -1250,8 +1249,8 @@ github.com/ucwong/filecache v1.0.6-0.20230405163841-810d53ced4bd h1:gBtlvLAsgLk+ github.com/ucwong/filecache v1.0.6-0.20230405163841-810d53ced4bd/go.mod h1:ddwX+NCjMZPdpzcGh1fcEbNTUTCtKgt2hC2rqvmLKgA= github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb h1:dVZH3AH9f7zB3VBmsjn25B7lfcAyMP4QxdFYTrfj7tg= github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb/go.mod h1:3yswsBsVuwsOjDvFfC5Na9XSEf4HC7mj3W3g6jvSY/s= -github.com/ucwong/golang-kv v1.0.23-0.20231216115725-4f38a0fd08a6 
h1:zRYCKxPZ3VE7O9aiHwdMXen/BUUwHTwkK03cFoa2Th4= -github.com/ucwong/golang-kv v1.0.23-0.20231216115725-4f38a0fd08a6/go.mod h1:cgGb6l+j9mE0g0eZ4IS20jNHDkl3WFSXoHwgHZIPod4= +github.com/ucwong/golang-kv v1.0.23-0.20231220222728-54b1adf96ed4 h1:WZjjcY63Qh+LQlqLQpYdvcB278rkUJvN3GBgfQw2ZIk= +github.com/ucwong/golang-kv v1.0.23-0.20231220222728-54b1adf96ed4/go.mod h1:qxnAZV4g3a7WDjzXs4+EA4m5+nfMuwvnIWpXIJN+T70= github.com/ucwong/golang-set v1.8.1-0.20200419153428-d7b0b1ac2d43/go.mod h1:xu0FaiQFGbBcFZj2o7udZ5rbA8jRTsv47hkPoG5qQNM= github.com/ucwong/goleveldb v1.0.3-0.20200508074755-578cba616f37/go.mod h1:dgJUTtDxq/ne6/JzZhHzF24OL/uqILz9IWk8HmT4V2g= github.com/ucwong/goleveldb v1.0.3-0.20200618184106-f1c6bc3a428b/go.mod h1:7Sq6w7AfEZuB/a6mrlvHCSXCSkqojCMMrM3Ei12QAT0= @@ -1365,8 +1364,8 @@ golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIi golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1379,8 +1378,8 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= -golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 h1:qCEDpW1G+vcj3Y7Fy52pEM1AWm3abj8WimGYejI3SC4= -golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 h1:+iq7lrkxmFNBM7xx+Rae2W6uyPfhPeDWD+n+JgppptE= +golang.org/x/exp v0.0.0-20231219180239-dc181d75b848/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1794,8 +1793,8 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw= -modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= +modernc.org/libc v1.38.0 
h1:o4Lpk0zNDSdsjfEXnF1FGXWQ9PDi1NOdWcLP5n13FGo= +modernc.org/libc v1.38.0/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= diff --git a/vendor/github.com/CortexFoundation/torrentfs/backend/api.go b/vendor/github.com/CortexFoundation/torrentfs/backend/api.go index 02ae67a720..76926ab8fd 100644 --- a/vendor/github.com/CortexFoundation/torrentfs/backend/api.go +++ b/vendor/github.com/CortexFoundation/torrentfs/backend/api.go @@ -223,9 +223,14 @@ func (tm *TorrentManager) Search(ctx context.Context, hex string, request uint64 downloadMeter.Mark(1) + if request == 0 { + // TODO + } + return tm.commit(ctx, hex, request) } +// Add torrent to the leeching loop func (tm *TorrentManager) commit(ctx context.Context, hex string, request uint64) error { select { case tm.taskChan <- types.NewBitsFlow(hex, request): diff --git a/vendor/github.com/CortexFoundation/torrentfs/backend/caffe/torrent.go b/vendor/github.com/CortexFoundation/torrentfs/backend/caffe/torrent.go index a50641f91d..fac91e2618 100644 --- a/vendor/github.com/CortexFoundation/torrentfs/backend/caffe/torrent.go +++ b/vendor/github.com/CortexFoundation/torrentfs/backend/caffe/torrent.go @@ -65,6 +65,7 @@ type Torrent struct { spec *torrent.TorrentSpec //jobCh chan bool + priority int } type task struct { diff --git a/vendor/github.com/CortexFoundation/torrentfs/backend/handler.go b/vendor/github.com/CortexFoundation/torrentfs/backend/handler.go index a76594d260..33e5aa5196 100644 --- a/vendor/github.com/CortexFoundation/torrentfs/backend/handler.go +++ b/vendor/github.com/CortexFoundation/torrentfs/backend/handler.go @@ -532,6 +532,7 @@ func (tm *TorrentManager) addInfoHash(ih string, bytesRequested int64) *caffe.To } } +// Start the torrent leeching func (tm *TorrentManager) injectSpec(ih string, spec *torrent.TorrentSpec) (*torrent.Torrent, error) { if t, n, err := tm.client.AddTorrentSpec(spec); err == nil { if !n { @@ -952,7 +953,6 @@ func (tm *TorrentManager) mainLoop() { if tt, err := tm.injectSpec(t.InfoHash(), t.Spec()); err == nil && tt != nil { t.SetStatus(caffe.TorrentPending) t.Lock() - //t.status = torrentPending t.Torrent = tt t.SetStart(mclock.Now()) t.Unlock() diff --git a/vendor/github.com/CortexFoundation/wormhole/config.go b/vendor/github.com/CortexFoundation/wormhole/config.go index 1c48e8c790..64f5909fb7 100644 --- a/vendor/github.com/CortexFoundation/wormhole/config.go +++ b/vendor/github.com/CortexFoundation/wormhole/config.go @@ -28,6 +28,10 @@ var ( BestTrackerUrl = []string{"https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_best.txt", "https://cdn.jsdelivr.net/gh/ngosang/trackerslist@master/trackers_best.txt", "https://ngosang.github.io/trackerslist/trackers_best.txt", + // ip addr trackers + "https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_best_ip.txt", + "https://ngosang.github.io/trackerslist/trackers_best_ip.txt", + "https://cdn.jsdelivr.net/gh/ngosang/trackerslist@master/trackers_best_ip.txt", } ColaUrl = []string{"https://github.com/CortexFoundation/cola/releases/download/1.0.0/cola.txt"} diff --git a/vendor/github.com/RoaringBitmap/roaring/.drone.yml b/vendor/github.com/RoaringBitmap/roaring/.drone.yml index 698cd0e7a7..7936bfe8df 100644 --- a/vendor/github.com/RoaringBitmap/roaring/.drone.yml +++ 
b/vendor/github.com/RoaringBitmap/roaring/.drone.yml @@ -11,7 +11,6 @@ steps: commands: - go get -t - go test - - go test -race -run TestConcurrent* - go build -tags appengine - go test -tags appengine - GOARCH=386 go build diff --git a/vendor/github.com/RoaringBitmap/roaring/Makefile b/vendor/github.com/RoaringBitmap/roaring/Makefile deleted file mode 100644 index 0a4f9f0aae..0000000000 --- a/vendor/github.com/RoaringBitmap/roaring/Makefile +++ /dev/null @@ -1,107 +0,0 @@ -.PHONY: help all test format fmtcheck vet lint qa deps clean nuke ser fetch-real-roaring-datasets - - - - - - - - -# Display general help about this command -help: - @echo "" - @echo "The following commands are available:" - @echo "" - @echo " make qa : Run all the tests" - @echo " make test : Run the unit tests" - @echo "" - @echo " make format : Format the source code" - @echo " make fmtcheck : Check if the source code has been formatted" - @echo " make vet : Check for suspicious constructs" - @echo " make lint : Check for style errors" - @echo "" - @echo " make deps : Get the dependencies" - @echo " make clean : Remove any build artifact" - @echo " make nuke : Deletes any intermediate file" - @echo "" - @echo " make fuzz-smat : Fuzzy testing with smat" - @echo " make fuzz-stream : Fuzzy testing with stream deserialization" - @echo " make fuzz-buffer : Fuzzy testing with buffer deserialization" - @echo "" - -# Alias for help target -all: help -test: - go test - go test -race -run TestConcurrent* -# Format the source code -format: - @find ./ -type f -name "*.go" -exec gofmt -w {} \; - -# Check if the source code has been formatted -fmtcheck: - @mkdir -p target - @find ./ -type f -name "*.go" -exec gofmt -d {} \; | tee target/format.diff - @test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; } - -# Check for syntax errors -vet: - GOPATH=$(GOPATH) go vet ./... - -# Check for style errors -lint: - GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint ./... - - - - - -# Alias to run all quality-assurance checks -qa: fmtcheck test vet lint - -# --- INSTALL --- - -# Get the dependencies -deps: - GOPATH=$(GOPATH) go get github.com/stretchr/testify - GOPATH=$(GOPATH) go get github.com/bits-and-blooms/bitset - GOPATH=$(GOPATH) go get github.com/golang/lint/golint - GOPATH=$(GOPATH) go get github.com/mschoch/smat - GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz - GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz-build - GOPATH=$(GOPATH) go get github.com/glycerine/go-unsnap-stream - GOPATH=$(GOPATH) go get github.com/philhofer/fwd - GOPATH=$(GOPATH) go get github.com/jtolds/gls - -fuzz-smat: - go test -tags=gofuzz -run=TestGenerateSmatCorpus - go-fuzz-build -func FuzzSmat github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 - - -fuzz-stream: - go-fuzz-build -func FuzzSerializationStream github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 - - -fuzz-buffer: - go-fuzz-build -func FuzzSerializationBuffer github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 - -# Remove any build artifact -clean: - GOPATH=$(GOPATH) go clean ./... - -# Deletes any intermediate file -nuke: - rm -rf ./target - GOPATH=$(GOPATH) go clean -i ./... 
- -cover: - go test -coverprofile=coverage.out - go tool cover -html=coverage.out - -fetch-real-roaring-datasets: - # pull github.com/RoaringBitmap/real-roaring-datasets -> testdata/real-roaring-datasets - git submodule init - git submodule update diff --git a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go index 9541fd5369..a575caff83 100644 --- a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go +++ b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go @@ -17,8 +17,17 @@ func (ac *arrayContainer) String() string { } func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) int { + if i < 0 { + panic("negative index") + } + if len(ac.content) == 0 { + return i + } + _ = x[len(ac.content)-1+i] + _ = ac.content[len(ac.content)-1] for k := 0; k < len(ac.content); k++ { - x[k+i] = uint32(ac.content[k]) | mask + x[k+i] = + uint32(ac.content[k]) | mask } return i + len(ac.content) } diff --git a/vendor/github.com/RoaringBitmap/roaring/roaring.go b/vendor/github.com/RoaringBitmap/roaring/roaring.go index cdf543ce51..a31cdbd9e8 100644 --- a/vendor/github.com/RoaringBitmap/roaring/roaring.go +++ b/vendor/github.com/RoaringBitmap/roaring/roaring.go @@ -13,6 +13,7 @@ import ( "strconv" "github.com/RoaringBitmap/roaring/internal" + "github.com/bits-and-blooms/bitset" ) // Bitmap represents a compressed bitmap where you can add integers. @@ -53,13 +54,182 @@ func (rb *Bitmap) ToBytes() ([]byte, error) { return rb.highlowcontainer.toBytes() } +const wordSize = uint64(64) +const log2WordSize = uint64(6) +const capacity = ^uint64(0) +const bitmapContainerSize = (1 << 16) / 64 // bitmap size in words + +// DenseSize returns the size of the bitmap when stored as a dense bitmap. +func (rb *Bitmap) DenseSize() uint64 { + if rb.highlowcontainer.size() == 0 { + return 0 + } + + maximum := 1 + uint64(rb.Maximum()) + if maximum > (capacity - wordSize + 1) { + return uint64(capacity >> log2WordSize) + } + + return uint64((maximum + (wordSize - 1)) >> log2WordSize) +} + +// ToDense returns a slice of uint64s representing the bitmap as a dense bitmap. +// Useful to convert a roaring bitmap to a format that can be used by other libraries +// like https://github.com/bits-and-blooms/bitset or https://github.com/kelindar/bitmap +func (rb *Bitmap) ToDense() []uint64 { + sz := rb.DenseSize() + if sz == 0 { + return nil + } + + bitmap := make([]uint64, sz) + rb.WriteDenseTo(bitmap) + return bitmap +} + +// FromDense creates a bitmap from a slice of uint64s representing the bitmap as a dense bitmap. +// Useful to convert bitmaps from libraries like https://github.com/bits-and-blooms/bitset or +// https://github.com/kelindar/bitmap into roaring bitmaps fast and with convenience. +// +// This function will not create any run containers, only array and bitmap containers. It's up to +// the caller to call RunOptimize if they want to further compress the runs of consecutive values. +// +// When doCopy is true, the bitmap is copied into a new slice for each bitmap container. +// This is useful when the bitmap is going to be modified after this function returns or if it's +// undesirable to hold references to large bitmaps which the GC would not be able to collect. +// One copy can still happen even when doCopy is false if the bitmap length is not divisible +// by bitmapContainerSize. +// +// See also FromBitSet. 
+func FromDense(bitmap []uint64, doCopy bool) *Bitmap { + sz := (len(bitmap) + bitmapContainerSize - 1) / bitmapContainerSize // round up + rb := &Bitmap{ + highlowcontainer: roaringArray{ + containers: make([]container, 0, sz), + keys: make([]uint16, 0, sz), + needCopyOnWrite: make([]bool, 0, sz), + }, + } + rb.FromDense(bitmap, doCopy) + return rb +} + +// FromDense unmarshalls from a slice of uint64s representing the bitmap as a dense bitmap. +// Useful to convert bitmaps from libraries like https://github.com/bits-and-blooms/bitset or +// https://github.com/kelindar/bitmap into roaring bitmaps fast and with convenience. +// Callers are responsible for ensuring that the bitmap is empty before calling this function. +// +// This function will not create any run containers, only array and bitmap containers. It is up to +// the caller to call RunOptimize if they want to further compress the runs of consecutive values. +// +// When doCopy is true, the bitmap is copied into a new slice for each bitmap container. +// This is useful when the bitmap is going to be modified after this function returns or if it's +// undesirable to hold references to large bitmaps which the GC would not be able to collect. +// One copy can still happen even when doCopy is false if the bitmap length is not divisible +// by bitmapContainerSize. +// +// See FromBitSet. +func (rb *Bitmap) FromDense(bitmap []uint64, doCopy bool) { + if len(bitmap) == 0 { + return + } + + var k uint16 + const size = bitmapContainerSize + + for len(bitmap) > 0 { + hi := size + if len(bitmap) < size { + hi = len(bitmap) + } + + words := bitmap[:hi] + count := int(popcntSlice(words)) + + switch { + case count > arrayDefaultMaxSize: + c := &bitmapContainer{cardinality: count, bitmap: words} + cow := true + + if doCopy || len(words) < size { + c.bitmap = make([]uint64, size) + copy(c.bitmap, words) + cow = false + } + + rb.highlowcontainer.appendContainer(k, c, cow) + + case count > 0: + c := &arrayContainer{content: make([]uint16, count)} + var pos, base int + for _, w := range words { + for w != 0 { + t := w & -w + c.content[pos] = uint16(base + int(popcount(t-1))) + pos++ + w ^= t + } + base += 64 + } + rb.highlowcontainer.appendContainer(k, c, false) + } + + bitmap = bitmap[hi:] + k++ + } +} + +// WriteDenseTo writes to a slice of uint64s representing the bitmap as a dense bitmap. +// Callers are responsible for allocating enough space in the bitmap using DenseSize. 
+// Useful to convert a roaring bitmap to a format that can be used by other libraries +// like https://github.com/bits-and-blooms/bitset or https://github.com/kelindar/bitmap +func (rb *Bitmap) WriteDenseTo(bitmap []uint64) { + for i, ct := range rb.highlowcontainer.containers { + hb := uint32(rb.highlowcontainer.keys[i]) << 16 + + switch c := ct.(type) { + case *arrayContainer: + for _, x := range c.content { + n := int(hb | uint32(x)) + bitmap[n>>log2WordSize] |= uint64(1) << uint(x%64) + } + + case *bitmapContainer: + copy(bitmap[int(hb)>>log2WordSize:], c.bitmap) + + case *runContainer16: + for j := range c.iv { + start := uint32(c.iv[j].start) + end := start + uint32(c.iv[j].length) + 1 + lo := int(hb|start) >> log2WordSize + hi := int(hb|(end-1)) >> log2WordSize + + if lo == hi { + bitmap[lo] |= (^uint64(0) << uint(start%64)) & + (^uint64(0) >> (uint(-end) % 64)) + continue + } + + bitmap[lo] |= ^uint64(0) << uint(start%64) + for n := lo + 1; n < hi; n++ { + bitmap[n] = ^uint64(0) + } + bitmap[hi] |= ^uint64(0) >> (uint(-end) % 64) + } + default: + panic("unsupported container type") + } + } +} + // Checksum computes a hash (currently FNV-1a) for a bitmap that is suitable for // using bitmaps as elements in hash sets or as keys in hash maps, as well as // generally quicker comparisons. // The implementation is biased towards efficiency in little endian machines, so // expect some extra CPU cycles and memory to be used if your machine is big endian. -// Likewise, don't use this to verify integrity unless you're certain you'll load -// the bitmap on a machine with the same endianess used to create it. +// Likewise, do not use this to verify integrity unless you are certain you will load +// the bitmap on a machine with the same endianess used to create it. (Thankfully +// very few people use big endian machines these days.) func (rb *Bitmap) Checksum() uint64 { const ( offset = 14695981039346656037 @@ -219,6 +389,16 @@ func (rb *Bitmap) Clear() { rb.highlowcontainer.clear() } +// ToBitSet copies the content of the RoaringBitmap into a bitset.BitSet instance +func (rb *Bitmap) ToBitSet() *bitset.BitSet { + return bitset.From(rb.ToDense()) +} + +// FromBitSet creates a new RoaringBitmap from a bitset.BitSet instance +func FromBitSet(bitset *bitset.BitSet) *Bitmap { + return FromDense(bitset.Bytes(), false) +} + // ToArray creates a new slice containing all of the integers stored in the Bitmap in sorted order func (rb *Bitmap) ToArray() []uint32 { array := make([]uint32, rb.GetCardinality()) @@ -258,7 +438,7 @@ func BoundSerializedSizeInBytes(cardinality uint64, universeSize uint64) uint64 contnbr := (universeSize + uint64(65535)) / uint64(65536) if contnbr > cardinality { contnbr = cardinality - // we can't have more containers than we have values + // we cannot have more containers than we have values } headermax := 8*contnbr + 4 if 4 > (contnbr+7)/8 { @@ -369,10 +549,10 @@ type IntIterator = intIterator // Initialize configures the existing iterator so that it can iterate through the values of // the provided bitmap. // The iteration results are undefined if the bitmap is modified (e.g., with Add or Remove). 
-func (p *intIterator) Initialize(a *Bitmap) { - p.pos = 0 - p.highlowcontainer = &a.highlowcontainer - p.init() +func (ii *intIterator) Initialize(a *Bitmap) { + ii.pos = 0 + ii.highlowcontainer = &a.highlowcontainer + ii.init() } type intReverseIterator struct { @@ -438,10 +618,10 @@ type IntReverseIterator = intReverseIterator // Initialize configures the existing iterator so that it can iterate through the values of // the provided bitmap. // The iteration results are undefined if the bitmap is modified (e.g., with Add or Remove). -func (p *intReverseIterator) Initialize(a *Bitmap) { - p.highlowcontainer = &a.highlowcontainer - p.pos = a.highlowcontainer.size() - 1 - p.init() +func (ii *intReverseIterator) Initialize(a *Bitmap) { + ii.highlowcontainer = &a.highlowcontainer + ii.pos = a.highlowcontainer.size() - 1 + ii.init() } // ManyIntIterable allows you to iterate over the values in a Bitmap @@ -525,10 +705,10 @@ type ManyIntIterator = manyIntIterator // Initialize configures the existing iterator so that it can iterate through the values of // the provided bitmap. // The iteration results are undefined if the bitmap is modified (e.g., with Add or Remove). -func (p *manyIntIterator) Initialize(a *Bitmap) { - p.pos = 0 - p.highlowcontainer = &a.highlowcontainer - p.init() +func (ii *manyIntIterator) Initialize(a *Bitmap) { + ii.pos = 0 + ii.highlowcontainer = &a.highlowcontainer + ii.init() } // String creates a string representation of the Bitmap @@ -870,7 +1050,7 @@ func (rb *Bitmap) Select(x uint32) (uint32, error) { return uint32(key)<<16 + uint32(c.selectInt(uint16(remaining))), nil } } - return 0, fmt.Errorf("can't find %dth integer in a bitmap with only %d items", x, rb.GetCardinality()) + return 0, fmt.Errorf("cannot find %dth integer in a bitmap with only %d items", x, rb.GetCardinality()) } // And computes the intersection between two bitmaps and stores the result in the current bitmap diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go b/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go index b6fa7b05c9..6e3a5d554c 100644 --- a/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go +++ b/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go @@ -312,7 +312,7 @@ func (rb *Bitmap) FrozenView(buf []byte) error { * uint8_t[num_containers] *
<header>      uint32_t * - * <header> is a 4-byte value which is a bit union of FROZEN_COOKIE (15 bits) + * <header> is a 4-byte value which is a bit union of frozenCookie (15 bits) * and the number of containers (17 bits). * <counts> stores number of elements for every container. @@ -328,43 +328,50 @@ func (rb *Bitmap) FrozenView(buf []byte) error { * All members have their native alignments during deserilization except <header>
, * which is not guaranteed to be aligned by 4 bytes. */ -const FROZEN_COOKIE = 13766 +const frozenCookie = 13766 var ( - FrozenBitmapInvalidCookie = errors.New("header does not contain the FROZEN_COOKIE") - FrozenBitmapBigEndian = errors.New("loading big endian frozen bitmaps is not supported") - FrozenBitmapIncomplete = errors.New("input buffer too small to contain a frozen bitmap") - FrozenBitmapOverpopulated = errors.New("too many containers") - FrozenBitmapUnexpectedData = errors.New("spurious data in input") - FrozenBitmapInvalidTypecode = errors.New("unrecognized typecode") - FrozenBitmapBufferTooSmall = errors.New("buffer too small") + // ErrFrozenBitmapInvalidCookie is returned when the header does not contain the frozenCookie. + ErrFrozenBitmapInvalidCookie = errors.New("header does not contain the frozenCookie") + // ErrFrozenBitmapBigEndian is returned when the header is big endian. + ErrFrozenBitmapBigEndian = errors.New("loading big endian frozen bitmaps is not supported") + // ErrFrozenBitmapIncomplete is returned when the buffer is too small to contain a frozen bitmap. + ErrFrozenBitmapIncomplete = errors.New("input buffer too small to contain a frozen bitmap") + // ErrFrozenBitmapOverpopulated is returned when the number of containers is too large. + ErrFrozenBitmapOverpopulated = errors.New("too many containers") + // ErrFrozenBitmapUnexpectedData is returned when the buffer contains unexpected data. + ErrFrozenBitmapUnexpectedData = errors.New("spurious data in input") + // ErrFrozenBitmapInvalidTypecode is returned when the typecode is invalid. + ErrFrozenBitmapInvalidTypecode = errors.New("unrecognized typecode") + // ErrFrozenBitmapBufferTooSmall is returned when the buffer is too small. + ErrFrozenBitmapBufferTooSmall = errors.New("buffer too small") ) func (ra *roaringArray) frozenView(buf []byte) error { if len(buf) < 4 { - return FrozenBitmapIncomplete + return ErrFrozenBitmapIncomplete } headerBE := binary.BigEndian.Uint32(buf[len(buf)-4:]) - if headerBE&0x7fff == FROZEN_COOKIE { - return FrozenBitmapBigEndian + if headerBE&0x7fff == frozenCookie { + return ErrFrozenBitmapBigEndian } header := binary.LittleEndian.Uint32(buf[len(buf)-4:]) buf = buf[:len(buf)-4] - if header&0x7fff != FROZEN_COOKIE { - return FrozenBitmapInvalidCookie + if header&0x7fff != frozenCookie { + return ErrFrozenBitmapInvalidCookie } nCont := int(header >> 15) if nCont > (1 << 16) { - return FrozenBitmapOverpopulated + return ErrFrozenBitmapOverpopulated } // 1 byte per type, 2 bytes per key, 2 bytes per count. if len(buf) < 5*nCont { - return FrozenBitmapIncomplete + return ErrFrozenBitmapIncomplete } types := buf[len(buf)-nCont:] @@ -389,12 +396,12 @@ func (ra *roaringArray) frozenView(buf []byte) error { nRun++ nRunEl += int(counts[i]) default: - return FrozenBitmapInvalidTypecode + return ErrFrozenBitmapInvalidTypecode } } if len(buf) < (1<<13)*nBitmap+4*nRunEl+2*nArrayEl { - return FrozenBitmapIncomplete + return ErrFrozenBitmapIncomplete } bitsetsArena := byteSliceAsUint64Slice(buf[:(1<<13)*nBitmap]) @@ -407,7 +414,7 @@ func (ra *roaringArray) frozenView(buf []byte) error { buf = buf[2*nArrayEl:] if len(buf) != 0 { - return FrozenBitmapUnexpectedData + return ErrFrozenBitmapUnexpectedData } var c container @@ -474,9 +481,10 @@ func (ra *roaringArray) frozenView(buf []byte) error { return nil } -func (bm *Bitmap) GetFrozenSizeInBytes() uint64 { +// GetFrozenSizeInBytes returns the size in bytes of the frozen bitmap. 
+func (rb *Bitmap) GetFrozenSizeInBytes() uint64 { nBits, nArrayEl, nRunEl := uint64(0), uint64(0), uint64(0) - for _, c := range bm.highlowcontainer.containers { + for _, c := range rb.highlowcontainer.containers { switch v := c.(type) { case *bitmapContainer: nBits++ @@ -486,19 +494,21 @@ func (bm *Bitmap) GetFrozenSizeInBytes() uint64 { nRunEl += uint64(len(v.iv)) } } - return 4 + 5*uint64(len(bm.highlowcontainer.containers)) + + return 4 + 5*uint64(len(rb.highlowcontainer.containers)) + (nBits << 13) + 2*nArrayEl + 4*nRunEl } -func (bm *Bitmap) Freeze() ([]byte, error) { - sz := bm.GetFrozenSizeInBytes() +// Freeze serializes the bitmap in the CRoaring's frozen format. +func (rb *Bitmap) Freeze() ([]byte, error) { + sz := rb.GetFrozenSizeInBytes() buf := make([]byte, sz) - _, err := bm.FreezeTo(buf) + _, err := rb.FreezeTo(buf) return buf, err } -func (bm *Bitmap) FreezeTo(buf []byte) (int, error) { - containers := bm.highlowcontainer.containers +// FreezeTo serializes the bitmap in the CRoaring's frozen format. +func (rb *Bitmap) FreezeTo(buf []byte) (int, error) { + containers := rb.highlowcontainer.containers nCont := len(containers) nBits, nArrayEl, nRunEl := 0, 0, 0 @@ -515,7 +525,7 @@ func (bm *Bitmap) FreezeTo(buf []byte) (int, error) { serialSize := 4 + 5*nCont + (1<<13)*nBits + 4*nRunEl + 2*nArrayEl if len(buf) < serialSize { - return 0, FrozenBitmapBufferTooSmall + return 0, ErrFrozenBitmapBufferTooSmall } bitsArena := byteSliceAsUint64Slice(buf[:(1<<13)*nBits]) @@ -536,10 +546,10 @@ func (bm *Bitmap) FreezeTo(buf []byte) (int, error) { types := buf[:nCont] buf = buf[nCont:] - header := uint32(FROZEN_COOKIE | (nCont << 15)) + header := uint32(frozenCookie | (nCont << 15)) binary.LittleEndian.PutUint32(buf[:4], header) - copy(keys, bm.highlowcontainer.keys[:]) + copy(keys, rb.highlowcontainer.keys[:]) for i, c := range containers { switch v := c.(type) { @@ -566,11 +576,12 @@ func (bm *Bitmap) FreezeTo(buf []byte) (int, error) { return serialSize, nil } -func (bm *Bitmap) WriteFrozenTo(wr io.Writer) (int, error) { +// WriteFrozenTo serializes the bitmap in the CRoaring's frozen format. +func (rb *Bitmap) WriteFrozenTo(wr io.Writer) (int, error) { // FIXME: this is a naive version that iterates 4 times through the // containers and allocates 3*len(containers) bytes; it's quite likely // it can be done more efficiently. 
- containers := bm.highlowcontainer.containers + containers := rb.highlowcontainer.containers written := 0 for _, c := range containers { @@ -609,7 +620,7 @@ func (bm *Bitmap) WriteFrozenTo(wr io.Writer) (int, error) { } } - n, err := wr.Write(uint16SliceAsByteSlice(bm.highlowcontainer.keys)) + n, err := wr.Write(uint16SliceAsByteSlice(rb.highlowcontainer.keys)) written += n if err != nil { return written, err @@ -641,7 +652,7 @@ func (bm *Bitmap) WriteFrozenTo(wr io.Writer) (int, error) { return written, err } - header := uint32(FROZEN_COOKIE | (len(containers) << 15)) + header := uint32(frozenCookie | (len(containers) << 15)) if err := binary.Write(wr, binary.LittleEndian, header); err != nil { return written, err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 79eae3632e..e8c52e4922 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.26.2 (2023-12-20) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.26.1 (2023-12-08) * **Bug Fix**: Correct loading of [services *] sections into shared config. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index b7c325d3ea..2d4a44d240 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.1" +const goModuleVersion = "1.26.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index dd7af71d18..9a801cd36a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.16.13 (2023-12-20) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.12 (2023-12-08) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index ec3eb5f6e8..5caa4ccc0a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.12" +const goModuleVersion = "1.16.13" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/CHANGELOG.md index c4c7cacc9b..737f25b242 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.36.0 (2023-12-20) + +* **Feature**: Amazon Route 53 now supports the Canada West (Calgary) Region (ca-west-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. + # v1.35.5 (2023-12-08) * **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. 
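The roaring.go hunks above vendor a new dense-conversion API (DenseSize, ToDense, FromDense, ToBitSet, FromBitSet). A minimal round-trip sketch, not part of this diff — the values and the doCopy=true choice are illustrative assumptions:

```go
package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	rb := roaring.BitmapOf(1, 2, 100, 70000)

	// ToDense lays the set out as one uint64 word per 64 values of key
	// space, sized via DenseSize (enough words to cover rb.Maximum()).
	words := rb.ToDense()
	fmt.Println(len(words)) // number of 64-bit words needed to reach 70000

	// doCopy=true detaches the container storage from the input slice,
	// so words can be reused or mutated safely afterwards.
	back := roaring.FromDense(words, true)
	fmt.Println(back.Equals(rb)) // true

	// ToBitSet bridges to github.com/bits-and-blooms/bitset (the new
	// import in roaring.go) through the same dense representation.
	fmt.Println(rb.ToBitSet().Count()) // 4
}
```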
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/go_module_metadata.go index edce7370e4..3176e8a9d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/go_module_metadata.go @@ -3,4 +3,4 @@ package route53 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.35.5" +const goModuleVersion = "1.36.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/enums.go index 30e48610d0..80f7a16877 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/enums.go @@ -121,6 +121,7 @@ const ( CloudWatchRegionUsIsobEast1 CloudWatchRegion = "us-isob-east-1" CloudWatchRegionApSoutheast4 CloudWatchRegion = "ap-southeast-4" CloudWatchRegionIlCentral1 CloudWatchRegion = "il-central-1" + CloudWatchRegionCaWest1 CloudWatchRegion = "ca-west-1" ) // Values returns all known values for CloudWatchRegion. Note that this can be @@ -163,6 +164,7 @@ func (CloudWatchRegion) Values() []CloudWatchRegion { "us-isob-east-1", "ap-southeast-4", "il-central-1", + "ca-west-1", } } @@ -377,6 +379,7 @@ const ( ResourceRecordSetRegionEuSouth2 ResourceRecordSetRegion = "eu-south-2" ResourceRecordSetRegionApSoutheast4 ResourceRecordSetRegion = "ap-southeast-4" ResourceRecordSetRegionIlCentral1 ResourceRecordSetRegion = "il-central-1" + ResourceRecordSetRegionCaWest1 ResourceRecordSetRegion = "ca-west-1" ) // Values returns all known values for ResourceRecordSetRegion. Note that this can @@ -414,6 +417,7 @@ func (ResourceRecordSetRegion) Values() []ResourceRecordSetRegion { "eu-south-2", "ap-southeast-4", "il-central-1", + "ca-west-1", } } @@ -554,6 +558,7 @@ const ( VPCRegionEuSouth2 VPCRegion = "eu-south-2" VPCRegionApSoutheast4 VPCRegion = "ap-southeast-4" VPCRegionIlCentral1 VPCRegion = "il-central-1" + VPCRegionCaWest1 VPCRegion = "ca-west-1" ) // Values returns all known values for VPCRegion. Note that this can be expanded @@ -595,5 +600,6 @@ func (VPCRegion) Values() []VPCRegion { "eu-south-2", "ap-southeast-4", "il-central-1", + "ca-west-1", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/types.go index d7c1e0d20c..df1aca6fa8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/types.go @@ -1228,10 +1228,6 @@ type ResourceRecordSet struct { // - If you include * in any position other than the leftmost label in a domain // name, DNS treats it as an * character (ASCII 42), not as a wildcard. You can't // use the * wildcard for resource records sets that have a type of NS. - // You can use the * wildcard as the leftmost label in a domain name, for example, - // *.example.com . You can't use an * for one of the middle labels, for example, - // marketing.*.example.com . In addition, the * must replace the entire label; for - // example, you can't specify prod*.example.com . // // This member is required. 
Name *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 17dd41f359..e0961ad895 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.26.6 (2023-12-20) + +* No change notes available for this release. + # v1.26.5 (2023-12-08) * **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 61667eb2c2..cfdd8bfb96 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.5" +const goModuleVersion = "1.26.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go index ca4c881909..3dbd993b54 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -183,6 +183,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, diff --git a/vendor/github.com/cockroachdb/pebble/README.md b/vendor/github.com/cockroachdb/pebble/README.md index c09e45d802..683e06ece8 100644 --- a/vendor/github.com/cockroachdb/pebble/README.md +++ b/vendor/github.com/cockroachdb/pebble/README.md @@ -86,17 +86,22 @@ differences. ## RocksDB Compatibility -Pebble strives for forward compatibility with RocksDB 6.2.1 (the latest -version of RocksDB used by CockroachDB). Forward compatibility means -that a DB generated by RocksDB can be used by Pebble. Currently, Pebble -provides bidirectional compatibility with RocksDB (a Pebble generated DB -can be used by RocksDB) when using its FormatMostCompatible format. New -functionality that is backwards incompatible is gated behind new format -major versions. In general, Pebble only provides compatibility with the -subset of functionality and configuration used by CockroachDB. The scope -of RocksDB functionality and configuration is too large to adequately -test and document all the incompatibilities. The list below contains -known incompatibilities. +Pebble strives for forward compatibility with RocksDB 6.2.1 (the latest version +of RocksDB used by CockroachDB). Forward compatibility means that a DB generated +by RocksDB 6.2.1 can be upgraded for use by Pebble. Pebble versions in the `v1` +series may open DBs generated by RocksDB 6.2.1. Since its introduction, Pebble +has adopted various backwards-incompatible format changes that are gated behind +new 'format major versions'. The Pebble `master` branch does not support opening +DBs generated by RocksDB. DBs generated by RocksDB may only be used with recent +versions of Pebble after migrating them through format major version upgrades +using previous versions of Pebble. See the below section of format major +versions. 
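The README hunk around this point describes how Pebble gates incompatible format changes behind format major versions. As a hedged illustration only — the store path and the version constants chosen here are assumptions, not taken from this diff — opting in at open time and ratcheting later looks roughly like:

```go
package main

import "github.com/cockroachdb/pebble"

func main() {
	// An existing store opens at whatever format it was last ratcheted
	// to; the Options value only applies when creating a new store.
	db, err := pebble.Open("demo-db", &pebble.Options{
		FormatMajorVersion: pebble.FormatFlushableIngest,
	})
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Ratcheting is permanent and may trigger the background or
	// blocking migrations listed in the table below.
	if err := db.RatchetFormatMajorVersion(pebble.FormatVirtualSSTables); err != nil {
		panic(err)
	}
}
```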
+ +Even the RocksDB-compatible versions of Pebble only provide compatibility with +the subset of functionality and configuration used by CockroachDB. The scope of +RocksDB functionality and configuration is too large to adequately test and +document all the incompatibilities. The list below contains known +incompatibilities. * Pebble's use of WAL recycling is only compatible with RocksDB's `kTolerateCorruptedTailRecords` WAL recovery mode. Older versions of @@ -119,9 +124,14 @@ known incompatibilities. Over time Pebble has introduced new physical file formats. Backwards incompatible changes are made through the introduction of 'format major -versions'. By default, when Pebble opens a database, it defaults to -`FormatMostCompatible`. This version is bi-directionally compatible with RocksDB -6.2.1 (with the caveats described above). +versions'. By default, when Pebble opens a database, it defaults to the lowest +supported version. In `v1`, this is `FormatMostCompatible`, which is +bi-directionally compatible with RocksDB 6.2.1 (with the caveats described +above). + +Databases created by RocksDB or Pebble versions `v1` and earlier must be upgraded +to a compatible format major version before running newer Pebble versions. Newer +Pebble versions will refuse to open databases in no longer supported formats. To opt into new formats, a user may set `FormatMajorVersion` on the [`Options`](https://pkg.go.dev/github.com/cockroachdb/pebble#Options) @@ -132,24 +142,25 @@ upgrade the format major version at runtime using Format major version upgrades are permanent; There is no option to return to an earlier format. -The table below outlines the history of format major versions: - -| Name | Value | Migration | -|------------------------------------|-------|------------| -| FormatMostCompatible | 1 | No | -| FormatVersioned | 3 | No | -| FormatSetWithDelete | 4 | No | -| FormatBlockPropertyCollector | 5 | No | -| FormatSplitUserKeysMarked | 6 | Background | -| FormatSplitUserKeysMarkedCompacted | 7 | Blocking | -| FormatRangeKeys | 8 | No | -| FormatMinTableFormatPebblev1 | 9 | No | -| FormatPrePebblev1Marked | 10 | Background | -| FormatSSTableValueBlocks | 12 | No | -| FormatFlushableIngest | 13 | No | -| FormatPrePebblev1MarkedCompacted | 14 | Blocking | -| FormatDeleteSizedAndObsolete | 15 | No | -| FormatVirtualSSTables | 16 | No | +The table below outlines the history of format major versions, along with what +range of Pebble versions support that format. + +| Name | Value | Migration | Pebble support | +|------------------------------------|-------|------------|----------------| +| FormatMostCompatible | 1 | No | v1 | +| FormatVersioned | 3 | No | v1 | +| FormatSetWithDelete | 4 | No | v1 | +| FormatBlockPropertyCollector | 5 | No | v1 | +| FormatSplitUserKeysMarked | 6 | Background | v1 | +| FormatSplitUserKeysMarkedCompacted | 7 | Blocking | v1 | +| FormatRangeKeys | 8 | No | v1 | +| FormatMinTableFormatPebblev1 | 9 | No | v1 | +| FormatPrePebblev1Marked | 10 | Background | v1 | +| FormatSSTableValueBlocks | 12 | No | v1 | +| FormatFlushableIngest | 13 | No | v1, master | +| FormatPrePebblev1MarkedCompacted | 14 | Blocking | v1, master | +| FormatDeleteSizedAndObsolete | 15 | No | v1, master | +| FormatVirtualSSTables | 16 | No | v1, master | Upgrading to a format major version with 'Background' in the migration column may trigger background activity to rewrite physical file @@ -172,7 +183,6 @@ versions for CockroachDB releases. 
| 22.2 | FormatMostCompatible | FormatPrePebblev1Marked | | 23.1 | FormatSplitUserKeysMarkedCompacted | FormatFlushableIngest | | 23.2 | FormatSplitUserKeysMarkedCompacted | FormatVirtualSSTables | -| 24.1 plan | FormatSSTableValueBlocks | | ## Pedigree diff --git a/vendor/github.com/cockroachdb/pebble/batch.go b/vendor/github.com/cockroachdb/pebble/batch.go index c695f8d2aa..ad419f147e 100644 --- a/vendor/github.com/cockroachdb/pebble/batch.go +++ b/vendor/github.com/cockroachdb/pebble/batch.go @@ -485,9 +485,6 @@ func (b *Batch) refreshMemTableSize() error { } b.memTableSize += memTableEntrySize(len(key), len(value)) } - if b.countRangeKeys > 0 && b.minimumFormatMajorVersion < FormatRangeKeys { - b.minimumFormatMajorVersion = FormatRangeKeys - } return nil } @@ -968,9 +965,6 @@ func (b *Batch) rangeKeySetDeferred(startLen, internalValueLen int) *DeferredBat func (b *Batch) incrementRangeKeysCount() { b.countRangeKeys++ - if b.minimumFormatMajorVersion < FormatRangeKeys { - b.minimumFormatMajorVersion = FormatRangeKeys - } if b.index != nil { b.rangeKeys = nil b.rangeKeysSeqNum = 0 diff --git a/vendor/github.com/cockroachdb/pebble/checkpoint.go b/vendor/github.com/cockroachdb/pebble/checkpoint.go index f321c01ec3..00ee6c5659 100644 --- a/vendor/github.com/cockroachdb/pebble/checkpoint.go +++ b/vendor/github.com/cockroachdb/pebble/checkpoint.go @@ -411,17 +411,12 @@ func (d *DB) writeCheckpointManifest( return err } - // Recent format versions use an atomic marker for setting the - // active manifest. Older versions use the CURRENT file. The - // setCurrentFunc function will return a closure that will - // take the appropriate action for the database's format - // version. var manifestMarker *atomicfs.Marker manifestMarker, _, err := atomicfs.LocateMarker(fs, destDirPath, manifestMarkerName) if err != nil { return err } - if err := setCurrentFunc(formatVers, manifestMarker, fs, destDirPath, destDir)(manifestFileNum); err != nil { + if err := manifestMarker.Move(base.MakeFilename(fileTypeManifest, manifestFileNum)); err != nil { return err } return manifestMarker.Close() diff --git a/vendor/github.com/cockroachdb/pebble/compaction.go b/vendor/github.com/cockroachdb/pebble/compaction.go index 9b10b45335..b80c16eda9 100644 --- a/vendor/github.com/cockroachdb/pebble/compaction.go +++ b/vendor/github.com/cockroachdb/pebble/compaction.go @@ -1003,8 +1003,8 @@ func (c *compaction) setupInuseKeyRanges() { } // calculateInuseKeyRanges will return a series of sorted spans. Overlapping // or abutting spans have already been merged. - c.inuseKeyRanges = calculateInuseKeyRanges( - c.version, c.cmp, level, numLevels-1, c.smallest.UserKey, c.largest.UserKey, + c.inuseKeyRanges = c.version.CalculateInuseKeyRanges( + c.cmp, level, numLevels-1, c.smallest.UserKey, c.largest.UserKey, ) // Check if there's a single in-use span that encompasses the entire key // range of the compaction. This is an optimization to avoid key comparisons @@ -1016,115 +1016,6 @@ func (c *compaction) setupInuseKeyRanges() { } } -func calculateInuseKeyRanges( - v *version, cmp base.Compare, level, maxLevel int, smallest, largest []byte, -) []manifest.UserKeyRange { - // Use two slices, alternating which one is input and which one is output - // as we descend the LSM. - var input, output []manifest.UserKeyRange - - // L0 requires special treatment, since sstables within L0 may overlap. - // We use the L0 Sublevels structure to efficiently calculate the merged - // in-use key ranges. 
- if level == 0 { - output = v.L0Sublevels.InUseKeyRanges(smallest, largest) - level++ - } - - for ; level <= maxLevel; level++ { - // NB: We always treat `largest` as inclusive for simplicity, because - // there's little consequence to calculating slightly broader in-use key - // ranges. - overlaps := v.Overlaps(level, cmp, smallest, largest, false /* exclusiveEnd */) - iter := overlaps.Iter() - - // We may already have in-use key ranges from higher levels. Iterate - // through both our accumulated in-use key ranges and this level's - // files, merging the two. - // - // Tables higher within the LSM have broader key spaces. We use this - // when possible to seek past a level's files that are contained by - // our current accumulated in-use key ranges. This helps avoid - // per-sstable work during flushes or compactions in high levels which - // overlap the majority of the LSM's sstables. - input, output = output, input - output = output[:0] - - var currFile *fileMetadata - var currAccum *manifest.UserKeyRange - if len(input) > 0 { - currAccum, input = &input[0], input[1:] - } - - // If we have an accumulated key range and its start is ≤ smallest, - // we can seek to the accumulated range's end. Otherwise, we need to - // start at the first overlapping file within the level. - if currAccum != nil && cmp(currAccum.Start, smallest) <= 0 { - currFile = seekGT(&iter, cmp, currAccum.End) - } else { - currFile = iter.First() - } - - for currFile != nil || currAccum != nil { - // If we've exhausted either the files in the level or the - // accumulated key ranges, we just need to append the one we have. - // If we have both a currFile and a currAccum, they either overlap - // or they're disjoint. If they're disjoint, we append whichever - // one sorts first and move on to the next file or range. If they - // overlap, we merge them into currAccum and proceed to the next - // file. - switch { - case currAccum == nil || (currFile != nil && cmp(currFile.Largest.UserKey, currAccum.Start) < 0): - // This file is strictly before the current accumulated range, - // or there are no more accumulated ranges. - output = append(output, manifest.UserKeyRange{ - Start: currFile.Smallest.UserKey, - End: currFile.Largest.UserKey, - }) - currFile = iter.Next() - case currFile == nil || (currAccum != nil && cmp(currAccum.End, currFile.Smallest.UserKey) < 0): - // The current accumulated key range is strictly before the - // current file, or there are no more files. - output = append(output, *currAccum) - currAccum = nil - if len(input) > 0 { - currAccum, input = &input[0], input[1:] - } - default: - // The current accumulated range and the current file overlap. - // Adjust the accumulated range to be the union. - if cmp(currFile.Smallest.UserKey, currAccum.Start) < 0 { - currAccum.Start = currFile.Smallest.UserKey - } - if cmp(currFile.Largest.UserKey, currAccum.End) > 0 { - currAccum.End = currFile.Largest.UserKey - } - - // Extending `currAccum`'s end boundary may have caused it to - // overlap with `input` key ranges that we haven't processed - // yet. Merge any such key ranges. - for len(input) > 0 && cmp(input[0].Start, currAccum.End) <= 0 { - if cmp(input[0].End, currAccum.End) > 0 { - currAccum.End = input[0].End - } - input = input[1:] - } - // Seek the level iterator past our current accumulated end. 
- currFile = seekGT(&iter, cmp, currAccum.End) - } - } - } - return output -} - -func seekGT(iter *manifest.LevelIterator, cmp base.Compare, key []byte) *manifest.FileMetadata { - f := iter.SeekGE(cmp, key) - for f != nil && cmp(f.Largest.UserKey, key) == 0 { - f = iter.Next() - } - return f -} - // findGrandparentLimit takes the start user key for a table and returns the // user key to which that table can extend without excessively overlapping // the grandparent level. If no limit is needed considering the grandparent @@ -1277,22 +1168,16 @@ func (c *compaction) newInputIter( newIters tableNewIters, newRangeKeyIter keyspan.TableNewSpanIter, snapshots []uint64, ) (_ internalIterator, retErr error) { // Validate the ordering of compaction input files for defense in depth. - // TODO(jackson): Some of the CheckOrdering calls may be adapted to pass - // ProhibitSplitUserKeys if we thread the active format major version in. Or - // if we remove support for earlier FMVs, we can remove the parameter - // altogether. if len(c.flushing) == 0 { if c.startLevel.level >= 0 { err := manifest.CheckOrdering(c.cmp, c.formatKey, - manifest.Level(c.startLevel.level), c.startLevel.files.Iter(), - manifest.AllowSplitUserKeys) + manifest.Level(c.startLevel.level), c.startLevel.files.Iter()) if err != nil { return nil, err } } err := manifest.CheckOrdering(c.cmp, c.formatKey, - manifest.Level(c.outputLevel.level), c.outputLevel.files.Iter(), - manifest.AllowSplitUserKeys) + manifest.Level(c.outputLevel.level), c.outputLevel.files.Iter()) if err != nil { return nil, err } @@ -1302,9 +1187,7 @@ func (c *compaction) newInputIter( } for _, info := range c.startLevel.l0SublevelInfo { err := manifest.CheckOrdering(c.cmp, c.formatKey, - info.sublevel, info.Iter(), - // NB: L0 sublevels have never allowed split user keys. - manifest.ProhibitSplitUserKeys) + info.sublevel, info.Iter()) if err != nil { return nil, err } @@ -1316,8 +1199,7 @@ func (c *compaction) newInputIter( } interLevel := c.extraLevels[0] err := manifest.CheckOrdering(c.cmp, c.formatKey, - manifest.Level(interLevel.level), interLevel.files.Iter(), - manifest.AllowSplitUserKeys) + manifest.Level(interLevel.level), interLevel.files.Iter()) if err != nil { return nil, err } @@ -3173,10 +3055,6 @@ func (d *DB) runCompaction( } writerOpts := d.opts.MakeWriterOptions(c.outputLevel.level, tableFormat) - if formatVers < FormatBlockPropertyCollector { - // Cannot yet write block properties. - writerOpts.BlockPropertyCollectors = nil - } // prevPointKey is a sstable.WriterOption that provides access to // the last point key written to a writer's sstable. When a new diff --git a/vendor/github.com/cockroachdb/pebble/compaction_iter.go b/vendor/github.com/cockroachdb/pebble/compaction_iter.go index 299dbfc983..c86173e711 100644 --- a/vendor/github.com/cockroachdb/pebble/compaction_iter.go +++ b/vendor/github.com/cockroachdb/pebble/compaction_iter.go @@ -753,12 +753,9 @@ func (i *compactionIter) setNext() { i.valid = true i.maybeZeroSeqnum(i.curSnapshotIdx) - // There are two cases where we can early return and skip the remaining + // If this key is already a SETWITHDEL we can early return and skip the remaining // records in the stripe: - // - If the DB does not SETWITHDEL. - // - If this key is already a SETWITHDEL. 
- if i.formatVersion < FormatSetWithDelete || - i.iterKey.Kind() == InternalKeyKindSetWithDelete { + if i.iterKey.Kind() == InternalKeyKindSetWithDelete { i.skip = true return } diff --git a/vendor/github.com/cockroachdb/pebble/db.go b/vendor/github.com/cockroachdb/pebble/db.go index ab00321c53..56b00ae5ee 100644 --- a/vendor/github.com/cockroachdb/pebble/db.go +++ b/vendor/github.com/cockroachdb/pebble/db.go @@ -829,13 +829,11 @@ func (d *DB) applyInternal(batch *Batch, opts *WriteOptions, noSyncWait bool) er return errors.New("pebble: WAL disabled") } - if batch.minimumFormatMajorVersion != FormatMostCompatible { - if fmv := d.FormatMajorVersion(); fmv < batch.minimumFormatMajorVersion { - panic(fmt.Sprintf( - "pebble: batch requires at least format major version %d (current: %d)", - batch.minimumFormatMajorVersion, fmv, - )) - } + if fmv := d.FormatMajorVersion(); fmv < batch.minimumFormatMajorVersion { + panic(fmt.Sprintf( + "pebble: batch requires at least format major version %d (current: %d)", + batch.minimumFormatMajorVersion, fmv, + )) } if batch.countRangeKeys > 0 { @@ -1036,14 +1034,6 @@ func (d *DB) newIter( panic(err) } seqNum := internalOpts.snapshot.seqNum - if o.rangeKeys() { - if d.FormatMajorVersion() < FormatRangeKeys { - panic(fmt.Sprintf( - "pebble: range keys require at least format major version %d (current: %d)", - FormatRangeKeys, d.FormatMajorVersion(), - )) - } - } if o != nil && o.RangeKeyMasking.Suffix != nil && o.KeyTypes != IterKeyTypePointsAndRanges { panic("pebble: range key masking requires IterKeyTypePointsAndRanges") } @@ -1889,7 +1879,7 @@ func (d *DB) splitManualCompaction( if level == 0 { endLevel = baseLevel } - keyRanges := calculateInuseKeyRanges(curr, d.cmp, level, endLevel, start, end) + keyRanges := curr.CalculateInuseKeyRanges(d.cmp, level, endLevel, start, end) for _, keyRange := range keyRanges { splitCompactions = append(splitCompactions, &manualCompaction{ level: level, diff --git a/vendor/github.com/cockroachdb/pebble/external_iterator.go b/vendor/github.com/cockroachdb/pebble/external_iterator.go index 078d016fbf..0b02e8f41d 100644 --- a/vendor/github.com/cockroachdb/pebble/external_iterator.go +++ b/vendor/github.com/cockroachdb/pebble/external_iterator.go @@ -101,16 +101,6 @@ func NewExternalIterWithContext( var readers [][]*sstable.Reader - // Ensure we close all the opened readers if we error out. - defer func() { - if err != nil { - for i := range readers { - for j := range readers[i] { - _ = readers[i][j].Close() - } - } - } - }() seqNumOffset := 0 var extraReaderOpts []sstable.ReaderOption for i := range extraOpts { @@ -120,13 +110,18 @@ func NewExternalIterWithContext( seqNumOffset += len(levelFiles) } for _, levelFiles := range files { - var subReaders []*sstable.Reader seqNumOffset -= len(levelFiles) - subReaders, err = openExternalTables(o, levelFiles, seqNumOffset, o.MakeReaderOptions(), extraReaderOpts...) + subReaders, err := openExternalTables(o, levelFiles, seqNumOffset, o.MakeReaderOptions(), extraReaderOpts...) readers = append(readers, subReaders) - } - if err != nil { - return nil, err + if err != nil { + // Close all the opened readers. 
+ for i := range readers { + for j := range readers[i] { + _ = readers[i][j].Close() + } + } + return nil, err + } } buf := iterAllocPool.Get().(*iterAlloc) diff --git a/vendor/github.com/cockroachdb/pebble/filenames.go b/vendor/github.com/cockroachdb/pebble/filenames.go index 07d74c87d3..86e7ed1d37 100644 --- a/vendor/github.com/cockroachdb/pebble/filenames.go +++ b/vendor/github.com/cockroachdb/pebble/filenames.go @@ -4,12 +4,7 @@ package pebble -import ( - "fmt" - - "github.com/cockroachdb/pebble/internal/base" - "github.com/cockroachdb/pebble/vfs" -) +import "github.com/cockroachdb/pebble/internal/base" type fileType = base.FileType @@ -21,34 +16,7 @@ const ( fileTypeLock = base.FileTypeLock fileTypeTable = base.FileTypeTable fileTypeManifest = base.FileTypeManifest - fileTypeCurrent = base.FileTypeCurrent fileTypeOptions = base.FileTypeOptions fileTypeTemp = base.FileTypeTemp fileTypeOldTemp = base.FileTypeOldTemp ) - -// setCurrentFile sets the CURRENT file to point to the manifest with -// provided file number. -// -// NB: This is a low-level routine and typically not what you want to -// use. Newer versions of Pebble running newer format major versions do -// not use the CURRENT file. See setCurrentFunc in version_set.go. -func setCurrentFile(dirname string, fs vfs.FS, fileNum base.DiskFileNum) error { - newFilename := base.MakeFilepath(fs, dirname, fileTypeCurrent, fileNum) - oldFilename := base.MakeFilepath(fs, dirname, fileTypeTemp, fileNum) - fs.Remove(oldFilename) - f, err := fs.Create(oldFilename) - if err != nil { - return err - } - if _, err := fmt.Fprintf(f, "MANIFEST-%s\n", fileNum); err != nil { - return err - } - if err := f.Sync(); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - return fs.Rename(oldFilename, newFilename) -} diff --git a/vendor/github.com/cockroachdb/pebble/format_major_version.go b/vendor/github.com/cockroachdb/pebble/format_major_version.go index 89be1610df..6f8576dcf8 100644 --- a/vendor/github.com/cockroachdb/pebble/format_major_version.go +++ b/vendor/github.com/cockroachdb/pebble/format_major_version.go @@ -9,7 +9,6 @@ import ( "strconv" "github.com/cockroachdb/errors" - "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/manifest" "github.com/cockroachdb/pebble/sstable" "github.com/cockroachdb/pebble/vfs" @@ -43,15 +42,18 @@ func (v FormatMajorVersion) String() string { } const ( + // FormatDefault leaves the format version unspecified. When used to create a + // new store, Pebble will choose the earliest format version it supports. + FormatDefault FormatMajorVersion = iota + // 21.2 versions. - // FormatDefault leaves the format version unspecified. The - // FormatDefault constant may be ratcheted upwards over time. - FormatDefault FormatMajorVersion = iota // FormatMostCompatible maintains the most backwards compatibility, // maintaining bi-directional compatibility with RocksDB 6.2.1 in // the particular configuration described in the Pebble README. - FormatMostCompatible + // Deprecated. + _ // FormatMostCompatible + // formatVersionedManifestMarker is the first // backwards-incompatible change made to Pebble, introducing the // format-version marker file for handling backwards-incompatible @@ -63,28 +65,36 @@ const ( // format major version. Clients should use FormatVersioned which // also ensures earlier versions of Pebble fail to open a database // written in a future format major version. - formatVersionedManifestMarker + // Deprecated. 
+ _ // formatVersionedManifestMarker + // FormatVersioned is a new format major version that replaces the // old `CURRENT` file with a new 'marker' file scheme. Previous // Pebble versions will be unable to open the database unless // they're aware of format versions. - FormatVersioned + // Deprecated. + _ // FormatVersioned + // FormatSetWithDelete is a format major version that introduces a new key // kind, base.InternalKeyKindSetWithDelete. Previous Pebble versions will be // unable to open this database. - FormatSetWithDelete + // Deprecated. + _ // FormatSetWithDelete // 22.1 versions. // FormatBlockPropertyCollector is a format major version that introduces // BlockPropertyCollectors. - FormatBlockPropertyCollector + // Deprecated. + _ // FormatBlockPropertyCollector + // FormatSplitUserKeysMarked is a format major version that guarantees that // all files that share user keys with neighbors are marked for compaction // in the manifest. Ratcheting to FormatSplitUserKeysMarked will block // (without holding mutexes) until the scan of the LSM is complete and the // manifest has been rotated. - FormatSplitUserKeysMarked + // Deprecated. + _ // FormatSplitUserKeysMarked // 22.2 versions. @@ -95,21 +105,28 @@ const ( // across multiple files within a level L1+. Ratcheting to this format version // will block (without holding mutexes) until all necessary compactions for // files marked for compaction are complete. - FormatSplitUserKeysMarkedCompacted + // Deprecated. + _ // FormatSplitUserKeysMarkedCompacted + // FormatRangeKeys is a format major version that introduces range keys. - FormatRangeKeys + // Deprecated. + _ // FormatRangeKeys + // FormatMinTableFormatPebblev1 is a format major version that guarantees that // tables created by or ingested into the DB at or above this format major // version will have a table format version of at least Pebblev1 (Block // Properties). - FormatMinTableFormatPebblev1 + // Deprecated. + _ // FormatMinTableFormatPebblev1 + // FormatPrePebblev1Marked is a format major version that guarantees that all // sstables with a table format version pre-Pebblev1 (i.e. those that are // guaranteed to not contain block properties) are marked for compaction in // the manifest. Ratcheting to FormatPrePebblev1Marked will block (without // holding mutexes) until the scan of the LSM is complete and the manifest has // been rotated. - FormatPrePebblev1Marked + // Deprecated. + _ // FormatPrePebblev1Marked // 23.1 versions. @@ -118,21 +135,13 @@ const ( // release. It was later decided that this should be deferred until a // subsequent release. The original ordering is preserved so as not to // introduce breaking changes in Cockroach. - formatUnusedPrePebblev1MarkedCompacted + _ // formatUnusedPrePebblev1MarkedCompacted // FormatSSTableValueBlocks is a format major version that adds support for // storing values in value blocks in the sstable. Value block support is not // necessarily enabled when writing sstables, when running with this format // major version. - // - // WARNING: In development, so no production code should upgrade to this - // format, since a DB with this format major version will not actually - // interoperate correctly with another DB with the same format major - // version. This format major version is introduced so that tests can start - // being executed up to this version. Note that these tests succeed despite - // the incomplete support since they do not enable value blocks and use - // TableFormatPebblev2. 
- FormatSSTableValueBlocks + _ // FormatSSTableValueBlocks // FormatFlushableIngest is a format major version that enables lazy // addition of ingested sstables into the LSM structure. When an ingest @@ -169,31 +178,45 @@ const ( // a format major version. FormatVirtualSSTables - // internalFormatNewest holds the newest format major version, including - // experimental ones excluded from the exported FormatNewest constant until - // they've stabilized. Used in tests. - internalFormatNewest FormatMajorVersion = iota - 1 + // FormatSyntheticPrefixes is a format major version that adds support for + // sstables to have their content exposed in a different prefix of keyspace + // than the actual prefix persisted in the keys in such sstables. The prefix + // replacement information is stored in new fields in the Manifest and thus + // requires a format major version. + FormatSyntheticPrefixes + + // -- Add new versions here -- + + // FormatNewest is the most recent format major version. + FormatNewest FormatMajorVersion = iota - 1 + + // Experimental versions, which are excluded by FormatNewest (but can be used + // in tests) can be defined here. + + // -- Add experimental versions here -- - // FormatNewest always contains the most recent format major version. - FormatNewest FormatMajorVersion = internalFormatNewest + // internalFormatNewest is the most recent, possibly experimental format major + // version. + internalFormatNewest FormatMajorVersion = iota - 2 ) +// FormatMinSupported is the minimum format version that is supported by this +// Pebble version. +const FormatMinSupported = FormatFlushableIngest + +// IsSupported returns true if the version is supported by the current Pebble +// version. +func (v FormatMajorVersion) IsSupported() bool { + return v == FormatDefault || v >= FormatMinSupported && v <= internalFormatNewest +} + // MaxTableFormat returns the maximum sstable.TableFormat that can be used at // this FormatMajorVersion. func (v FormatMajorVersion) MaxTableFormat() sstable.TableFormat { switch v { - case FormatDefault, FormatMostCompatible, formatVersionedManifestMarker, - FormatVersioned, FormatSetWithDelete: - return sstable.TableFormatRocksDBv2 - case FormatBlockPropertyCollector, FormatSplitUserKeysMarked, - FormatSplitUserKeysMarkedCompacted: - return sstable.TableFormatPebblev1 - case FormatRangeKeys, FormatMinTableFormatPebblev1, FormatPrePebblev1Marked, - formatUnusedPrePebblev1MarkedCompacted: - return sstable.TableFormatPebblev2 - case FormatSSTableValueBlocks, FormatFlushableIngest, FormatPrePebblev1MarkedCompacted: + case FormatDefault, FormatFlushableIngest, FormatPrePebblev1MarkedCompacted: return sstable.TableFormatPebblev3 - case FormatDeleteSizedAndObsolete, FormatVirtualSSTables: + case FormatDeleteSizedAndObsolete, FormatVirtualSSTables, FormatSyntheticPrefixes: return sstable.TableFormatPebblev4 default: panic(fmt.Sprintf("pebble: unsupported format major version: %s", v)) @@ -204,33 +227,14 @@ func (v FormatMajorVersion) MaxTableFormat() sstable.TableFormat { // MinTableFormat returns the minimum sstable.TableFormat that can be used at // this FormatMajorVersion.
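As a standalone sketch of how the new constants and helpers compose (it assumes only the exported names visible in this file: `FormatMinSupported`, `FormatNewest`, `IsSupported`, and `MaxTableFormat`):

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/pebble"
)

func main() {
	// With the deprecations above, every version from FormatMinSupported
	// (FormatFlushableIngest) through FormatNewest reports IsSupported() ==
	// true, and each version pins the maximum sstable format it may write.
	for v := pebble.FormatMinSupported; v <= pebble.FormatNewest; v++ {
		fmt.Printf("%s: supported=%t, max table format=%s\n",
			v, v.IsSupported(), v.MaxTableFormat())
	}
}
```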
func (v FormatMajorVersion) MinTableFormat() sstable.TableFormat { switch v { - case FormatDefault, FormatMostCompatible, formatVersionedManifestMarker, - FormatVersioned, FormatSetWithDelete, FormatBlockPropertyCollector, - FormatSplitUserKeysMarked, FormatSplitUserKeysMarkedCompacted, - FormatRangeKeys: - return sstable.TableFormatLevelDB - case FormatMinTableFormatPebblev1, FormatPrePebblev1Marked, - formatUnusedPrePebblev1MarkedCompacted, FormatSSTableValueBlocks, - FormatFlushableIngest, FormatPrePebblev1MarkedCompacted, - FormatDeleteSizedAndObsolete, FormatVirtualSSTables: + case FormatDefault, FormatFlushableIngest, FormatPrePebblev1MarkedCompacted, + FormatDeleteSizedAndObsolete, FormatVirtualSSTables, FormatSyntheticPrefixes: return sstable.TableFormatPebblev1 default: panic(fmt.Sprintf("pebble: unsupported format major version: %s", v)) } } -// orderingInvariants returns an enum encoding the set of invariants that must -// hold within the receiver format major version. Invariants only get stricter -// as the format major version advances, so it is okay to retrieve the -// invariants from the current format major version and by the time the -// invariants are enforced, the format major version has advanced. -func (v FormatMajorVersion) orderingInvariants() manifest.OrderingInvariants { - if v < FormatSplitUserKeysMarkedCompacted { - return manifest.AllowSplitUserKeys - } - return manifest.ProhibitSplitUserKeys -} - // formatMajorVersionMigrations defines the migrations from one format // major version to the next. Each migration is defined as a closure // which will be invoked on the database before the new format major @@ -242,112 +246,7 @@ func (v FormatMajorVersion) orderingInvariants() manifest.OrderingInvariants { // panic if a migration returns a nil error but fails to finalize the // new format major version. var formatMajorVersionMigrations = map[FormatMajorVersion]func(*DB) error{ - FormatMostCompatible: func(d *DB) error { return nil }, - formatVersionedManifestMarker: func(d *DB) error { - // formatVersionedManifestMarker introduces the use of a marker - // file for pointing to the current MANIFEST file. - - // Lock the manifest. - d.mu.versions.logLock() - defer d.mu.versions.logUnlock() - - // Construct the filename of the currently active manifest and - // move the manifest marker to that filename. The marker is - // guaranteed to exist, because we unconditionally locate it - // during Open. - manifestFileNum := d.mu.versions.manifestFileNum - filename := base.MakeFilename(fileTypeManifest, manifestFileNum) - if err := d.mu.versions.manifestMarker.Move(filename); err != nil { - return errors.Wrap(err, "moving manifest marker") - } - - // Now that we have a manifest marker file in place and pointing - // to the current MANIFEST, finalize the upgrade. If we fail for - // some reason, a retry of this migration is guaranteed to again - // move the manifest marker file to the latest manifest. If - // we're unable to finalize the upgrade, a subsequent call to - // Open will ignore the manifest marker. - if err := d.finalizeFormatVersUpgrade(formatVersionedManifestMarker); err != nil { - return err - } - - // We've finalized the upgrade. All subsequent Open calls will - // ignore the CURRENT file and instead read the manifest marker. - // Before we unlock the manifest, we need to update versionSet - // to use the manifest marker on future rotations. 
- d.mu.versions.setCurrent = setCurrentFuncMarker( - d.mu.versions.manifestMarker, - d.mu.versions.fs, - d.mu.versions.dirname) - return nil - }, - // The FormatVersioned version is split into two, each with their - // own migration to ensure the post-migration cleanup happens even - // if there's a crash immediately after finalizing the version. Once - // a new format major version is finalized, its migration will never - // run again. Post-migration cleanup like the one in the migration - // below must be performed in a separate migration or every time the - // database opens. - FormatVersioned: func(d *DB) error { - // Replace the `CURRENT` file with one that points to the - // nonexistent `MANIFEST-000000` file. If an earlier Pebble - // version that does not know about format major versions - // attempts to open the database, it will error avoiding - // accidental corruption. - if err := setCurrentFile(d.mu.versions.dirname, d.mu.versions.fs, base.FileNum(0).DiskFileNum()); err != nil { - return err - } - return d.finalizeFormatVersUpgrade(FormatVersioned) - }, - // As SetWithDelete is a new key kind, there is nothing to migrate. We can - // simply finalize the format version and we're done. - FormatSetWithDelete: func(d *DB) error { - return d.finalizeFormatVersUpgrade(FormatSetWithDelete) - }, - FormatBlockPropertyCollector: func(d *DB) error { - return d.finalizeFormatVersUpgrade(FormatBlockPropertyCollector) - }, - FormatSplitUserKeysMarked: func(d *DB) error { - // Mark any unmarked files with split-user keys. Note all format major - // versions migrations are invoked with DB.mu locked. - if err := d.markFilesLocked(markFilesWithSplitUserKeys(d.opts.Comparer.Equal)); err != nil { - return err - } - return d.finalizeFormatVersUpgrade(FormatSplitUserKeysMarked) - }, - FormatSplitUserKeysMarkedCompacted: func(d *DB) error { - // Before finalizing the format major version, rewrite any sstables - // still marked for compaction. Note all format major versions - // migrations are invoked with DB.mu locked. - if err := d.compactMarkedFilesLocked(); err != nil { - return err - } - return d.finalizeFormatVersUpgrade(FormatSplitUserKeysMarkedCompacted) - }, - FormatRangeKeys: func(d *DB) error { - return d.finalizeFormatVersUpgrade(FormatRangeKeys) - }, - FormatMinTableFormatPebblev1: func(d *DB) error { - return d.finalizeFormatVersUpgrade(FormatMinTableFormatPebblev1) - }, - FormatPrePebblev1Marked: func(d *DB) error { - // Mark any unmarked files that contain only table properties. Note all - // format major versions migrations are invoked with DB.mu locked. - if err := d.markFilesLocked(markFilesPrePebblev1(d.tableCache)); err != nil { - return err - } - return d.finalizeFormatVersUpgrade(FormatPrePebblev1Marked) - }, - formatUnusedPrePebblev1MarkedCompacted: func(d *DB) error { - // Intentional no-op. - return d.finalizeFormatVersUpgrade(formatUnusedPrePebblev1MarkedCompacted) - }, - FormatSSTableValueBlocks: func(d *DB) error { - return d.finalizeFormatVersUpgrade(FormatSSTableValueBlocks) - }, - FormatFlushableIngest: func(d *DB) error { - return d.finalizeFormatVersUpgrade(FormatFlushableIngest) - }, + FormatFlushableIngest: func(d *DB) error { return nil }, FormatPrePebblev1MarkedCompacted: func(d *DB) error { // Before finalizing the format major version, rewrite any sstables // still marked for compaction. 
Note all format major versions @@ -363,10 +262,19 @@ var formatMajorVersionMigrations = map[FormatMajorVersion]func(*DB) error{ FormatVirtualSSTables: func(d *DB) error { return d.finalizeFormatVersUpgrade(FormatVirtualSSTables) }, + FormatSyntheticPrefixes: func(d *DB) error { + return d.finalizeFormatVersUpgrade(FormatSyntheticPrefixes) + }, } const formatVersionMarkerName = `format-version` +// lookupFormatMajorVersion retrieves the format version from the format version +// marker file. +// +// If such a file does not exist, returns FormatDefault. Note that this case is +// only acceptable if we are creating a new store (we no longer support +// FormatMostCompatible which is the only one with no version marker file). func lookupFormatMajorVersion( fs vfs.FS, dirname string, ) (FormatMajorVersion, *atomicfs.Marker, error) { @@ -375,7 +283,7 @@ func lookupFormatMajorVersion( return 0, nil, err } if versString == "" { - return FormatMostCompatible, m, nil + return FormatDefault, m, nil } v, err := strconv.ParseUint(versString, 10, 64) if err != nil { @@ -386,7 +294,10 @@ func lookupFormatMajorVersion( return 0, nil, errors.Newf("pebble: default format major version should not persisted", vers) } if vers > internalFormatNewest { - return 0, nil, errors.Newf("pebble: database %q written in format major version %d", dirname, vers) + return 0, nil, errors.Newf("pebble: database %q written in unknown format major version %d", dirname, vers) + } + if vers < FormatMinSupported { + return 0, nil, errors.Newf("pebble: database %q written in format major version %d which is no longer supported", dirname, vers) } return vers, m, nil } @@ -456,11 +367,7 @@ func (d *DB) ratchetFormatMajorVersionLocked(formatVers FormatMajorVersion) erro // // See formatMajorVersionMigrations. func (d *DB) finalizeFormatVersUpgrade(formatVers FormatMajorVersion) error { - // We use the marker to encode the active format version in the - // marker filename. Unlike other uses of the atomic marker, there is - // no file with the filename `formatVers.String()` on the - // filesystem. - if err := d.mu.formatVers.marker.Move(formatVers.String()); err != nil { + if err := d.writeFormatVersionMarker(formatVers); err != nil { return err } d.mu.formatVers.vers.Store(uint64(formatVers)) @@ -468,6 +375,14 @@ func (d *DB) finalizeFormatVersUpgrade(formatVers FormatMajorVersion) error { return nil } +func (d *DB) writeFormatVersionMarker(formatVers FormatMajorVersion) error { + // We use the marker to encode the active format version in the + // marker filename. Unlike other uses of the atomic marker, there is + // no file with the filename `formatVers.String()` on the + // filesystem. + return d.mu.formatVers.marker.Move(formatVers.String()) +} + // compactMarkedFilesLocked performs a migration that schedules rewrite // compactions to compact away any sstables marked for compaction. // compactMarkedFilesLocked is run while ratcheting the database's format major @@ -523,73 +438,11 @@ func (d *DB) compactMarkedFilesLocked() error { // level. type findFilesFunc func(v *version) (found bool, files [numLevels][]*fileMetadata, _ error) -// markFilesWithSplitUserKeys scans the LSM's levels 1 through 6 for adjacent -// files that contain the same user key. Such arrangements of files were -// permitted in RocksDB and in Pebble up to SHA a860bbad. 
-var markFilesWithSplitUserKeys = func(equal Equal) findFilesFunc { - return func(v *version) (found bool, files [numLevels][]*fileMetadata, _ error) { - // Files with split user keys are expected to be rare and performing key - // comparisons for every file within the LSM is expensive, so drop the - // database lock while scanning the file metadata. - for l := numLevels - 1; l > 0; l-- { - iter := v.Levels[l].Iter() - var prevFile *fileMetadata - var prevUserKey []byte - for f := iter.First(); f != nil; f = iter.Next() { - if prevUserKey != nil && equal(prevUserKey, f.Smallest.UserKey) { - // NB: We may append a file twice, once as prevFile and once - // as f. That's okay, and handled below. - files[l] = append(files[l], prevFile, f) - found = true - } - if f.Largest.IsExclusiveSentinel() { - prevUserKey = nil - prevFile = nil - } else { - prevUserKey = f.Largest.UserKey - prevFile = f - } - } - } - return - } -} - -// markFilesPrePebblev1 scans the LSM for files that do not support block -// properties (i.e. a table format version pre-Pebblev1). -var markFilesPrePebblev1 = func(tc *tableCacheContainer) findFilesFunc { - return func(v *version) (found bool, files [numLevels][]*fileMetadata, err error) { - for l := numLevels - 1; l > 0; l-- { - iter := v.Levels[l].Iter() - for f := iter.First(); f != nil; f = iter.Next() { - if f.Virtual { - // Any physical sstable which has been virtualized must - // have already undergone this migration, and we don't - // need to worry about the virtual sstable themselves. - panic("pebble: unexpected virtual sstable during migration") - } - err = tc.withReader( - f.PhysicalMeta(), func(r *sstable.Reader) error { - tf, err := r.TableFormat() - if err != nil { - return err - } - if tf < sstable.TableFormatPebblev1 { - found = true - files[l] = append(files[l], f) - } - return nil - }) - if err != nil { - return - } - } - } - return - } -} +// This method is not used currently, but it will be useful the next time we need +// to mark files for compaction. +var _ = (*DB)(nil).markFilesLocked -// markFilesLock durably marks the files that match the given findFilesFunc for +// markFilesLocked durably marks the files that match the given findFilesFunc for // compaction. func (d *DB) markFilesLocked(findFn findFilesFunc) error { jobID := d.mu.nextJobID diff --git a/vendor/github.com/cockroachdb/pebble/ingest.go b/vendor/github.com/cockroachdb/pebble/ingest.go index 2ef8f43296..2373b07907 100644 --- a/vendor/github.com/cockroachdb/pebble/ingest.go +++ b/vendor/github.com/cockroachdb/pebble/ingest.go @@ -118,9 +118,12 @@ func ingestSynthesizeShared( // NB: We create new internal keys and pass them into ExternalRangeKeyBounds // so that we can sub a zero sequence number into the bounds. We can set // the sequence number to anything here; it'll be reset in ingestUpdateSeqNum - // anyway. However we do need to use the same sequence number across all + // anyway. However, we do need to use the same sequence number across all // bound keys at this step so that we end up with bounds that are consistent // across point/range keys. + // Note that the kind of the smallest key might change because of the seqnum + // rewriting. 
For example, the sstable could start with a.SET.2 and + // a.RANGEDEL.1 (with smallest key being a.SET.2) but after rewriting the + // seqnum we have a.SET.100 and a.RANGEDEL.100 (with the smallest key now + // being a.RANGEDEL.100). smallestRangeKey := base.MakeInternalKey(sm.SmallestRangeKey.UserKey, 0, sm.SmallestRangeKey.Kind()) largestRangeKey := base.MakeExclusiveSentinelKey(sm.LargestRangeKey.Kind(), sm.LargestRangeKey.UserKey) meta.ExtendRangeKeyBounds(opts.Comparer.Compare, smallestRangeKey, largestRangeKey) @@ -211,6 +214,13 @@ func ingestLoad1External( // what parts of this sstable are referenced by other nodes. meta.FileBacking.Size = e.Size + if len(e.SyntheticPrefix) != 0 { + meta.PrefixReplacement = &manifest.PrefixReplacement{ + ContentPrefix: e.ContentPrefix, + SyntheticPrefix: e.SyntheticPrefix, + } + } + if err := meta.Validate(opts.Comparer.Compare, opts.Comparer.FormatKey); err != nil { return nil, err } @@ -1109,6 +1119,11 @@ type ExternalFile struct { // or range keys. If both structs are false, an error is returned during // ingestion. HasPointKey, HasRangeKey bool + // ContentPrefix and SyntheticPrefix denote a prefix replacement rule causing + // a file, in which all keys have prefix ContentPrefix, to appear whenever it + // is accessed as if those keys all instead have prefix SyntheticPrefix. + // SyntheticPrefix must be a prefix of both SmallestUserKey and LargestUserKey. + ContentPrefix, SyntheticPrefix []byte } // IngestWithStats does the same as Ingest, and additionally returns @@ -1293,6 +1308,13 @@ func (d *DB) ingest( if (exciseSpan.Valid() || len(shared) > 0 || len(external) > 0) && d.FormatMajorVersion() < FormatVirtualSSTables { return IngestOperationStats{}, errors.New("pebble: format major version too old for excise, shared or external sstable ingestion") } + if len(external) > 0 && d.FormatMajorVersion() < FormatSyntheticPrefixes { + for i := range external { + if len(external[i].SyntheticPrefix) > 0 { + return IngestOperationStats{}, errors.New("pebble: format major version too old for synthetic prefix ingestion") + } + } + } // Allocate file numbers for all of the files being ingested and mark them as // pending in order to prevent them from being deleted.
Note that this causes // the file number ordering to be out of alignment with sequence number diff --git a/vendor/github.com/cockroachdb/pebble/internal/base/filenames.go b/vendor/github.com/cockroachdb/pebble/internal/base/filenames.go index 06098ab639..c4420ffb76 100644 --- a/vendor/github.com/cockroachdb/pebble/internal/base/filenames.go +++ b/vendor/github.com/cockroachdb/pebble/internal/base/filenames.go @@ -62,7 +62,6 @@ const ( FileTypeLock FileTypeTable FileTypeManifest - FileTypeCurrent FileTypeOptions FileTypeOldTemp FileTypeTemp @@ -79,8 +78,6 @@ func MakeFilename(fileType FileType, dfn DiskFileNum) string { return fmt.Sprintf("%s.sst", dfn) case FileTypeManifest: return fmt.Sprintf("MANIFEST-%s", dfn) - case FileTypeCurrent: - return "CURRENT" case FileTypeOptions: return fmt.Sprintf("OPTIONS-%s", dfn) case FileTypeOldTemp: @@ -100,8 +97,6 @@ func MakeFilepath(fs vfs.FS, dirname string, fileType FileType, dfn DiskFileNum) func ParseFilename(fs vfs.FS, filename string) (fileType FileType, dfn DiskFileNum, ok bool) { filename = fs.PathBase(filename) switch { - case filename == "CURRENT": - return FileTypeCurrent, 0, true case filename == "LOCK": return FileTypeLock, 0, true case strings.HasPrefix(filename, "MANIFEST-"): diff --git a/vendor/github.com/cockroachdb/pebble/internal/keyspan/assert_iter.go b/vendor/github.com/cockroachdb/pebble/internal/keyspan/assert_iter.go new file mode 100644 index 0000000000..e71c6eb5a6 --- /dev/null +++ b/vendor/github.com/cockroachdb/pebble/internal/keyspan/assert_iter.go @@ -0,0 +1,173 @@ +// Copyright 2023 The LevelDB-Go and Pebble Authors. All rights reserved. Use +// of this source code is governed by a BSD-style license that can be found in +// the LICENSE file. + +package keyspan + +import ( + "fmt" + + "github.com/cockroachdb/errors" + "github.com/cockroachdb/pebble/internal/base" + "github.com/cockroachdb/pebble/internal/invariants" +) + +// Assert wraps an iterator and asserts that operations return sane results. +func Assert(iter FragmentIterator, cmp base.Compare) FragmentIterator { + return &assertIter{ + iter: iter, + cmp: cmp, + } +} + +// MaybeAssert wraps an iterator and asserts that operations return sane +// results if we are in testing mode. +func MaybeAssert(iter FragmentIterator, cmp base.Compare) FragmentIterator { + if invariants.Enabled && iter != nil { + // Don't wrap an assertIter. + if _, ok := iter.(*assertIter); !ok { + return Assert(iter, cmp) + } + } + return iter +} + +// AssertUserKeyBounds wraps an iterator and asserts that all spans are within +// the given bounds [lower, upper). +func AssertUserKeyBounds( + iter FragmentIterator, lower, upper []byte, cmp base.Compare, +) FragmentIterator { + return AssertBounds(iter, base.MakeSearchKey(lower), upper, cmp) +} + +// AssertBounds wraps an iterator and asserts that all spans are within the +// given bounds [lower.UserKey, upper), and that all keys in a span that starts +// exactly at lower.UserKey are >= lower. +// +// The asymmetry here is due to fragment spans having exclusive end user keys. +func AssertBounds( + iter FragmentIterator, lower base.InternalKey, upper []byte, cmp base.Compare, +) FragmentIterator { + i := &assertIter{ + iter: iter, + cmp: cmp, + } + i.checkBounds.enabled = true + i.checkBounds.lower = lower + i.checkBounds.upper = upper + return i +} + +// assertIter is a pass-through FragmentIterator wrapper which performs checks +// on what the wrapped iterator returns. 
+// +// It verifies that results for various operations are sane, and it optionally +// verifies that spans are within given bounds. +type assertIter struct { + iter FragmentIterator + cmp base.Compare + checkBounds struct { + enabled bool + lower base.InternalKey + upper []byte + } + lastSpanStart []byte + lastSpanEnd []byte +} + +var _ FragmentIterator = (*assertIter)(nil) + +func (i *assertIter) panicf(format string, args ...interface{}) { + str := fmt.Sprintf(format, args...) + panic(errors.AssertionFailedf("%s; wraps %T", str, i.iter)) +} + +func (i *assertIter) check(span *Span) { + i.lastSpanStart = i.lastSpanStart[:0] + i.lastSpanEnd = i.lastSpanEnd[:0] + if span == nil { + return + } + if i.checkBounds.enabled { + lower := i.checkBounds.lower + switch startCmp := i.cmp(span.Start, lower.UserKey); { + case startCmp < 0: + i.panicf("lower bound %q violated by span %s", lower.UserKey, span) + case startCmp == 0: + // Note: trailers are in descending order. + if len(span.Keys) > 0 && span.SmallestKey().Trailer > lower.Trailer { + i.panicf("lower bound %s violated by key %s", lower, span.SmallestKey()) + } + } + if i.cmp(span.End, i.checkBounds.upper) > 0 { + i.panicf("upper bound %q violated by span %s", i.checkBounds.upper, span) + } + } + // Save the span to check Next/Prev operations. + i.lastSpanStart = append(i.lastSpanStart, span.Start...) + i.lastSpanEnd = append(i.lastSpanEnd, span.End...) +} + +// SeekGE implements FragmentIterator. +func (i *assertIter) SeekGE(key []byte) *Span { + span := i.iter.SeekGE(key) + if span != nil && i.cmp(span.End, key) <= 0 { + i.panicf("incorrect SeekGE(%q) span %s", key, span) + } + i.check(span) + return span +} + +// SeekLT implements FragmentIterator. +func (i *assertIter) SeekLT(key []byte) *Span { + span := i.iter.SeekLT(key) + if span != nil && i.cmp(span.Start, key) >= 0 { + i.panicf("incorrect SeekLT(%q) span %s", key, span) + } + i.check(span) + return span +} + +// First implements FragmentIterator. +func (i *assertIter) First() *Span { + span := i.iter.First() + i.check(span) + return span +} + +// Last implements FragmentIterator. +func (i *assertIter) Last() *Span { + span := i.iter.Last() + i.check(span) + return span +} + +// Next implements FragmentIterator. +func (i *assertIter) Next() *Span { + span := i.iter.Next() + if span != nil && len(i.lastSpanEnd) > 0 && i.cmp(i.lastSpanEnd, span.Start) > 0 { + i.panicf("Next span %s not after last span end %q", span, i.lastSpanEnd) + } + i.check(span) + return span +} + +// Prev implements FragmentIterator. +func (i *assertIter) Prev() *Span { + span := i.iter.Prev() + if span != nil && len(i.lastSpanStart) > 0 && i.cmp(i.lastSpanStart, span.End) < 0 { + i.panicf("Prev span %s not before last span start %q", span, i.lastSpanStart) + } + i.check(span) + return span +} + +// Error implements FragmentIterator. +func (i *assertIter) Error() error { + return i.iter.Error() +} + +// Close implements FragmentIterator. +func (i *assertIter) Close() error { + return i.iter.Close() +} diff --git a/vendor/github.com/cockroachdb/pebble/internal/keyspan/filter.go b/vendor/github.com/cockroachdb/pebble/internal/keyspan/filter.go index a63a43ca8f..594d7fc3ea 100644 --- a/vendor/github.com/cockroachdb/pebble/internal/keyspan/filter.go +++ b/vendor/github.com/cockroachdb/pebble/internal/keyspan/filter.go @@ -36,7 +36,7 @@ var _ FragmentIterator = (*filteringIter)(nil) // Filter returns a new filteringIter that will filter the Spans from the // provided child iterator using the provided FilterFunc. 
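The new assertion wrappers are threaded through the keyspan iterator stack below (`Filter`, `InterleavingIter.Init`, and `LevelIter.loadFile` all route through `MaybeAssert`). A sketch of direct use (hedged: `keyspan` is an internal package, so this compiles only within the pebble module, and `keyspan.NewIter` is simply an existing concrete `FragmentIterator` used for illustration):

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/keyspan"
)

func main() {
	cmp := base.DefaultComparer.Compare
	spans := []keyspan.Span{
		{Start: []byte("a"), End: []byte("c")},
		{Start: []byte("c"), End: []byte("f")},
	}
	// Wrap a concrete iterator so that out-of-order results or bound
	// violations panic (naming the wrapped iterator's type) instead of
	// propagating silently.
	iter := keyspan.AssertUserKeyBounds(
		keyspan.NewIter(cmp, spans), []byte("a"), []byte("z"), cmp)
	for s := iter.First(); s != nil; s = iter.Next() {
		fmt.Println(s)
	}
	_ = iter.Close()
}
```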
func Filter(iter FragmentIterator, filter FilterFunc, cmp base.Compare) FragmentIterator { - return &filteringIter{iter: iter, filterFn: filter, cmp: cmp} + return MaybeAssert(&filteringIter{iter: iter, filterFn: filter, cmp: cmp}, cmp) } // SeekGE implements FragmentIterator. diff --git a/vendor/github.com/cockroachdb/pebble/internal/keyspan/interleaving_iter.go b/vendor/github.com/cockroachdb/pebble/internal/keyspan/interleaving_iter.go index e1fd600a24..0ce56e11ad 100644 --- a/vendor/github.com/cockroachdb/pebble/internal/keyspan/interleaving_iter.go +++ b/vendor/github.com/cockroachdb/pebble/internal/keyspan/interleaving_iter.go @@ -211,7 +211,7 @@ func (i *InterleavingIter) Init( cmp: comparer.Compare, comparer: comparer, pointIter: pointIter, - keyspanIter: keyspanIter, + keyspanIter: MaybeAssert(keyspanIter, comparer.Compare), mask: opts.Mask, lower: opts.LowerBound, upper: opts.UpperBound, diff --git a/vendor/github.com/cockroachdb/pebble/internal/keyspan/level_iter.go b/vendor/github.com/cockroachdb/pebble/internal/keyspan/level_iter.go index 6dd7ac6e23..d2f81fbcba 100644 --- a/vendor/github.com/cockroachdb/pebble/internal/keyspan/level_iter.go +++ b/vendor/github.com/cockroachdb/pebble/internal/keyspan/level_iter.go @@ -180,6 +180,7 @@ func (l *LevelIter) loadFile(file *manifest.FileMetadata, dir int) loadFileRetur } if indicator != fileAlreadyLoaded { l.iter, l.err = l.newIter(file, l.tableOpts) + l.iter = MaybeAssert(l.iter, l.cmp) indicator = newFileLoaded } if l.err != nil { diff --git a/vendor/github.com/cockroachdb/pebble/internal/manifest/version.go b/vendor/github.com/cockroachdb/pebble/internal/manifest/version.go index d20240af97..fe086d1aed 100644 --- a/vendor/github.com/cockroachdb/pebble/internal/manifest/version.go +++ b/vendor/github.com/cockroachdb/pebble/internal/manifest/version.go @@ -269,6 +269,35 @@ type FileMetadata struct { boundTypeSmallest, boundTypeLargest boundType // Virtual is true if the FileMetadata belongs to a virtual sstable. Virtual bool + + // PrefixReplacement is used for virtual files where the backing file has a + // different prefix on its keys than the span in which it is being exposed. + PrefixReplacement *PrefixReplacement +} + +// PrefixReplacement represents a read-time replacement of a key prefix. +type PrefixReplacement struct { + ContentPrefix, SyntheticPrefix []byte +} + +// ReplaceArg replaces the new prefix in the argument with the original prefix. +func (p *PrefixReplacement) ReplaceArg(src []byte) []byte { + return p.replace(src, p.SyntheticPrefix, p.ContentPrefix) +} + +// ReplaceResult replaces the original prefix in the result with the new prefix. +func (p *PrefixReplacement) ReplaceResult(key []byte) []byte { + return p.replace(key, p.ContentPrefix, p.SyntheticPrefix) +} + +func (p *PrefixReplacement) replace(key, from, to []byte) []byte { + if !bytes.HasPrefix(key, from) { + panic(fmt.Sprintf("unexpected prefix in replace: %s", key)) + } + result := make([]byte, 0, len(to)+(len(key)-len(from))) + result = append(result, to...) + result = append(result, key[len(from):]...) 
+ return result } // PhysicalFileMeta is used by functions which want a guarantee that their input @@ -851,6 +880,18 @@ func (m *FileMetadata) Validate(cmp Compare, formatKey base.FormatKey) error { return base.CorruptionErrorf("file metadata FileBacking not set") } + if m.PrefixReplacement != nil { + if !m.Virtual { + return base.CorruptionErrorf("prefix replacement rule set with non-virtual file") + } + if !bytes.HasPrefix(m.Smallest.UserKey, m.PrefixReplacement.SyntheticPrefix) { + return base.CorruptionErrorf("virtual file with prefix replacement rules has smallest key with a different prefix: %s", m.Smallest.Pretty(formatKey)) + } + if !bytes.HasPrefix(m.Largest.UserKey, m.PrefixReplacement.SyntheticPrefix) { + return base.CorruptionErrorf("virtual file with prefix replacement rules has largest key with a different prefix: %s", m.Largest.Pretty(formatKey)) + } + } + return nil } @@ -1238,6 +1279,119 @@ func (v *Version) InitL0Sublevels( return err } +// CalculateInuseKeyRanges examines file metadata in levels [level, maxLevel] +// within bounds [smallest,largest], returning an ordered slice of key ranges +// that include all keys that exist within levels [level, maxLevel] and within +// [smallest,largest]. +func (v *Version) CalculateInuseKeyRanges( + cmp base.Compare, level, maxLevel int, smallest, largest []byte, +) []UserKeyRange { + // Use two slices, alternating which one is input and which one is output + // as we descend the LSM. + var input, output []UserKeyRange + + // L0 requires special treatment, since sstables within L0 may overlap. + // We use the L0 Sublevels structure to efficiently calculate the merged + // in-use key ranges. + if level == 0 { + output = v.L0Sublevels.InUseKeyRanges(smallest, largest) + level++ + } + + for ; level <= maxLevel; level++ { + // NB: We always treat `largest` as inclusive for simplicity, because + // there's little consequence to calculating slightly broader in-use key + // ranges. + overlaps := v.Overlaps(level, cmp, smallest, largest, false /* exclusiveEnd */) + iter := overlaps.Iter() + + // We may already have in-use key ranges from higher levels. Iterate + // through both our accumulated in-use key ranges and this level's + // files, merging the two. + // + // Tables higher within the LSM have broader key spaces. We use this + // when possible to seek past a level's files that are contained by + // our current accumulated in-use key ranges. This helps avoid + // per-sstable work during flushes or compactions in high levels which + // overlap the majority of the LSM's sstables. + input, output = output, input + output = output[:0] + + var currFile *FileMetadata + var currAccum *UserKeyRange + if len(input) > 0 { + currAccum, input = &input[0], input[1:] + } + + // If we have an accumulated key range and its start is ≤ smallest, + // we can seek to the accumulated range's end. Otherwise, we need to + // start at the first overlapping file within the level. + if currAccum != nil && cmp(currAccum.Start, smallest) <= 0 { + currFile = seekGT(&iter, cmp, currAccum.End) + } else { + currFile = iter.First() + } + + for currFile != nil || currAccum != nil { + // If we've exhausted either the files in the level or the + // accumulated key ranges, we just need to append the one we have. + // If we have both a currFile and a currAccum, they either overlap + // or they're disjoint. If they're disjoint, we append whichever + // one sorts first and move on to the next file or range. 
If they + // overlap, we merge them into currAccum and proceed to the next + // file. + switch { + case currAccum == nil || (currFile != nil && cmp(currFile.Largest.UserKey, currAccum.Start) < 0): + // This file is strictly before the current accumulated range, + // or there are no more accumulated ranges. + output = append(output, UserKeyRange{ + Start: currFile.Smallest.UserKey, + End: currFile.Largest.UserKey, + }) + currFile = iter.Next() + case currFile == nil || (currAccum != nil && cmp(currAccum.End, currFile.Smallest.UserKey) < 0): + // The current accumulated key range is strictly before the + // current file, or there are no more files. + output = append(output, *currAccum) + currAccum = nil + if len(input) > 0 { + currAccum, input = &input[0], input[1:] + } + default: + // The current accumulated range and the current file overlap. + // Adjust the accumulated range to be the union. + if cmp(currFile.Smallest.UserKey, currAccum.Start) < 0 { + currAccum.Start = currFile.Smallest.UserKey + } + if cmp(currFile.Largest.UserKey, currAccum.End) > 0 { + currAccum.End = currFile.Largest.UserKey + } + + // Extending `currAccum`'s end boundary may have caused it to + // overlap with `input` key ranges that we haven't processed + // yet. Merge any such key ranges. + for len(input) > 0 && cmp(input[0].Start, currAccum.End) <= 0 { + if cmp(input[0].End, currAccum.End) > 0 { + currAccum.End = input[0].End + } + input = input[1:] + } + // Seek the level iterator past our current accumulated end. + currFile = seekGT(&iter, cmp, currAccum.End) + } + } + } + return output +} + +func seekGT(iter *LevelIterator, cmp base.Compare, key []byte) *FileMetadata { + f := iter.SeekGE(cmp, key) + for f != nil && cmp(f.Largest.UserKey, key) == 0 { + f = iter.Next() + } + return f +} + // Contains returns a boolean indicating whether the provided file exists in // the version at the given level. If level is non-zero then Contains binary // searches among the files. If level is zero, Contains scans the entire @@ -1343,20 +1497,16 @@ func (v *Version) Overlaps( // CheckOrdering checks that the files are consistent with respect to // increasing file numbers (for level 0 files) and increasing and non- // overlapping internal key ranges (for level non-0 files). -func (v *Version) CheckOrdering( - cmp Compare, format base.FormatKey, order OrderingInvariants, -) error { +func (v *Version) CheckOrdering(cmp Compare, format base.FormatKey) error { for sublevel := len(v.L0SublevelFiles) - 1; sublevel >= 0; sublevel-- { sublevelIter := v.L0SublevelFiles[sublevel].Iter() - // Sublevels have NEVER allowed split user keys, so we can pass - // ProhibitSplitUserKeys. - if err := CheckOrdering(cmp, format, L0Sublevel(sublevel), sublevelIter, ProhibitSplitUserKeys); err != nil { + if err := CheckOrdering(cmp, format, L0Sublevel(sublevel), sublevelIter); err != nil { return base.CorruptionErrorf("%s\n%s", err, v.DebugString(format)) } } for level, lm := range v.Levels { - if err := CheckOrdering(cmp, format, Level(level), lm.Iter(), order); err != nil { + if err := CheckOrdering(cmp, format, Level(level), lm.Iter()); err != nil { return base.CorruptionErrorf("%s\n%s", err, v.DebugString(format)) } } @@ -1425,34 +1575,10 @@ func (l *VersionList) Remove(v *Version) { v.list = nil // avoid memory leaks } -// OrderingInvariants dictates the file ordering invariants active. -type OrderingInvariants int8 - -const ( - // ProhibitSplitUserKeys indicates that adjacent files within a level cannot - // contain the same user key. 
ProhibitSplitUserKeys OrderingInvariants = iota - // AllowSplitUserKeys indicates that adjacent files within a level may - // contain the same user key. This is only allowed by historical format - // major versions. - // - // TODO(jackson): Remove. - AllowSplitUserKeys -) - // CheckOrdering checks that the files are consistent with respect to // seqnums (for level 0 files -- see detailed comment below) and increasing and non- // overlapping internal key ranges (for non-level 0 files). -// -// The ordering field may be passed AllowSplitUserKeys to allow adjacent files that are both -// inclusive of the same user key. Pebble no longer creates version edits -// installing such files, and Pebble databases with sufficiently high format -// major version should no longer have any such files within their LSM. -// TODO(jackson): Remove AllowSplitUserKeys when we remove support for the -// earlier format major versions. -func CheckOrdering( - cmp Compare, format base.FormatKey, level Level, files LevelIterator, ordering OrderingInvariants, -) error { +func CheckOrdering(cmp Compare, format base.FormatKey, level Level, files LevelIterator) error { // The invariants to check for L0 sublevels are the same as the ones to // check for all other levels. However, if L0 is not organized into // sublevels, or if all L0 files are being passed in, we do the legacy L0 @@ -1531,28 +1657,15 @@ func CheckOrdering( f.Smallest.Pretty(format), f.Largest.Pretty(format)) } - // What's considered "overlapping" is dependent on the format - // major version. If ordering=ProhibitSplitUserKeys, then both - // files cannot contain keys with the same user keys. If the - // bounds have the same user key, the previous file's boundary - // must have a Trailer indicating that it's exclusive. - switch ordering { - case AllowSplitUserKeys: - if base.InternalCompare(cmp, prev.Largest, f.Smallest) >= 0 { - return base.CorruptionErrorf("%s files %s and %s have overlapping ranges: [%s-%s] vs [%s-%s]", - errors.Safe(level), errors.Safe(prev.FileNum), errors.Safe(f.FileNum), - prev.Smallest.Pretty(format), prev.Largest.Pretty(format), - f.Smallest.Pretty(format), f.Largest.Pretty(format)) - } - case ProhibitSplitUserKeys: - if v := cmp(prev.Largest.UserKey, f.Smallest.UserKey); v > 0 || (v == 0 && !prev.Largest.IsExclusiveSentinel()) { - return base.CorruptionErrorf("%s files %s and %s have overlapping ranges: [%s-%s] vs [%s-%s]", - errors.Safe(level), errors.Safe(prev.FileNum), errors.Safe(f.FileNum), - prev.Smallest.Pretty(format), prev.Largest.Pretty(format), - f.Smallest.Pretty(format), f.Largest.Pretty(format)) - } - default: - panic("unreachable") + // In all supported format major versions, split user keys are + // prohibited, so both files cannot contain keys with the same user + // keys. If the bounds have the same user key, the previous file's + // boundary must have a Trailer indicating that it's exclusive.
+ if v := cmp(prev.Largest.UserKey, f.Smallest.UserKey); v > 0 || (v == 0 && !prev.Largest.IsExclusiveSentinel()) { + return base.CorruptionErrorf("%s files %s and %s have overlapping ranges: [%s-%s] vs [%s-%s]", + errors.Safe(level), errors.Safe(prev.FileNum), errors.Safe(f.FileNum), + prev.Smallest.Pretty(format), prev.Largest.Pretty(format), + f.Smallest.Pretty(format), f.Largest.Pretty(format)) } } } diff --git a/vendor/github.com/cockroachdb/pebble/internal/manifest/version_edit.go b/vendor/github.com/cockroachdb/pebble/internal/manifest/version_edit.go index 08b3555e35..01491cf69c 100644 --- a/vendor/github.com/cockroachdb/pebble/internal/manifest/version_edit.go +++ b/vendor/github.com/cockroachdb/pebble/internal/manifest/version_edit.go @@ -63,6 +63,7 @@ const ( customTagPathID = 65 customTagNonSafeIgnoreMask = 1 << 6 customTagVirtual = 66 + customTagPrefixRewrite = 67 ) // DeletedFileEntry holds the state for a file deletion from a level. The file @@ -336,6 +337,7 @@ func (v *VersionEdit) Decode(r io.Reader) error { virtual bool backingFileNum uint64 }{} + var virtualPrefix *PrefixReplacement if tag == tagNewFile4 || tag == tagNewFile5 { for { customTag, err := d.readUvarint() @@ -352,6 +354,20 @@ func (v *VersionEdit) Decode(r io.Reader) error { } virtualState.backingFileNum = n continue + } else if customTag == customTagPrefixRewrite { + content, err := d.readBytes() + if err != nil { + return err + } + synthetic, err := d.readBytes() + if err != nil { + return err + } + virtualPrefix = &PrefixReplacement{ + ContentPrefix: content, + SyntheticPrefix: synthetic, + } + continue } field, err := d.readBytes() @@ -390,6 +406,7 @@ func (v *VersionEdit) Decode(r io.Reader) error { LargestSeqNum: largestSeqNum, MarkedForCompaction: markedForCompaction, Virtual: virtualState.virtual, + PrefixReplacement: virtualPrefix, } if tag != tagNewFile5 { // no range keys present m.SmallestPointKey = base.DecodeInternalKey(smallestPointKey) @@ -606,6 +623,11 @@ func (v *VersionEdit) Encode(w io.Writer) error { e.writeUvarint(customTagVirtual) e.writeUvarint(uint64(x.Meta.FileBacking.DiskFileNum.FileNum())) } + if x.Meta.PrefixReplacement != nil { + e.writeUvarint(customTagPrefixRewrite) + e.writeBytes(x.Meta.PrefixReplacement.ContentPrefix) + e.writeBytes(x.Meta.PrefixReplacement.SyntheticPrefix) + } e.writeUvarint(customTagTerminate) } } @@ -855,7 +877,6 @@ func AccumulateIncompleteAndApplySingleVE( backingStateMap map[base.DiskFileNum]*FileBacking, addBackingFunc func(*FileBacking), removeBackingFunc func(base.DiskFileNum), - orderingInvariants OrderingInvariants, ) (_ *Version, zombies map[base.DiskFileNum]uint64, _ error) { if len(ve.RemovedBackingTables) != 0 { panic("pebble: invalid incomplete version edit") @@ -866,9 +887,7 @@ func AccumulateIncompleteAndApplySingleVE( return nil, nil, err } zombies = make(map[base.DiskFileNum]uint64) - v, err := b.Apply( - curr, cmp, formatKey, flushSplitBytes, readCompactionRate, zombies, orderingInvariants, - ) + v, err := b.Apply(curr, cmp, formatKey, flushSplitBytes, readCompactionRate, zombies) if err != nil { return nil, nil, err } @@ -908,7 +927,6 @@ func (b *BulkVersionEdit) Apply( flushSplitBytes int64, readCompactionRate int64, zombies map[base.DiskFileNum]uint64, - orderingInvariants OrderingInvariants, ) (*Version, error) { addZombie := func(state *FileBacking) { if zombies != nil { @@ -1092,7 +1110,7 @@ func (b *BulkVersionEdit) Apply( } else if err := v.InitL0Sublevels(cmp, formatKey, flushSplitBytes); err != nil { return nil, errors.Wrap(err, 
"pebble: internal error") } - if err := CheckOrdering(cmp, formatKey, Level(0), v.Levels[level].Iter(), orderingInvariants); err != nil { + if err := CheckOrdering(cmp, formatKey, Level(0), v.Levels[level].Iter()); err != nil { return nil, errors.Wrap(err, "pebble: internal error") } continue @@ -1113,7 +1131,7 @@ func (b *BulkVersionEdit) Apply( end.Prev() } }) - if err := CheckOrdering(cmp, formatKey, Level(level), check.Iter(), orderingInvariants); err != nil { + if err := CheckOrdering(cmp, formatKey, Level(level), check.Iter()); err != nil { return nil, errors.Wrap(err, "pebble: internal error") } } diff --git a/vendor/github.com/cockroachdb/pebble/open.go b/vendor/github.com/cockroachdb/pebble/open.go index d0b5e189bd..33616d1aa2 100644 --- a/vendor/github.com/cockroachdb/pebble/open.go +++ b/vendor/github.com/cockroachdb/pebble/open.go @@ -71,7 +71,7 @@ func TableCacheSize(maxOpenFiles int) int { } // Open opens a DB whose files live in the given directory. -func Open(dirname string, opts *Options) (db *DB, _ error) { +func Open(dirname string, opts *Options) (db *DB, err error) { // Make a copy of the options so that we don't mutate the passed in options. opts = opts.Clone() opts = opts.EnsureDefaults() @@ -136,8 +136,28 @@ func Open(dirname string, opts *Options) (db *DB, _ error) { } }() + noFormatVersionMarker := formatVersion == FormatDefault + if noFormatVersionMarker { + // There is no format version marker file. There are three cases: + // - we are trying to open an existing store that was created at + // FormatMostCompatible (the only one without a version marker file) + // - we are creating a new store; + // - we are retrying a failed creation. + // + // To error in the first case, we set ErrorIfNotPristine. + opts.ErrorIfNotPristine = true + formatVersion = FormatMinSupported + defer func() { + if err != nil && errors.Is(err, ErrDBNotPristine) { + // We must be trying to open an existing store at FormatMostCompatible. + // Correct the error in this case -we + err = errors.Newf("pebble: database %q written in format major version 1 which is no longer supported", dirname) + } + }() + } + // Find the currently active manifest, if there is one. - manifestMarker, manifestFileNum, manifestExists, err := findCurrentManifest(formatVersion, opts.FS, dirname) + manifestMarker, manifestFileNum, manifestExists, err := findCurrentManifest(opts.FS, dirname) if err != nil { return nil, errors.Wrapf(err, "pebble: database %q", dirname) } @@ -252,8 +272,6 @@ func Open(dirname string, opts *Options) (db *DB, _ error) { jobID := d.mu.nextJobID d.mu.nextJobID++ - setCurrent := setCurrentFunc(d.FormatMajorVersion(), manifestMarker, opts.FS, dirname, d.dataDir) - if !manifestExists { // DB does not exist. if d.opts.ErrorIfNotExists || d.opts.ReadOnly { @@ -261,7 +279,7 @@ func Open(dirname string, opts *Options) (db *DB, _ error) { } // Create the DB. - if err := d.mu.versions.create(jobID, dirname, opts, manifestMarker, setCurrent, d.FormatMajorVersion, &d.mu.Mutex); err != nil { + if err := d.mu.versions.create(jobID, dirname, opts, manifestMarker, d.FormatMajorVersion, &d.mu.Mutex); err != nil { return nil, err } } else { @@ -269,7 +287,7 @@ func Open(dirname string, opts *Options) (db *DB, _ error) { return nil, errors.Wrapf(ErrDBAlreadyExists, "dirname=%q", dirname) } // Load the version set. 
- if err := d.mu.versions.load(dirname, opts, manifestFileNum, manifestMarker, setCurrent, d.FormatMajorVersion, &d.mu.Mutex); err != nil { + if err := d.mu.versions.load(dirname, opts, manifestFileNum, manifestMarker, d.FormatMajorVersion, &d.mu.Mutex); err != nil { return nil, err } if opts.ErrorIfNotPristine { @@ -485,20 +503,27 @@ func Open(dirname string, opts *Options) (db *DB, _ error) { } d.updateReadStateLocked(d.opts.DebugCheck) - // If the Options specify a format major version higher than the - // loaded database's, upgrade it. If this is a new database, this - // code path also performs an initial upgrade from the starting - // implicit MostCompatible version. - // - // We ratchet the version this far into Open so that migrations have a read - // state available. - if !d.opts.ReadOnly && opts.FormatMajorVersion > d.FormatMajorVersion() { - if err := d.ratchetFormatMajorVersionLocked(opts.FormatMajorVersion); err != nil { - return nil, err + if !d.opts.ReadOnly { + // If the Options specify a format major version higher than the + // loaded database's, upgrade it. If this is a new database, this + // code path also performs an initial upgrade from the starting + // implicit MinSupported version. + // + // We ratchet the version this far into Open so that migrations have a read + // state available. Note that this also results in creating/updating the + // format version marker file. + if opts.FormatMajorVersion > d.FormatMajorVersion() { + if err := d.ratchetFormatMajorVersionLocked(opts.FormatMajorVersion); err != nil { + return nil, err + } + } else if noFormatVersionMarker { + // We are creating a new store at MinSupported. Create the format version + // marker file. + if err := d.writeFormatVersionMarker(d.FormatMajorVersion()); err != nil { + return nil, err + } } - } - if !d.opts.ReadOnly { // Write the current options to disk. d.optionsFileNum = d.mu.versions.getNextDiskFileNum() tmpPath := base.MakeFilepath(opts.FS, dirname, fileTypeTemp, d.optionsFileNum) @@ -680,10 +705,17 @@ func GetVersion(dir string, fs vfs.FS) (string, error) { return version, nil } -// replayWAL replays the edits in the specified log file. If the DB is in -// read only mode, then the WALs are replayed into memtables and not flushed. If -// the DB is not in read only mode, then the contents of the WAL are guaranteed -// to be flushed. +// replayWAL replays the edits in the specified log file. If the DB is in read +// only mode, then the WALs are replayed into memtables and not flushed. If +// the DB is not in read only mode, then the contents of the WAL are +// guaranteed to be flushed. Note that this flushing is very important for +// guaranteeing durability: the application may have had a number of pending +// fsyncs to the WAL before the process crashed, and those fsyncs may not have +// happened but the corresponding data may now be readable from the WAL (while +// sitting in write-back caches in the kernel or the storage device). By +// reading the WAL (including the non-fsynced data) and then flushing all +// these changes (flush does fsyncs), we are able to guarantee that the +// initial state of the DB is durable. // // The toFlush return value is a list of flushables associated with the WAL // being replayed which will be flushed. Once the version edit has been applied @@ -994,7 +1026,7 @@ func (d *DB) replayWAL( flushMem() // mem is nil here. 
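All of the open-time machinery in this file, from WAL replay and flushing to the format-version ratchet, is driven from the same entry point. A usage sketch (the directory name is illustrative; FormatNewest is pebble's newest-known format constant):

package main

import (
	"log"

	"github.com/cockroachdb/pebble"
)

func main() {
	// Opening with a pinned FormatMajorVersion ratchets an existing store
	// forward, or creates a new store directly at that version.
	db, err := pebble.Open("demo-db", &pebble.Options{
		FormatMajorVersion: pebble.FormatNewest,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}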
- if !d.opts.ReadOnly { + if !d.opts.ReadOnly && batchesReplayed > 0 { err = updateVE() if err != nil { return nil, 0, err @@ -1044,7 +1076,7 @@ func Peek(dirname string, fs vfs.FS) (*DBDesc, error) { } // Find the currently active manifest, if there is one. - manifestMarker, manifestFileNum, exists, err := findCurrentManifest(vers, fs, dirname) + manifestMarker, manifestFileNum, exists, err := findCurrentManifest(fs, dirname) if err != nil { return nil, err } diff --git a/vendor/github.com/cockroachdb/pebble/options.go b/vendor/github.com/cockroachdb/pebble/options.go index 92d98eb08c..3451375d44 100644 --- a/vendor/github.com/cockroachdb/pebble/options.go +++ b/vendor/github.com/cockroachdb/pebble/options.go @@ -1122,7 +1122,7 @@ func (o *Options) EnsureDefaults() *Options { } if o.FormatMajorVersion == FormatDefault { - o.FormatMajorVersion = FormatMostCompatible + o.FormatMajorVersion = FormatMinSupported } if o.FS == nil { @@ -1489,7 +1489,7 @@ func (o *Options) Parse(s string, hooks *ParseHooks) error { var v uint64 v, err = strconv.ParseUint(value, 10, 64) if vers := FormatMajorVersion(v); vers > internalFormatNewest || vers == FormatDefault { - err = errors.Newf("unknown format major version %d", o.FormatMajorVersion) + err = errors.Newf("unsupported format major version %d", o.FormatMajorVersion) } if err == nil { o.FormatMajorVersion = FormatMajorVersion(v) @@ -1740,9 +1740,9 @@ func (o *Options) Validate() error { fmt.Fprintf(&buf, "MemTableStopWritesThreshold (%d) must be >= 2\n", o.MemTableStopWritesThreshold) } - if o.FormatMajorVersion > internalFormatNewest { - fmt.Fprintf(&buf, "FormatMajorVersion (%d) must be <= %d\n", - o.FormatMajorVersion, internalFormatNewest) + if o.FormatMajorVersion < FormatMinSupported || o.FormatMajorVersion > internalFormatNewest { + fmt.Fprintf(&buf, "FormatMajorVersion (%d) must be between %d and %d\n", + o.FormatMajorVersion, FormatMinSupported, internalFormatNewest) } if o.TableCache != nil && o.Cache != o.TableCache.cache { fmt.Fprintf(&buf, "underlying cache in the TableCache and the Cache dont match\n") diff --git a/vendor/github.com/cockroachdb/pebble/sstable/format.go b/vendor/github.com/cockroachdb/pebble/sstable/format.go index 82310a55ba..aa951cc1d0 100644 --- a/vendor/github.com/cockroachdb/pebble/sstable/format.go +++ b/vendor/github.com/cockroachdb/pebble/sstable/format.go @@ -19,15 +19,16 @@ type TableFormat uint32 // Pebble (i.e. the history is linear). const ( TableFormatUnspecified TableFormat = iota - TableFormatLevelDB - TableFormatRocksDBv2 - TableFormatPebblev1 // Block properties. - TableFormatPebblev2 // Range keys. - TableFormatPebblev3 // Value blocks. - TableFormatPebblev4 // DELSIZED tombstones. + _ // TableFormatLevelDB; deprecated. + _ // TableFormatRocksDBv2; deprecated. + TableFormatPebblev1 // Block properties. + TableFormatPebblev2 // Range keys. + TableFormatPebblev3 // Value blocks. + TableFormatPebblev4 // DELSIZED tombstones. NumTableFormats - TableFormatMax = NumTableFormats - 1 + TableFormatMax = NumTableFormats - 1 + TableFormatMinSupported = TableFormatPebblev1 ) // TableFormatPebblev4, in addition to DELSIZED, introduces the use of @@ -134,9 +135,31 @@ const ( // // Note that we do not need to do anything special at write time for // SETWITHDEL and SINGLEDEL. This is because these key kinds are treated -// specially only by compactions, which do not hide obsolete points. For -// regular reads, SETWITHDEL behaves the same as SET and SINGLEDEL behaves the -// same as DEL. 
+// specially only by compactions, which typically do not hide obsolete points +// (see exception below). For regular reads, SETWITHDEL behaves the same as +// SET and SINGLEDEL behaves the same as DEL. +// +// 2.1.1 Compaction reads of a foreign sstable +// +// Compaction reads of a foreign sstable behave like regular reads in that +// only non-obsolete points are exposed. Consider a L5 foreign sstable with +// b.SINGLEDEL that is non-obsolete followed by obsolete b.DEL. And a L6 +// foreign sstable with two b.SETs. The SINGLEDEL will be exposed, and not the +// DEL, but this is not a correctness issue since only one of the SETs in the +// L6 sstable will be exposed. However, this works only because we have +// limited the number of foreign sst levels to two, and is extremely fragile. +// For robust correctness, non-obsolete SINGLEDELs in foreign sstables should +// be exposed as DELs. +// +// Additionally, to avoid false positive accounting errors in DELSIZED, we +// should expose them as DEL. +// +// NB: as of writing this comment, we do not have end-to-end support for +// SINGLEDEL for disaggregated storage since pointCollapsingIterator (used by +// ScanInternal) does not support SINGLEDEL. So the disaggregated key spans +// are required to never have SINGLEDELs (which is fine for CockroachDB since +// only the MVCC key space uses disaggregated storage, and SINGLEDELs are only +// used for the non-MVCC locks and intents). // // 2.2 Strictness and MERGE // @@ -185,15 +208,6 @@ const ( // corresponding internal TableFormat. func ParseTableFormat(magic []byte, version uint32) (TableFormat, error) { switch string(magic) { - case levelDBMagic: - return TableFormatLevelDB, nil - case rocksDBMagic: - if version != rocksDBFormatVersion2 { - return TableFormatUnspecified, base.CorruptionErrorf( - "pebble/table: unsupported rocksdb format version %d", errors.Safe(version), - ) - } - return TableFormatRocksDBv2, nil case pebbleDBMagic: switch version { case 1: @@ -219,10 +233,6 @@ func ParseTableFormat(magic []byte, version uint32) (TableFormat, error) { // AsTuple returns the TableFormat's (Magic String, Version) tuple. func (f TableFormat) AsTuple() (string, uint32) { switch f { - case TableFormatLevelDB: - return levelDBMagic, 0 - case TableFormatRocksDBv2: - return rocksDBMagic, 2 case TableFormatPebblev1: return pebbleDBMagic, 1 case TableFormatPebblev2: @@ -239,10 +249,6 @@ func (f TableFormat) AsTuple() (string, uint32) { // String returns the TableFormat (Magic String,Version) tuple. func (f TableFormat) String() string { switch f { - case TableFormatLevelDB: - return "(LevelDB)" - case TableFormatRocksDBv2: - return "(RocksDB,v2)" case TableFormatPebblev1: return "(Pebble,v1)" case TableFormatPebblev2: diff --git a/vendor/github.com/cockroachdb/pebble/sstable/options.go b/vendor/github.com/cockroachdb/pebble/sstable/options.go index c5e1f7935f..7bf3d79907 100644 --- a/vendor/github.com/cockroachdb/pebble/sstable/options.go +++ b/vendor/github.com/cockroachdb/pebble/sstable/options.go @@ -228,9 +228,7 @@ type WriterOptions struct { MergerName string // TableFormat specifies the format version for writing sstables. The default - // is TableFormatRocksDBv2 which creates RocksDB compatible sstables. Use - // TableFormatLevelDB to create LevelDB compatible sstable which can be used - // by a wider range of tools and libraries. + // is TableFormatMinSupported. TableFormat TableFormat // IsStrictObsolete is only relevant for >= TableFormatPebblev4. 
See comment @@ -299,7 +297,7 @@ func (o WriterOptions) ensureDefaults() WriterOptions { // By default, if the table format is not specified, fall back to using the // most compatible format. if o.TableFormat == TableFormatUnspecified { - o.TableFormat = TableFormatRocksDBv2 + o.TableFormat = TableFormatMinSupported } return o } diff --git a/vendor/github.com/cockroachdb/pebble/sstable/prefix_replacing_iterator.go b/vendor/github.com/cockroachdb/pebble/sstable/prefix_replacing_iterator.go new file mode 100644 index 0000000000..895ccf5619 --- /dev/null +++ b/vendor/github.com/cockroachdb/pebble/sstable/prefix_replacing_iterator.go @@ -0,0 +1,265 @@ +// Copyright 2023 The LevelDB-Go and Pebble Authors. All rights reserved. Use +// of this source code is governed by a BSD-style license that can be found in +// the LICENSE file. + +package sstable + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + + "github.com/cockroachdb/errors" + "github.com/cockroachdb/pebble/internal/base" + "github.com/cockroachdb/pebble/internal/invariants" + "github.com/cockroachdb/pebble/internal/keyspan" +) + +type prefixReplacingIterator struct { + i Iterator + cmp base.Compare + src, dst []byte + arg, arg2 []byte + res InternalKey + err error +} + +var errInputPrefixMismatch = errors.New("key argument does not have prefix required for replacement") +var errOutputPrefixMismatch = errors.New("key returned does not have prefix required for replacement") + +var _ Iterator = (*prefixReplacingIterator)(nil) + +// newPrefixReplacingIterator wraps an iterator over keys that have prefix `src` +// in an iterator that will make them appear to have prefix `dst`. Every key +// passed as an argument to methods on this iterator must have prefix `dst`, and +// every key produced by the underlying iterator must have prefix `src`. +// +// INVARIANT: len(dst) > 0. +func newPrefixReplacingIterator(i Iterator, src, dst []byte, cmp base.Compare) Iterator { + if invariants.Enabled && len(dst) == 0 { + panic("newPrefixReplacingIterator called without synthetic prefix") + } + return &prefixReplacingIterator{ + i: i, + cmp: cmp, + src: src, dst: dst, + arg: append([]byte{}, src...), arg2: append([]byte{}, src...), + res: InternalKey{UserKey: append([]byte{}, dst...)}, + } +} + +func (p *prefixReplacingIterator) SetContext(ctx context.Context) { + p.i.SetContext(ctx) +} + +func (p *prefixReplacingIterator) rewriteArg(key []byte) []byte { + if !bytes.HasPrefix(key, p.dst) { + p.err = errInputPrefixMismatch + return key + } + p.arg = append(p.arg[:len(p.src)], key[len(p.dst):]...) + return p.arg +} + +func (p *prefixReplacingIterator) rewriteArg2(key []byte) []byte { + if !bytes.HasPrefix(key, p.dst) { + p.err = errInputPrefixMismatch + return key + } + p.arg2 = append(p.arg2[:len(p.src)], key[len(p.dst):]...) + return p.arg2 +} + +func (p *prefixReplacingIterator) rewriteResult( + k *InternalKey, v base.LazyValue, +) (*InternalKey, base.LazyValue) { + if k == nil { + return k, v + } + if !bytes.HasPrefix(k.UserKey, p.src) { + p.err = errOutputPrefixMismatch + if invariants.Enabled { + panic(p.err) + } + return nil, base.LazyValue{} + } + p.res.Trailer = k.Trailer + p.res.UserKey = append(p.res.UserKey[:len(p.dst)], k.UserKey[len(p.src):]...) + return &p.res, v +} + +// SeekGE implements the Iterator interface. 
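// To make the rewrite concrete: suppose the backing sstable stores keys under
// the content prefix "old/" and the virtual table presents them under the
// synthetic prefix "new/" (purely illustrative values). Then, conceptually:
//
//	it := newPrefixReplacingIterator(raw, []byte("old/"), []byte("new/"), bytes.Compare)
//	k, _ := it.SeekGE([]byte("new/banana"), base.SeekGEFlagsNone)
//	// The wrapped iterator actually seeks "old/banana"; if it lands on
//	// "old/cherry", the caller observes "new/cherry".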
+func (p *prefixReplacingIterator) SeekGE( + key []byte, flags base.SeekGEFlags, +) (*InternalKey, base.LazyValue) { + return p.rewriteResult(p.i.SeekGE(p.rewriteArg(key), flags)) +} + +// SeekPrefixGE implements the Iterator interface. +func (p *prefixReplacingIterator) SeekPrefixGE( + prefix, key []byte, flags base.SeekGEFlags, +) (*InternalKey, base.LazyValue) { + return p.rewriteResult(p.i.SeekPrefixGE(p.rewriteArg2(prefix), p.rewriteArg(key), flags)) +} + +// SeekLT implements the Iterator interface. +func (p *prefixReplacingIterator) SeekLT( + key []byte, flags base.SeekLTFlags, +) (*InternalKey, base.LazyValue) { + cmp := p.cmp(key, p.dst) + if cmp < 0 { + // Exhaust the iterator by Prev()ing before the First key. + p.i.First() + return p.rewriteResult(p.i.Prev()) + } + return p.rewriteResult(p.i.SeekLT(p.rewriteArg(key), flags)) +} + +// First implements the Iterator interface. +func (p *prefixReplacingIterator) First() (*InternalKey, base.LazyValue) { + return p.rewriteResult(p.i.First()) +} + +// Last implements the Iterator interface. +func (p *prefixReplacingIterator) Last() (*InternalKey, base.LazyValue) { + return p.rewriteResult(p.i.Last()) +} + +// Next implements the Iterator interface. +func (p *prefixReplacingIterator) Next() (*InternalKey, base.LazyValue) { + return p.rewriteResult(p.i.Next()) +} + +// NextPrefix implements the Iterator interface. +func (p *prefixReplacingIterator) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { + return p.rewriteResult(p.i.NextPrefix(p.rewriteArg(succKey))) +} + +// Prev implements the Iterator interface. +func (p *prefixReplacingIterator) Prev() (*InternalKey, base.LazyValue) { + return p.rewriteResult(p.i.Prev()) +} + +// Error implements the Iterator interface. +func (p *prefixReplacingIterator) Error() error { + if p.err != nil { + return p.err + } + return p.i.Error() +} + +// Close implements the Iterator interface. +func (p *prefixReplacingIterator) Close() error { + return p.i.Close() +} + +// SetBounds implements the Iterator interface. +func (p *prefixReplacingIterator) SetBounds(lower, upper []byte) { + // Check if the underlying iterator requires un-rewritten bounds, i.e. if it + // is going to rewrite them itself or pass them to something e.g. vState that + // will rewrite them. + if x, ok := p.i.(interface{ SetBoundsWithSyntheticPrefix() bool }); ok && x.SetBoundsWithSyntheticPrefix() { + p.i.SetBounds(lower, upper) + return + } + p.i.SetBounds(p.rewriteArg(lower), p.rewriteArg2(upper)) +} + +func (p *prefixReplacingIterator) MaybeFilteredKeys() bool { + return p.i.MaybeFilteredKeys() +} + +// String implements the Iterator interface. +func (p *prefixReplacingIterator) String() string { + return fmt.Sprintf("%s [%s->%s]", p.i.String(), hex.EncodeToString(p.src), hex.EncodeToString(p.dst)) +} + +func (p *prefixReplacingIterator) SetCloseHook(fn func(i Iterator) error) { + p.i.SetCloseHook(fn) +} + +type prefixReplacingFragmentIterator struct { + i keyspan.FragmentIterator + err error + src, dst []byte + arg []byte + out1, out2 []byte +} + +// newPrefixReplacingFragmentIterator wraps a FragmentIterator over some reader +// that contains range keys in some key span to make those range keys appear to +// be remapped into some other key-span. 
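// For example (hypothetical spans): with src = "old/" and dst = "new/", a
// RANGEDEL fragment stored as [old/a, old/k) surfaces to callers as
// [new/a, new/k), while a caller's SeekGE([]byte("new/c")) is rewritten to
// SeekGE([]byte("old/c")) before reaching the wrapped iterator.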
+func newPrefixReplacingFragmentIterator(
+	i keyspan.FragmentIterator, src, dst []byte,
+) keyspan.FragmentIterator {
+	return &prefixReplacingFragmentIterator{
+		i:   i,
+		src: src, dst: dst,
+		arg:  append([]byte{}, src...),
+		out1: append([]byte(nil), dst...),
+		out2: append([]byte(nil), dst...),
+	}
+}
+
+func (p *prefixReplacingFragmentIterator) rewriteArg(key []byte) []byte {
+	if !bytes.HasPrefix(key, p.dst) {
+		p.err = errInputPrefixMismatch
+		return key
+	}
+	p.arg = append(p.arg[:len(p.src)], key[len(p.dst):]...)
+	return p.arg
+}
+
+func (p *prefixReplacingFragmentIterator) rewriteSpan(sp *keyspan.Span) *keyspan.Span {
+	if !bytes.HasPrefix(sp.Start, p.src) || !bytes.HasPrefix(sp.End, p.src) {
+		p.err = errInputPrefixMismatch
+		return sp
+	}
+	sp.Start = append(p.out1[:len(p.dst)], sp.Start[len(p.src):]...)
+	sp.End = append(p.out2[:len(p.dst)], sp.End[len(p.src):]...)
+	return sp
+}
+
+// SeekGE implements the FragmentIterator interface.
+func (p *prefixReplacingFragmentIterator) SeekGE(key []byte) *keyspan.Span {
+	return p.rewriteSpan(p.i.SeekGE(p.rewriteArg(key)))
+}
+
+// SeekLT implements the FragmentIterator interface.
+func (p *prefixReplacingFragmentIterator) SeekLT(key []byte) *keyspan.Span {
+	return p.rewriteSpan(p.i.SeekLT(p.rewriteArg(key)))
+}
+
+// First implements the FragmentIterator interface.
+func (p *prefixReplacingFragmentIterator) First() *keyspan.Span {
+	return p.rewriteSpan(p.i.First())
+}
+
+// Last implements the FragmentIterator interface.
+func (p *prefixReplacingFragmentIterator) Last() *keyspan.Span {
+	return p.rewriteSpan(p.i.Last())
+}
+
+// Next implements the FragmentIterator interface.
+func (p *prefixReplacingFragmentIterator) Next() *keyspan.Span {
+	return p.rewriteSpan(p.i.Next())
+}
+
+// Prev implements the FragmentIterator interface.
+func (p *prefixReplacingFragmentIterator) Prev() *keyspan.Span {
+	return p.rewriteSpan(p.i.Prev())
+}
+
+// Error implements the FragmentIterator interface.
+func (p *prefixReplacingFragmentIterator) Error() error {
+	if p.err != nil {
+		return p.err
+	}
+	return p.i.Error()
+}
+
+// Close implements the FragmentIterator interface.
+func (p *prefixReplacingFragmentIterator) Close() error {
+	return p.i.Close()
+}
diff --git a/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_single_lvl.go b/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_single_lvl.go
index 73780b86fa..ed676957d6 100644
--- a/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_single_lvl.go
+++ b/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_single_lvl.go
@@ -5,6 +5,7 @@ package sstable
import (
+	"bytes"
	"context"
	"fmt"
	"unsafe"
@@ -320,6 +321,23 @@ func disableBoundsOpt(bound []byte, ptr uintptr) bool {
// state and require this behavior to be deterministic.
var ensureBoundsOptDeterminism bool
+// SetBoundsWithSyntheticPrefix indicates whether this iterator requires keys
+// passed to its SetBounds() method by a prefix rewriting wrapper to be *not*
+// rewritten to be in terms of this iterator's content, but instead be passed
+// as-is, i.e. with the synthetic prefix still on them.
+//
+// This allows an optimization when this iterator is passing these bounds on to
+// a vState to additionally constrain them. In said vState, passed bounds are
+// combined with the vState bounds which are in terms of the rewritten prefix.
+// If the caller rewrote bounds to be in terms of content prefix and SetBounds
+// passed those to vState, the vState would need to *un*rewrite them back to the
+// synthetic prefix in order to combine them with the vState bounds. Thus, if
+// this iterator knows bounds will be passed to vState, it can signal that
+// they should be passed without being rewritten, to skip converting to and fro.
+func (i singleLevelIterator) SetBoundsWithSyntheticPrefix() bool {
+	return i.vState != nil
+}
+
// SetBounds implements internalIterator.SetBounds, as documented in the pebble
// package. Note that the upper field is exclusive.
func (i *singleLevelIterator) SetBounds(lower, upper []byte) {
@@ -440,7 +458,7 @@ func (i *singleLevelIterator) readBlockForVBR(
// that a block is excluded according to its properties but only if its bounds
// fall within the filter's current bounds. This function consults the
// appropriate bound, depending on the iteration direction, and returns either
-// `blockIntersects` or `blockMaybeExcluded`.
+// `blockIntersects` or `blockExcluded`.
func (i *singleLevelIterator) resolveMaybeExcluded(dir int8) intersectsResult {
	// TODO(jackson): We could first try comparing to top-level index block's
	// key, and if within bounds avoid per-data block key comparisons.
@@ -852,26 +870,70 @@ func (i *singleLevelIterator) virtualLast() (*InternalKey, base.LazyValue) {
		panic("pebble: invalid call to virtualLast")
	}
-	// Seek to the first internal key.
-	ikey, _ := i.SeekGE(i.upper, base.SeekGEFlagsNone)
-	if i.endKeyInclusive {
-		// Let's say the virtual sstable upper bound is c#1, with the keys c#3, c#2,
-		// c#1, d, e, ... in the sstable. So, the last key in the virtual sstable is
-		// c#1. We can perform SeekGE(i.upper) and then keep nexting until we find
-		// the last key with userkey == i.upper.
-		//
-		// TODO(bananabrick): Think about how to improve this. If many internal keys
-		// with the same user key at the upper bound then this could be slow, but
-		// maybe the odds of having many internal keys with the same user key at the
-		// upper bound are low.
-		for ikey != nil && i.cmp(ikey.UserKey, i.upper) == 0 {
-			ikey, _ = i.Next()
+	if !i.endKeyInclusive {
+		// Trivial case.
+		return i.SeekLT(i.upper, base.SeekLTFlagsNone)
+	}
+	return i.virtualLastSeekLE(i.upper)
+}
+
+// virtualLastSeekLE is called by virtualLast to do a SeekLE as part of a
+// virtualLast. Consider generalizing this into a SeekLE() if there are other
+// uses of this method in the future.
+func (i *singleLevelIterator) virtualLastSeekLE(key []byte) (*InternalKey, base.LazyValue) {
+	// Callers of SeekLE don't know about virtual sstable bounds, so we may
+	// have to internally restrict the bounds.
+	//
+	// TODO(bananabrick): We can optimize this check away for the level iter
+	// if necessary.
+	if i.cmp(key, i.upper) >= 0 {
+		if !i.endKeyInclusive {
+			panic("unexpected virtualLastSeekLE with exclusive upper bounds")
		}
-		return i.Prev()
+		key = i.upper
	}
-	// We seeked to the first key >= i.upper.
-	return i.Prev()
+	i.exhaustedBounds = 0
+	i.err = nil // clear cached iteration error
+	// Seek optimization only applies until iterator is first positioned with a
+	// SeekGE or SeekLT after SetBounds.
+	i.boundsCmp = 0
+	i.positionedUsingLatestBounds = true
+	i.maybeFilteredKeysSingleLevel = false
+
+	ikey, _ := i.index.SeekGE(key, base.SeekGEFlagsNone)
+	// We can have multiple internal keys with the same user key as the seek
+	// key. In that case, we want the last (greatest) internal key.
+	//
+	// NB: We can avoid this Next()ing if we just implement a blockIter.SeekLE().
+	// This might be challenging to do correctly, so impose regular operations
+	// for now.
+	for ikey != nil && bytes.Equal(ikey.UserKey, key) {
+		ikey, _ = i.index.Next()
+	}
+	if ikey == nil {
+		return i.skipBackward()
+	}
+	result := i.loadBlock(-1)
+	if result == loadBlockFailed {
+		return nil, base.LazyValue{}
+	}
+	if result == loadBlockIrrelevant {
+		// Want to skip to the previous block.
+		return i.skipBackward()
+	}
+	ikey, _ = i.data.SeekGE(key, base.SeekGEFlagsNone)
+	var val base.LazyValue
+	// Go to the last user key that matches key, and then Prev() on the data
+	// block.
+	for ikey != nil && bytes.Equal(ikey.UserKey, key) {
+		ikey, _ = i.data.Next()
+	}
+	ikey, val = i.data.Prev()
+	if ikey != nil {
+		return ikey, val
+	}
+	return i.skipBackward()
}
// SeekLT implements internalIterator.SeekLT, as documented in the pebble
diff --git a/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_two_lvl.go b/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_two_lvl.go
index 36fa8ac449..9a2a3f254c 100644
--- a/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_two_lvl.go
+++ b/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_two_lvl.go
@@ -5,6 +5,7 @@ package sstable
import (
+	"bytes"
	"context"
	"fmt"
@@ -76,8 +77,7 @@ func (i *twoLevelIterator) loadIndex(dir int8) loadBlockResult {
// that an index block is excluded according to its properties but only if its
// bounds fall within the filter's current bounds. This function consults the
// appropriate bound, depending on the iteration direction, and returns either
-// `blockIntersects` or
-// `blockMaybeExcluded`.
+// `blockIntersects` or `blockExcluded`.
func (i *twoLevelIterator) resolveMaybeExcluded(dir int8) intersectsResult {
	// This iterator is configured with a bound-limited block property filter.
	// The bpf determined this entire index block could be excluded from
@@ -567,32 +567,63 @@ func (i *twoLevelIterator) SeekPrefixGE(
	return i.skipForward()
}
-// virtualLast should only be called if i.vReader != nil and i.endKeyInclusive
-// is true.
+// virtualLast should only be called if i.vReader != nil.
func (i *twoLevelIterator) virtualLast() (*InternalKey, base.LazyValue) {
	if i.vState == nil {
		panic("pebble: invalid call to virtualLast")
	}
+	if !i.endKeyInclusive {
+		// Trivial case.
+		return i.SeekLT(i.upper, base.SeekLTFlagsNone)
+	}
+	return i.virtualLastSeekLE(i.upper)
+}
-
-	// Seek to the first internal key.
-	ikey, _ := i.SeekGE(i.upper, base.SeekGEFlagsNone)
-	if i.endKeyInclusive {
-		// Let's say the virtual sstable upper bound is c#1, with the keys c#3, c#2,
-		// c#1, d, e, ... in the sstable. So, the last key in the virtual sstable is
-		// c#1. We can perform SeekGE(i.upper) and then keep nexting until we find
-		// the last key with userkey == i.upper.
-		//
-		// TODO(bananabrick): Think about how to improve this. If many internal keys
-		// with the same user key at the upper bound then this could be slow, but
-		// maybe the odds of having many internal keys with the same user key at the
-		// upper bound are low.
-		for ikey != nil && i.cmp(ikey.UserKey, i.upper) == 0 {
-			ikey, _ = i.Next()
+// virtualLastSeekLE implements a SeekLE() that can be used as part
+// of reverse-iteration calls such as a Last() on a virtual sstable.
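Both the single-level variant above and the two-level variant below implement the same "seek less-than-or-equal" contract: SeekGE to the key, step forward past every entry that still equals the seek user key, then step back once. A self-contained toy version of that contract over a sorted slice (illustrative only, not the production code):

package main

import (
	"fmt"
	"sort"
)

// seekLE returns the index of the last element <= key, or -1 if none exists:
// seek greater-or-equal, skip past equal keys, then step back one position.
func seekLE(keys []string, key string) int {
	i := sort.SearchStrings(keys, key) // first index with keys[i] >= key
	for i < len(keys) && keys[i] == key {
		i++
	}
	return i - 1
}

func main() {
	keys := []string{"a", "c", "c", "c", "d", "e"}
	fmt.Println(seekLE(keys, "c")) // 3: the last "c"
	fmt.Println(seekLE(keys, "b")) // 0: "a"
}

In internal-key terms, "last equal" corresponds to the smallest sequence number, which matches the c#3, c#2, c#1 example in the code this change replaced.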
+func (i *twoLevelIterator) virtualLastSeekLE(key []byte) (*InternalKey, base.LazyValue) { + // Callers of SeekLE don't know about virtual sstable bounds, so we may + // have to internally restrict the bounds. + // + // TODO(bananabrick): We can optimize this check away for the level iter + // if necessary. + if i.cmp(key, i.upper) >= 0 { + if !i.endKeyInclusive { + panic("unexpected virtualLastSeekLE with exclusive upper bounds") } - return i.Prev() + key = i.upper + } + // Need to position the topLevelIndex. + // + // The previous exhausted state of singleLevelIterator is no longer + // relevant, since we may be moving to a different index block. + i.exhaustedBounds = 0 + // Seek optimization only applies until iterator is first positioned with a + // SeekGE or SeekLT after SetBounds. + i.boundsCmp = 0 + i.maybeFilteredKeysTwoLevel = false + ikey, _ := i.topLevelIndex.SeekGE(key, base.SeekGEFlagsNone) + // We can have multiple internal keys with the same user key as the seek + // key. In that case, we want the last (greatest) internal key. + for ikey != nil && bytes.Equal(ikey.UserKey, key) { + ikey, _ = i.topLevelIndex.Next() } - // We seeked to the first key >= i.upper. - return i.Prev() + if ikey == nil { + return i.skipBackward() + } + result := i.loadIndex(-1) + if result == loadBlockFailed { + i.boundsCmp = 0 + return nil, base.LazyValue{} + } + if result == loadBlockIrrelevant { + // Load the previous block. + return i.skipBackward() + } + if ikey, val := i.singleLevelIterator.virtualLastSeekLE(key); ikey != nil { + return ikey, val + } + return i.skipBackward() } // SeekLT implements internalIterator.SeekLT, as documented in the pebble diff --git a/vendor/github.com/cockroachdb/pebble/sstable/reader_virtual.go b/vendor/github.com/cockroachdb/pebble/sstable/reader_virtual.go index 4f05e39d7a..fb74cdf23e 100644 --- a/vendor/github.com/cockroachdb/pebble/sstable/reader_virtual.go +++ b/vendor/github.com/cockroachdb/pebble/sstable/reader_virtual.go @@ -5,6 +5,7 @@ package sstable import ( + "bytes" "context" "github.com/cockroachdb/pebble/internal/base" @@ -27,11 +28,12 @@ type VirtualReader struct { // Lightweight virtual sstable state which can be passed to sstable iterators. 
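// For instance (purely illustrative values): a backing file holding keys
// old/a..old/z can back a virtual sstable whose bounds are [new/a, new/z] and
// whose prefixChange is &manifest.PrefixReplacement{ContentPrefix: []byte("old/"),
// SyntheticPrefix: []byte("new/")}; the VirtualReader below then wraps every
// iterator it hands out so callers only ever observe the new/ keys.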
type virtualState struct { - lower InternalKey - upper InternalKey - fileNum base.FileNum - Compare Compare - isForeign bool + lower InternalKey + upper InternalKey + fileNum base.FileNum + Compare Compare + isForeign bool + prefixChange *manifest.PrefixReplacement } func ceilDiv(a, b uint64) uint64 { @@ -48,11 +50,12 @@ func MakeVirtualReader( } vState := virtualState{ - lower: meta.Smallest, - upper: meta.Largest, - fileNum: meta.FileNum, - Compare: reader.Compare, - isForeign: isForeign, + lower: meta.Smallest, + upper: meta.Largest, + fileNum: meta.FileNum, + Compare: reader.Compare, + isForeign: isForeign, + prefixChange: meta.PrefixReplacement, } v := VirtualReader{ vState: vState, @@ -74,6 +77,7 @@ func MakeVirtualReader( v.Properties.NumSizedDeletions = ceilDiv(reader.Properties.NumSizedDeletions*meta.Size, meta.FileBacking.Size) v.Properties.RawPointTombstoneKeySize = ceilDiv(reader.Properties.RawPointTombstoneKeySize*meta.Size, meta.FileBacking.Size) v.Properties.RawPointTombstoneValueSize = ceilDiv(reader.Properties.RawPointTombstoneValueSize*meta.Size, meta.FileBacking.Size) + return v } @@ -85,8 +89,12 @@ func (v *VirtualReader) NewCompactionIter( rp ReaderProvider, bufferPool *BufferPool, ) (Iterator, error) { - return v.reader.newCompactionIter( + i, err := v.reader.newCompactionIter( bytesIterated, categoryAndQoS, statsCollector, rp, &v.vState, bufferPool) + if err == nil && v.vState.prefixChange != nil { + i = newPrefixReplacingIterator(i, v.vState.prefixChange.ContentPrefix, v.vState.prefixChange.SyntheticPrefix, v.reader.Compare) + } + return i, err } // NewIterWithBlockPropertyFiltersAndContextEtc wraps @@ -103,9 +111,13 @@ func (v *VirtualReader) NewIterWithBlockPropertyFiltersAndContextEtc( statsCollector *CategoryStatsCollector, rp ReaderProvider, ) (Iterator, error) { - return v.reader.newIterWithBlockPropertyFiltersAndContext( + i, err := v.reader.newIterWithBlockPropertyFiltersAndContext( ctx, lower, upper, filterer, hideObsoletePoints, useFilterBlock, stats, categoryAndQoS, statsCollector, rp, &v.vState) + if err == nil && v.vState.prefixChange != nil { + i = newPrefixReplacingIterator(i, v.vState.prefixChange.ContentPrefix, v.vState.prefixChange.SyntheticPrefix, v.reader.Compare) + } + return i, err } // ValidateBlockChecksumsOnBacking will call ValidateBlockChecksumsOnBacking on the underlying reader. @@ -123,6 +135,19 @@ func (v *VirtualReader) NewRawRangeDelIter() (keyspan.FragmentIterator, error) { if iter == nil { return nil, nil } + lower := &v.vState.lower + upper := &v.vState.upper + + if v.vState.prefixChange != nil { + lower = &InternalKey{UserKey: v.vState.prefixChange.ReplaceArg(lower.UserKey), Trailer: lower.Trailer} + upper = &InternalKey{UserKey: v.vState.prefixChange.ReplaceArg(upper.UserKey), Trailer: upper.Trailer} + + iter = keyspan.Truncate( + v.reader.Compare, iter, lower.UserKey, upper.UserKey, + lower, upper, !v.vState.upper.IsExclusiveSentinel(), /* panicOnUpperTruncate */ + ) + return newPrefixReplacingFragmentIterator(iter, v.vState.prefixChange.ContentPrefix, v.vState.prefixChange.SyntheticPrefix), nil + } // Truncation of spans isn't allowed at a user key that also contains points // in the same virtual sstable, as it would lead to covered points getting @@ -135,8 +160,8 @@ func (v *VirtualReader) NewRawRangeDelIter() (keyspan.FragmentIterator, error) { // includes both point keys), but not [a#2,SET-b#3,SET] (as it would truncate // the rangedel at b and lead to the point being uncovered). 
return keyspan.Truncate( - v.reader.Compare, iter, v.vState.lower.UserKey, v.vState.upper.UserKey, - &v.vState.lower, &v.vState.upper, !v.vState.upper.IsExclusiveSentinel(), /* panicOnUpperTruncate */ + v.reader.Compare, iter, lower.UserKey, upper.UserKey, + lower, upper, !v.vState.upper.IsExclusiveSentinel(), /* panicOnUpperTruncate */ ), nil } @@ -149,6 +174,18 @@ func (v *VirtualReader) NewRawRangeKeyIter() (keyspan.FragmentIterator, error) { if iter == nil { return nil, nil } + lower := &v.vState.lower + upper := &v.vState.upper + + if v.vState.prefixChange != nil { + lower = &InternalKey{UserKey: v.vState.prefixChange.ReplaceArg(lower.UserKey), Trailer: lower.Trailer} + upper = &InternalKey{UserKey: v.vState.prefixChange.ReplaceArg(upper.UserKey), Trailer: upper.Trailer} + iter = keyspan.Truncate( + v.reader.Compare, iter, lower.UserKey, upper.UserKey, + lower, upper, !v.vState.upper.IsExclusiveSentinel(), /* panicOnUpperTruncate */ + ) + return newPrefixReplacingFragmentIterator(iter, v.vState.prefixChange.ContentPrefix, v.vState.prefixChange.SyntheticPrefix), nil + } // Truncation of spans isn't allowed at a user key that also contains points // in the same virtual sstable, as it would lead to covered points getting @@ -161,8 +198,8 @@ func (v *VirtualReader) NewRawRangeKeyIter() (keyspan.FragmentIterator, error) { // includes both point keys), but not [a#2,SET-b#3,SET] (as it would truncate // the range key at b and lead to the point being uncovered). return keyspan.Truncate( - v.reader.Compare, iter, v.vState.lower.UserKey, v.vState.upper.UserKey, - &v.vState.lower, &v.vState.upper, !v.vState.upper.IsExclusiveSentinel(), /* panicOnUpperTruncate */ + v.reader.Compare, iter, lower.UserKey, upper.UserKey, + lower, upper, !v.vState.upper.IsExclusiveSentinel(), /* panicOnUpperTruncate */ ), nil } @@ -195,6 +232,10 @@ func (v *virtualState) constrainBounds( last = end } } + if v.prefixChange != nil { + first = v.prefixChange.ReplaceArg(first) + last = v.prefixChange.ReplaceArg(last) + } // TODO(bananabrick): What if someone passes in bounds completely outside of // virtual sstable bounds? return lastKeyInclusive, first, last @@ -204,6 +245,16 @@ func (v *virtualState) constrainBounds( // enforcing the virtual sstable bounds. func (v *VirtualReader) EstimateDiskUsage(start, end []byte) (uint64, error) { _, f, l := v.vState.constrainBounds(start, end, true /* endInclusive */) + if v.vState.prefixChange != nil { + if !bytes.HasPrefix(f, v.vState.prefixChange.SyntheticPrefix) || !bytes.HasPrefix(l, v.vState.prefixChange.SyntheticPrefix) { + return 0, errInputPrefixMismatch + } + // TODO(dt): we could add a scratch buf to VirtualReader to avoid allocs on + // repeated calls to this. + f = append(append([]byte{}, v.vState.prefixChange.ContentPrefix...), f[len(v.vState.prefixChange.SyntheticPrefix):]...) + l = append(append([]byte{}, v.vState.prefixChange.ContentPrefix...), l[len(v.vState.prefixChange.SyntheticPrefix):]...) 
+ } + return v.reader.EstimateDiskUsage(f, l) } diff --git a/vendor/github.com/cockroachdb/pebble/sstable/table.go b/vendor/github.com/cockroachdb/pebble/sstable/table.go index 2f36f3076e..8f3c139dad 100644 --- a/vendor/github.com/cockroachdb/pebble/sstable/table.go +++ b/vendor/github.com/cockroachdb/pebble/sstable/table.go @@ -206,9 +206,6 @@ const ( minFooterLen = levelDBFooterLen maxFooterLen = rocksDBFooterLen - levelDBFormatVersion = 0 - rocksDBFormatVersion2 = 2 - metaRangeKeyName = "pebble.range_key" metaValueIndexName = "pebble.value_index" metaPropertiesName = "rocksdb.properties" @@ -342,18 +339,7 @@ func readFooter(f objstorage.Readable) (footer, error) { } switch magic := buf[len(buf)-len(rocksDBMagic):]; string(magic) { - case levelDBMagic: - if len(buf) < levelDBFooterLen { - return footer, base.CorruptionErrorf( - "pebble/table: invalid table (footer too short): %d", errors.Safe(len(buf))) - } - footer.footerBH.Offset = uint64(off+int64(len(buf))) - levelDBFooterLen - buf = buf[len(buf)-levelDBFooterLen:] - footer.footerBH.Length = uint64(len(buf)) - footer.format = TableFormatLevelDB - footer.checksum = ChecksumTypeCRC32c - - case rocksDBMagic, pebbleDBMagic: + case pebbleDBMagic: // NOTE: The Pebble magic string implies the same footer format as that used // by the RocksDBv2 table format. if len(buf) < rocksDBFooterLen { @@ -404,14 +390,7 @@ func readFooter(f objstorage.Readable) (footer, error) { func (f footer) encode(buf []byte) []byte { switch magic, version := f.format.AsTuple(); magic { - case levelDBMagic: - buf = buf[:levelDBFooterLen] - clear(buf) - n := encodeBlockHandle(buf[0:], f.metaindexBH) - encodeBlockHandle(buf[n:], f.indexBH) - copy(buf[len(buf)-len(levelDBMagic):], levelDBMagic) - - case rocksDBMagic, pebbleDBMagic: + case pebbleDBMagic: buf = buf[:rocksDBFooterLen] clear(buf) switch f.checksum { @@ -440,12 +419,5 @@ func (f footer) encode(buf []byte) []byte { } func supportsTwoLevelIndex(format TableFormat) bool { - switch format { - case TableFormatLevelDB: - return false - case TableFormatRocksDBv2, TableFormatPebblev1, TableFormatPebblev2, TableFormatPebblev3, TableFormatPebblev4: - return true - default: - panic("sstable: unspecified table format version") - } + return format >= TableFormatMinSupported } diff --git a/vendor/github.com/cockroachdb/pebble/table_cache.go b/vendor/github.com/cockroachdb/pebble/table_cache.go index c4e8bf190e..e81f019620 100644 --- a/vendor/github.com/cockroachdb/pebble/table_cache.go +++ b/vendor/github.com/cockroachdb/pebble/table_cache.go @@ -462,6 +462,11 @@ func (c *tableCacheShard) newIters( // // An alternative would be to have different slices for different sstable // iterators, but that requires more work to avoid allocations. + // + // TODO(bilal): for compaction reads of foreign sstables, we do hide + // obsolete points (see sstable.Reader.newCompactionIter) but we don't + // apply the obsolete block property filter. We could optimize this by + // applying the filter. hideObsoletePoints, pointKeyFilters = v.reader.TryAddBlockPropertyFilterForHideObsoletePoints( opts.snapshotForHideObsoletePoints, file.LargestSeqNum, opts.PointKeyFilters) @@ -496,6 +501,19 @@ func (c *tableCacheShard) newIters( return nil, nil, err } + // Assert expected bounds in tests. 
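The EstimateDiskUsage remapping above boils down to swapping a known prefix while refusing keys that do not carry it. A minimal standalone helper showing the shape of that operation (a hypothetical sketch, not the production ReplaceArg helper):

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// replacePrefix swaps the prefix `from` for `to`, allocating a fresh slice,
// and rejects keys that do not carry the expected prefix.
func replacePrefix(key, from, to []byte) ([]byte, error) {
	if !bytes.HasPrefix(key, from) {
		return nil, errors.New("key does not have the expected prefix")
	}
	return append(append([]byte{}, to...), key[len(from):]...), nil
}

func main() {
	out, _ := replacePrefix([]byte("new/banana"), []byte("new/"), []byte("old/"))
	fmt.Printf("%s\n", out) // old/banana
}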
+ if invariants.Enabled && rangeDelIter != nil { + cmp := base.DefaultComparer.Compare + if dbOpts.opts.Comparer != nil { + cmp = dbOpts.opts.Comparer.Compare + } + // TODO(radu): we should be using AssertBounds, but it currently fails in + // some cases (#3167). + rangeDelIter = keyspan.AssertUserKeyBounds( + rangeDelIter, file.SmallestPointKey.UserKey, file.LargestPointKey.UserKey, cmp, + ) + } + if !ok { c.unrefValue(v) // Return an empty iterator. This iterator has no mutable state, so diff --git a/vendor/github.com/cockroachdb/pebble/table_stats.go b/vendor/github.com/cockroachdb/pebble/table_stats.go index 76c940cfdf..33a5fd6e68 100644 --- a/vendor/github.com/cockroachdb/pebble/table_stats.go +++ b/vendor/github.com/cockroachdb/pebble/table_stats.go @@ -10,6 +10,7 @@ import ( "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" + "github.com/cockroachdb/pebble/internal/invariants" "github.com/cockroachdb/pebble/internal/keyspan" "github.com/cockroachdb/pebble/internal/manifest" "github.com/cockroachdb/pebble/sstable" @@ -905,6 +906,14 @@ func newCombinedDeletionKeyspanIter( return nil, err } if iter != nil { + // Assert expected bounds in tests. + if invariants.Enabled { + // TODO(radu): we should be using AssertBounds, but it currently fails in + // some cases (#3167). + iter = keyspan.AssertUserKeyBounds( + iter, m.SmallestPointKey.UserKey, m.LargestPointKey.UserKey, comparer.Compare, + ) + } dIter := &keyspan.DefragmentingIter{} dIter.Init(comparer, iter, equal, reducer, new(keyspan.DefragmentingBuffers)) iter = dIter @@ -922,6 +931,14 @@ func newCombinedDeletionKeyspanIter( return nil, err } if iter != nil { + // Assert expected bounds in tests. + if invariants.Enabled { + // TODO(radu): we should be using AssertBounds, but it currently fails in + // some cases (#3167). + iter = keyspan.AssertUserKeyBounds( + iter, m.SmallestRangeKey.UserKey, m.LargestRangeKey.UserKey, comparer.Compare, + ) + } // Wrap the range key iterator in a filter that elides keys other than range // key deletions. 
iter = keyspan.Filter(iter, func(in *keyspan.Span, out *keyspan.Span) (keep bool) { diff --git a/vendor/github.com/cockroachdb/pebble/version_set.go b/vendor/github.com/cockroachdb/pebble/version_set.go index 35dd0e76fb..676aec6e3b 100644 --- a/vendor/github.com/cockroachdb/pebble/version_set.go +++ b/vendor/github.com/cockroachdb/pebble/version_set.go @@ -5,14 +5,12 @@ package pebble import ( - "bytes" "fmt" "io" "sync" "sync/atomic" "github.com/cockroachdb/errors" - "github.com/cockroachdb/errors/oserror" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/invariants" "github.com/cockroachdb/pebble/internal/manifest" @@ -116,7 +114,6 @@ type versionSet struct { manifestFile vfs.File manifest *record.Writer - setCurrent func(base.DiskFileNum) error getFormatMajorVersion func() FormatMajorVersion writing bool @@ -129,7 +126,6 @@ func (vs *versionSet) init( dirname string, opts *Options, marker *atomicfs.Marker, - setCurrent func(base.DiskFileNum) error, getFMV func() FormatMajorVersion, mu *sync.Mutex, ) { @@ -148,7 +144,6 @@ func (vs *versionSet) init( vs.backingState.fileBackingSize = 0 vs.nextFileNum = 1 vs.manifestMarker = marker - vs.setCurrent = setCurrent vs.getFormatMajorVersion = getFMV } @@ -158,11 +153,10 @@ func (vs *versionSet) create( dirname string, opts *Options, marker *atomicfs.Marker, - setCurrent func(base.DiskFileNum) error, getFormatMajorVersion func() FormatMajorVersion, mu *sync.Mutex, ) error { - vs.init(dirname, opts, marker, setCurrent, getFormatMajorVersion, mu) + vs.init(dirname, opts, marker, getFormatMajorVersion, mu) newVersion := &version{} vs.append(newVersion) var err error @@ -183,8 +177,8 @@ func (vs *versionSet) create( } } if err == nil { - // NB: setCurrent is responsible for syncing the data directory. - if err = vs.setCurrent(vs.manifestFileNum); err != nil { + // NB: Move() is responsible for syncing the data directory. 
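The marker API that replaces setCurrent throughout this file is small; a sketch using the two calls visible in this diff (the marker name and MANIFEST filename are illustrative):

package main

import (
	"os"

	"github.com/cockroachdb/pebble/vfs"
	"github.com/cockroachdb/pebble/vfs/atomicfs"
)

// repointManifest locates the marker (which succeeds even if it was never
// placed) and then atomically repoints it at a new MANIFEST. Move also syncs
// the data directory, which is why no separate setCurrent step remains.
func repointManifest(fs vfs.FS, dirname string) error {
	marker, _, err := atomicfs.LocateMarker(fs, dirname, "manifest")
	if err != nil {
		return err
	}
	return marker.Move("MANIFEST-000042")
}

func main() {
	fs := vfs.NewMem()
	_ = fs.MkdirAll("demo", os.ModePerm)
	_ = repointManifest(fs, "demo")
}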
+ if err = vs.manifestMarker.Move(base.MakeFilename(fileTypeManifest, vs.manifestFileNum)); err != nil { vs.opts.Logger.Fatalf("MANIFEST set current failed: %v", err) } } @@ -207,11 +201,10 @@ func (vs *versionSet) load( opts *Options, manifestFileNum base.DiskFileNum, marker *atomicfs.Marker, - setCurrent func(base.DiskFileNum) error, getFormatMajorVersion func() FormatMajorVersion, mu *sync.Mutex, ) error { - vs.init(dirname, opts, marker, setCurrent, getFormatMajorVersion, mu) + vs.init(dirname, opts, marker, getFormatMajorVersion, mu) vs.manifestFileNum = manifestFileNum manifestPath := base.MakeFilepath(opts.FS, dirname, fileTypeManifest, vs.manifestFileNum) @@ -310,7 +303,6 @@ func (vs *versionSet) load( newVersion, err := bve.Apply( nil, vs.cmp, opts.Comparer.FormatKey, opts.FlushSplitBytes, opts.Experimental.ReadCompactionRate, nil, /* zombies */ - getFormatMajorVersion().orderingInvariants(), ) if err != nil { return err @@ -445,8 +437,6 @@ func (vs *versionSet) logAndApply( } currentVersion := vs.currentVersion() - fmv := vs.getFormatMajorVersion() - orderingInvariants := fmv.orderingInvariants() var newVersion *version // Generate a new manifest if we don't currently have one, or forceRotation @@ -524,7 +514,6 @@ func (vs *versionSet) logAndApply( ve, currentVersion, vs.cmp, vs.opts.Comparer.FormatKey, vs.opts.FlushSplitBytes, vs.opts.Experimental.ReadCompactionRate, vs.backingState.fileBackingMap, vs.addFileBacking, vs.removeFileBacking, - orderingInvariants, ) if err != nil { return errors.Wrap(err, "MANIFEST apply failed") @@ -562,8 +551,8 @@ func (vs *versionSet) logAndApply( return errors.Wrap(err, "MANIFEST sync failed") } if newManifestFileNum != 0 { - // NB: setCurrent is responsible for syncing the data directory. - if err := vs.setCurrent(newManifestFileNum); err != nil { + // NB: Move() is responsible for syncing the data directory. + if err := vs.manifestMarker.Move(base.MakeFilename(fileTypeManifest, newManifestFileNum)); err != nil { return errors.Wrap(err, "MANIFEST set current failed") } vs.opts.EventListener.ManifestCreated(ManifestCreateInfo{ @@ -883,69 +872,16 @@ func (vs *versionSet) updateObsoleteTableMetricsLocked() { } } -func setCurrentFunc( - vers FormatMajorVersion, marker *atomicfs.Marker, fs vfs.FS, dirname string, dir vfs.File, -) func(base.DiskFileNum) error { - if vers < formatVersionedManifestMarker { - // Pebble versions before `formatVersionedManifestMarker` used - // the CURRENT file to signal which MANIFEST is current. Ignore - // the filename read during LocateMarker. - return func(manifestFileNum base.DiskFileNum) error { - if err := setCurrentFile(dirname, fs, manifestFileNum); err != nil { - return err - } - if err := dir.Sync(); err != nil { - // This is a panic here, rather than higher in the call - // stack, for parity with the atomicfs.Marker behavior. - // A panic is always necessary because failed Syncs are - // unrecoverable. 
- panic(errors.Wrap(err, "fatal: MANIFEST dirsync failed")) - } - return nil - } - } - return setCurrentFuncMarker(marker, fs, dirname) -} - -func setCurrentFuncMarker( - marker *atomicfs.Marker, fs vfs.FS, dirname string, -) func(base.DiskFileNum) error { - return func(manifestFileNum base.DiskFileNum) error { - return marker.Move(base.MakeFilename(fileTypeManifest, manifestFileNum)) - } -} - func findCurrentManifest( - vers FormatMajorVersion, fs vfs.FS, dirname string, + fs vfs.FS, dirname string, ) (marker *atomicfs.Marker, manifestNum base.DiskFileNum, exists bool, err error) { - // NB: We always locate the manifest marker, even if we might not - // actually use it (because we're opening the database at an earlier - // format major version that uses the CURRENT file). Locating a - // marker should succeed even if the marker has never been placed. + // Locating a marker should succeed even if the marker has never been placed. var filename string marker, filename, err = atomicfs.LocateMarker(fs, dirname, manifestMarkerName) if err != nil { return nil, base.FileNum(0).DiskFileNum(), false, err } - if vers < formatVersionedManifestMarker { - // Pebble versions before `formatVersionedManifestMarker` used - // the CURRENT file to signal which MANIFEST is current. Ignore - // the filename read during LocateMarker. - - manifestNum, err = readCurrentFile(fs, dirname) - if oserror.IsNotExist(err) { - return marker, base.FileNum(0).DiskFileNum(), false, nil - } else if err != nil { - return marker, base.FileNum(0).DiskFileNum(), false, err - } - return marker, manifestNum, true, nil - } - - // The current format major version is >= - // formatVersionedManifestMarker indicating that the - // atomicfs.Marker is the source of truth on the current manifest. - if filename == "" { // The marker hasn't been set yet. This database doesn't exist. return marker, base.FileNum(0).DiskFileNum(), false, nil @@ -959,41 +895,6 @@ func findCurrentManifest( return marker, manifestNum, true, nil } -func readCurrentFile(fs vfs.FS, dirname string) (base.DiskFileNum, error) { - // Read the CURRENT file to find the current manifest file. 
- current, err := fs.Open(base.MakeFilepath(fs, dirname, fileTypeCurrent, base.FileNum(0).DiskFileNum())) - if err != nil { - return base.FileNum(0).DiskFileNum(), errors.Wrapf(err, "pebble: could not open CURRENT file for DB %q", dirname) - } - defer current.Close() - stat, err := current.Stat() - if err != nil { - return base.FileNum(0).DiskFileNum(), err - } - n := stat.Size() - if n == 0 { - return base.FileNum(0).DiskFileNum(), errors.Errorf("pebble: CURRENT file for DB %q is empty", dirname) - } - if n > 4096 { - return base.FileNum(0).DiskFileNum(), errors.Errorf("pebble: CURRENT file for DB %q is too large", dirname) - } - b := make([]byte, n) - _, err = current.ReadAt(b, 0) - if err != nil { - return base.FileNum(0).DiskFileNum(), err - } - if b[n-1] != '\n' { - return base.FileNum(0).DiskFileNum(), base.CorruptionErrorf("pebble: CURRENT file for DB %q is malformed", dirname) - } - b = bytes.TrimSpace(b) - - _, manifestFileNum, ok := base.ParseFilename(fs, string(b)) - if !ok { - return base.FileNum(0).DiskFileNum(), base.CorruptionErrorf("pebble: MANIFEST name %q is malformed", errors.Safe(b)) - } - return manifestFileNum, nil -} - func newFileMetrics(newFiles []manifest.NewFileEntry) map[int]*LevelMetrics { m := map[int]*LevelMetrics{} for _, nf := range newFiles { diff --git a/vendor/github.com/dgraph-io/badger/v4/README.md b/vendor/github.com/dgraph-io/badger/v4/README.md index 38a834f9ee..2f30bc0858 100644 --- a/vendor/github.com/dgraph-io/badger/v4/README.md +++ b/vendor/github.com/dgraph-io/badger/v4/README.md @@ -220,6 +220,7 @@ Below is a list of known projects that use Badger: * [raft-badger](https://github.com/rfyiamcool/raft-badger) - raft-badger implements LogStore and StableStore Interface of hashcorp/raft. it is used to store raft log and metadata of hashcorp/raft. * [DVID](https://github.com/janelia-flyem/dvid) - A dataservice for branched versioning of a variety of data types. Originally created for large-scale brain reconstructions in Connectomics. * [KVS](https://github.com/tauraamui/kvs) - A library for making it easy to persist, load and query full structs into BadgerDB, using an ownership hierarchy model. +* [LLS](https://github.com/Boc-chi-no/LLS) - LLS is an efficient URL Shortener that can be used to shorten links and track link usage. Support for BadgerDB and MongoDB. Improved performance by more than 30% when using BadgerDB If you are using Badger in a project please send a pull request to add it to the list. diff --git a/vendor/github.com/dgraph-io/badger/v4/level_handler.go b/vendor/github.com/dgraph-io/badger/v4/level_handler.go index 31673f15b2..fc81cc452c 100644 --- a/vendor/github.com/dgraph-io/badger/v4/level_handler.go +++ b/vendor/github.com/dgraph-io/badger/v4/level_handler.go @@ -165,8 +165,8 @@ func (s *levelHandler) addTable(t *table.Table) { // sortTables sorts tables of levelHandler based on table.Smallest. // Normally it should be called after all addTable calls. func (s *levelHandler) sortTables() { - s.RLock() - defer s.RUnlock() + s.Lock() + defer s.Unlock() sort.Slice(s.tables, func(i, j int) bool { return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0 diff --git a/vendor/github.com/dgraph-io/badger/v4/txn.go b/vendor/github.com/dgraph-io/badger/v4/txn.go index 691d4bc493..438af8d5df 100644 --- a/vendor/github.com/dgraph-io/badger/v4/txn.go +++ b/vendor/github.com/dgraph-io/badger/v4/txn.go @@ -718,6 +718,8 @@ func (txn *Txn) CommitWith(cb func(error)) { // callback might be acquiring the same locks. 
Instead run the callback // from another goroutine. go runTxnCallback(&txnCb{user: cb, err: nil}) + // Discard the transaction so that the read is marked done. + txn.Discard() return } diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index a8c29bfbd5..8969526a6e 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -91,11 +91,12 @@ logr design but also left out some parts and changed others: | Adding a name to a logger | `WithName` | no API | | Modify verbosity of log entries in a call chain | `V` | no API | | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | +| Pass context for extracting additional values | no API | API variants like `InfoCtx` | The high-level slog API is explicitly meant to be one of many different APIs that can be layered on top of a shared `slog.Handler`. logr is one such -alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) -package. +alternative API, with [interoperability](#slog-interoperability) provided by +some conversion functions. ### Inspiration @@ -145,24 +146,24 @@ There are implementations for the following logging libraries: ## slog interoperability Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` -and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and -`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. +and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and +`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`. As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level -slog API. `slogr` itself leaves that to the caller. +slog API. -## Using a `logr.Sink` as backend for slog +### Using a `logr.LogSink` as backend for slog Ideally, a logr sink implementation should support both logr and slog by -implementing both the normal logr interface(s) and `slogr.SlogSink`. Because +implementing both the normal logr interface(s) and `SlogSink`. Because of a conflict in the parameters of the common `Enabled` method, it is [not possible to implement both slog.Handler and logr.Sink in the same type](https://github.com/golang/go/issues/59110). If both are supported, log calls can go from the high-level APIs to the backend -without the need to convert parameters. `NewLogr` and `NewSlogHandler` can +without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can convert back and forth without adding additional wrappers, with one exception: when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then -`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future +`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future log calls. Such an implementation should also support values that implement specific @@ -187,13 +188,13 @@ Not supporting slog has several drawbacks: These drawbacks are severe enough that applications using a mixture of slog and logr should switch to a different backend. -## Using a `slog.Handler` as backend for logr +### Using a `slog.Handler` as backend for logr Using a plain `slog.Handler` without support for logr works better than the other direction: - All logr verbosity levels can be mapped 1:1 to their corresponding slog level by negating them. 
-- Stack unwinding is done by the `slogr.SlogSink` and the resulting program +- Stack unwinding is done by the `SlogSink` and the resulting program counter is passed to the `slog.Handler`. - Names added via `Logger.WithName` are gathered and recorded in an additional attribute with `logger` as key and the names separated by slash as value. @@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility with logr implementations without slog support is not important, then `slog.Valuer` is sufficient. -## Context support for slog +### Context support for slog Storing a logger in a `context.Context` is not supported by -slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this -to fill this gap: - - func HandlerFromContext(ctx context.Context) slog.Handler { - logger, err := logr.FromContext(ctx) - if err == nil { - return slogr.NewSlogHandler(logger) - } - return slog.Default().Handler() - } - - func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { - return logr.NewContext(ctx, slogr.NewLogr(handler)) - } - -The downside is that storing and retrieving a `slog.Handler` needs more -allocations compared to using a `logr.Logger`. Therefore the recommendation is -to use the `logr.Logger` API in code which uses contextual logging. +slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be +used to fill this gap. They store and retrieve a `slog.Logger` pointer +under the same context key that is also used by `NewContext` and +`FromContext` for `logr.Logger` value. + +When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will +automatically convert the `slog.Logger` to a +`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction. + +With this approach, binaries which use either slog or logr are as efficient as +possible with no unnecessary allocations. This is also why the API stores a +`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger` +on retrieval would need to allocate one. + +The downside is that switching back and forth needs more allocations. Because +logr is the API that is already in use by different packages, in particular +Kubernetes, the recommendation is to use the `logr.Logger` API in code which +uses contextual logging. + +An alternative to adding values to a logger and storing that logger in the +context is to store the values in the context and to configure a logging +backend to extract those values when emitting log entries. This only works when +log calls are passed the context, which is not supported by the logr API. + +With the slog API, it is possible, but not +required. https://github.com/veqryn/slog-context is a package for slog which +provides additional support code for this approach. It also contains wrappers +for the context functions in logr, so developers who prefer to not use the logr +APIs directly can use those instead and the resulting code will still be +interoperable with logr. ## FAQ diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go new file mode 100644 index 0000000000..de8bcc3ad8 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// contextKey is how we find Loggers in a context.Context. With Go < 1.21, +// the value is always a Logger value. With Go >= 1.21, the value can be a +// Logger value or a slog.Logger pointer. +type contextKey struct{} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go new file mode 100644 index 0000000000..f012f9a18e --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_noslog.go @@ -0,0 +1,49 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go new file mode 100644 index 0000000000..065ef0b828 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_slog.go @@ -0,0 +1,83 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "fmt" + "log/slog" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. 
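The pre-Go-1.21 helpers above show the pattern most callers use. A minimal usage sketch (doWork and the funcr setup are illustrative, not part of the library):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func doWork(ctx context.Context) {
	// FromContextOrDiscard never fails: with no Logger in ctx it
	// returns a no-op Logger instead of an error.
	log := logr.FromContextOrDiscard(ctx)
	log.Info("doing work", "step", 1)
}

func main() {
	// funcr adapts a plain printing function into a LogSink.
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})
	doWork(logr.NewContext(context.Background(), log))
}
```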
+func FromContext(ctx context.Context) (Logger, error) { + v := ctx.Value(contextKey{}) + if v == nil { + return Logger{}, notFoundError{} + } + + switch v := v.(type) { + case Logger: + return v, nil + case *slog.Logger: + return FromSlogHandler(v.Handler()), nil + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. +func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { + v := ctx.Value(contextKey{}) + if v == nil { + return nil + } + + switch v := v.(type) { + case Logger: + return slog.New(ToSlogHandler(v)) + case *slog.Logger: + return v + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if logger, err := FromContext(ctx); err == nil { + return logger + } + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the +// provided slog.Logger. +func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 12e5807cc5..fb2f866f4b 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -100,6 +100,11 @@ type Options struct { // details, see docs for Go's time.Layout. TimestampFormat string + // LogInfoLevel tells funcr what key to use to log the info level. + // If not specified, the info level will be logged as "level". + // If this is set to "", the info level will not be logged at all. + LogInfoLevel *string + // Verbosity tells funcr which V logs to produce. Higher values enable // more logs. Info logs at or below this level will be written, while logs // above this level will be discarded. @@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { if opts.MaxLogDepth == 0 { opts.MaxLogDepth = defaultMaxLogDepth } + if opts.LogInfoLevel == nil { + opts.LogInfoLevel = new(string) + *opts.LogInfoLevel = "level" + } f := Formatter{ outputFormat: outfmt, prefix: "", @@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - depth int - opts *Options + outputFormat outputFormat + prefix string + values []any + valuesStr string + parentValuesStr string + depth int + opts *Options + group string // for slog groups + groupDepth int } // outputFormat indicates which outputFormat to use. @@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. 
buf := bytes.NewBuffer(make([]byte, 0, 1024)) if f.outputFormat == outputJSON { - buf.WriteByte('{') + buf.WriteByte('{') // for the whole line } + vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } f.flatten(buf, vals, false, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if len(f.valuesStr) > 0 { + + if f.parentValuesStr != "" { if continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(',') - } else { - buf.WriteByte(' ') - } + buf.WriteByte(f.comma()) } + buf.WriteString(f.parentValuesStr) continuing = true + } + + groupDepth := f.groupDepth + if f.group != "" { + if f.valuesStr != "" || len(args) != 0 { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') // for the group + continuing = false + } else { + // The group was empty + groupDepth-- + } + } + + if f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } buf.WriteString(f.valuesStr) + continuing = true } + vals = args if hook := f.opts.RenderArgsHook; hook != nil { vals = hook(f.sanitize(vals)) } f.flatten(buf, vals, continuing, true) // escape user-provided keys + + for i := 0; i < groupDepth; i++ { + buf.WriteByte('}') // for the groups + } + if f.outputFormat == outputJSON { - buf.WriteByte('}') + buf.WriteByte('}') // for the whole line } + return buf.String() } @@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } + copied := false for i := 0; i < len(kvList); i += 2 { k, ok := kvList[i].(string) if !ok { + if !copied { + newList := make([]any, len(kvList)) + copy(newList, kvList) + kvList = newList + copied = true + } k = f.nonStringKey(kvList[i]) kvList[i] = k } @@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc if i > 0 || continuing { if f.outputFormat == outputJSON { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } else { // In theory the format could be something we don't understand. In // practice, we control it, so it won't be. 
@@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } } - if escapeKeys { - buf.WriteString(prettyString(k)) - } else { - // this is faster - buf.WriteByte('"') - buf.WriteString(k) - buf.WriteByte('"') - } - if f.outputFormat == outputJSON { - buf.WriteByte(':') - } else { - buf.WriteByte('=') - } + buf.WriteString(f.quoted(k, escapeKeys)) + buf.WriteByte(f.colon()) buf.WriteString(f.pretty(v)) } return kvList } +func (f Formatter) quoted(str string, escape bool) string { + if escape { + return prettyString(str) + } + // this is faster + return `"` + str + `"` +} + +func (f Formatter) comma() byte { + if f.outputFormat == outputJSON { + return ',' + } + return ' ' +} + +func (f Formatter) colon() byte { + if f.outputFormat == outputJSON { + return ':' + } + return '=' +} + func (f Formatter) pretty(value any) string { return f.prettyWithFlags(value, 0, 0) } @@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { } for i := 0; i < len(v); i += 2 { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } k, _ := v[i].(string) // sanitize() above means no need to check success // arbitrary keys might need escaping buf.WriteString(prettyString(k)) - buf.WriteByte(':') + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) } if flags&flagRawStruct == 0 { @@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { continue } if printComma { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } printComma = true // if we got here, we are rendering a field if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { @@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { name = fld.Name } // field names can't contain characters which need escaping - buf.WriteByte('"') - buf.WriteString(name) - buf.WriteByte('"') - buf.WriteByte(':') + buf.WriteString(f.quoted(name, false)) + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) } if flags&flagRawStruct == 0 { @@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { buf.WriteByte('[') for i := 0; i < v.Len(); i++ { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } e := v.Index(i) buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) @@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { i := 0 for it.Next() { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } // If a map key supports TextMarshaler, use it. keystr := "" @@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { } } buf.WriteString(keystr) - buf.WriteByte(':') + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) i++ } @@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any { return kvList } +// startGroup opens a new group scope (basically a sub-struct), which locks all +// the current saved values and starts them anew. This is needed to satisfy +// slog. +func (f *Formatter) startGroup(group string) { + // Unnamed groups are just inlined. + if group == "" { + return + } + + // Any saved values can no longer be changed. 
+ buf := bytes.NewBuffer(make([]byte, 0, 1024)) + continuing := false + + if f.parentValuesStr != "" { + buf.WriteString(f.parentValuesStr) + continuing = true + } + + if f.group != "" && f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') // for the group + continuing = false + } + + if f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.valuesStr) + } + + // NOTE: We don't close the scope here - that's done later, when a log line + // is actually rendered (because we have N scopes to close). + + f.parentValuesStr = buf.String() + + // Start collecting new values. + f.group = group + f.groupDepth++ + f.valuesStr = "" + f.values = nil +} + // Init configures this Formatter from runtime info, such as the call depth // imposed by logr itself. // Note that this receiver is a pointer, so depth can be saved. @@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args if policy := f.opts.LogCaller; policy == All || policy == Info { args = append(args, "caller", f.caller()) } - args = append(args, "level", level, "msg", msg) + if key := *f.opts.LogInfoLevel; key != "" { + args = append(args, key, level) + } + args = append(args, "msg", msg) return prefix, f.render(args, kvList) } diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go new file mode 100644 index 0000000000..7bd84761e2 --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go @@ -0,0 +1,105 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package funcr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +var _ logr.SlogSink = &fnlogger{} + +const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink + +func (l fnlogger) Handle(_ context.Context, record slog.Record) error { + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = attrToKVs(attr, kvList) + return true + }) + + if record.Level >= slog.LevelError { + l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...) + } + return nil +} + +func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = attrToKVs(attr, kvList) + } + l.AddValues(kvList) + return &l +} + +func (l fnlogger) WithGroup(name string) logr.SlogSink { + l.startGroup(name) + return &l +} + +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. 
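Before attrToKVs itself, here is how the group machinery above surfaces to users. A sketch assuming Go 1.21+ and the logr v1.4.0 API vendored by this change; the expected output shape is approximate:

```go
package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	// A funcr logger that emits one JSON object per line.
	log := funcr.NewJSON(func(obj string) {
		fmt.Println(obj)
	}, funcr.Options{})

	// fnlogger implements SlogSink, so slog groups land in the
	// startGroup/attrToKVs code above instead of being flattened.
	slogger := slog.New(logr.ToSlogHandler(log))
	slogger.WithGroup("req").Info("handled",
		slog.String("method", "GET"), slog.Int("status", 200))
	// roughly: {"level":0,"msg":"handled","req":{"method":"GET","status":200}}
}
```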
+func attrToKVs(attr slog.Attr, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, grpKVs) + } + if attr.Key == "" { + // slog says we have to inline these + kvList = append(kvList, grpKVs...) + } else { + kvList = append(kvList, attr.Key, PseudoStruct(grpKVs)) + } + } else if attr.Key != "" { + kvList = append(kvList, attr.Key, attrVal.Any()) + } + + return kvList +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l fnlogger) levelFromSlog(level slog.Level) int { + result := -level + if result < 0 { + result = 0 // because LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 2a5075a180..b4428e105b 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -207,10 +207,6 @@ limitations under the License. // those. package logr -import ( - "context" -) - // New returns a new Logger instance. This is primarily used by libraries // implementing LogSink, rather than end users. Passing a nil sink will create // a Logger which discards all log lines. @@ -410,45 +406,6 @@ func (l Logger) IsZero() bool { return l.sink == nil } -// contextKey is how we find Loggers in a context.Context. -type contextKey struct{} - -// FromContext returns a Logger from ctx or an error if no Logger is found. -func FromContext(ctx context.Context) (Logger, error) { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v, nil - } - - return Logger{}, notFoundError{} -} - -// notFoundError exists to carry an IsNotFound method. -type notFoundError struct{} - -func (notFoundError) Error() string { - return "no logr.Logger was present" -} - -func (notFoundError) IsNotFound() bool { - return true -} - -// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. -func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} - // RuntimeInfo holds information that the logr "core" library knows which // LogSinks might want to know. type RuntimeInfo struct { diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go new file mode 100644 index 0000000000..82d1ba4948 --- /dev/null +++ b/vendor/github.com/go-logr/logr/sloghandler.go @@ -0,0 +1,192 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "log/slog" +) + +type slogHandler struct { + // May be nil, in which case all logs get discarded. + sink LogSink + // Non-nil if sink is non-nil and implements SlogSink. + slogSink SlogSink + + // groupPrefix collects values from WithGroup calls. It gets added as + // prefix to value keys when handling a log record. + groupPrefix string + + // levelBias can be set when constructing the handler to influence the + // slog.Level of log records. A positive levelBias reduces the + // slog.Level value. slog has no API to influence this value after the + // handler got created, so it can only be set indirectly through + // Logger.V. + levelBias slog.Level +} + +var _ slog.Handler = &slogHandler{} + +// groupSeparator is used to concatenate WithGroup names and attribute keys. +const groupSeparator = "." + +// GetLevel is used for black box unit testing. +func (l *slogHandler) GetLevel() slog.Level { + return l.levelBias +} + +func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool { + return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) +} + +func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { + if l.slogSink != nil { + // Only adjust verbosity level of log entries < slog.LevelError. + if record.Level < slog.LevelError { + record.Level -= l.levelBias + } + return l.slogSink.Handle(ctx, record) + } + + // No need to check for nil sink here because Handle will only be called + // when Enabled returned true. + + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = attrToKVs(attr, l.groupPrefix, kvList) + return true + }) + if record.Level >= slog.LevelError { + l.sinkWithCallDepth().Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.sinkWithCallDepth().Info(level, record.Message, kvList...) + } + return nil +} + +// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info +// are called by Handle, code in slog gets skipped. +// +// This offset currently (Go 1.21.0) works for calls through +// slog.New(ToSlogHandler(...)). There's no guarantee that the call +// chain won't change. Wrapping the handler will also break unwinding. It's +// still better than not adjusting at all.... +// +// This cannot be done when constructing the handler because FromSlogHandler needs +// access to the original sink without this adjustment. A second copy would +// work, but then WithAttrs would have to be called for both of them. 
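For the opposite direction, a plain slog.Handler backing a logr.Logger through the slogSink added later in this diff, usage looks roughly like this (a sketch, assuming Go 1.21+):

```go
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// A stock slog handler as the backend; Level -4 lets logr
	// verbosity up to V(4) through, since V(n) maps to slog level -n.
	handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.Level(-4),
	})
	log := logr.FromSlogHandler(handler)

	log.V(2).Info("verbose detail", "attempt", 3) // arrives as slog level -2
	log.Error(os.ErrClosed, "copy failed")        // always slog.LevelError, error under "err"
}
```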
+func (l *slogHandler) sinkWithCallDepth() LogSink { + if sink, ok := l.sink.(CallDepthLogSink); ok { + return sink.WithCallDepth(2) + } + return l.sink +} + +func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + if l.sink == nil || len(attrs) == 0 { + return l + } + + clone := *l + if l.slogSink != nil { + clone.slogSink = l.slogSink.WithAttrs(attrs) + clone.sink = clone.slogSink + } else { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = attrToKVs(attr, l.groupPrefix, kvList) + } + clone.sink = l.sink.WithValues(kvList...) + } + return &clone +} + +func (l *slogHandler) WithGroup(name string) slog.Handler { + if l.sink == nil { + return l + } + if name == "" { + // slog says to inline empty groups + return l + } + clone := *l + if l.slogSink != nil { + clone.slogSink = l.slogSink.WithGroup(name) + clone.sink = clone.slogSink + } else { + clone.groupPrefix = addPrefix(clone.groupPrefix, name) + } + return &clone +} + +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. +func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + prefix := groupPrefix + if attr.Key != "" { + prefix = addPrefix(groupPrefix, attr.Key) + } + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, prefix, grpKVs) + } + kvList = append(kvList, grpKVs...) + } else if attr.Key != "" { + kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any()) + } + + return kvList +} + +func addPrefix(prefix, name string) string { + if prefix == "" { + return name + } + if name == "" { + return prefix + } + return prefix + groupSeparator + name +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l *slogHandler) levelFromSlog(level slog.Level) int { + result := -level + result += l.levelBias // in case the original Logger had a V level + if result < 0 { + result = 0 // because LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go new file mode 100644 index 0000000000..28a83d0243 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr.go @@ -0,0 +1,100 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package logr + +import ( + "context" + "log/slog" +) + +// FromSlogHandler returns a Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug. +func FromSlogHandler(handler slog.Handler) Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return Discard() + } + return New(handler.sink).V(int(handler.levelBias)) + } + return New(&slogSink{handler: handler}) +} + +// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger. +// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the Logger: +// +// logger := +// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the Logger and the result is negated. If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handled negative levels: +// +// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +func ToSlogHandler(logger Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. It then should +// also support special slog values like slog.Group. When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (Logger and slog.Logger/Handler) then are supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in FromSlogHandler +// and ToSlogHandler. +type SlogSink interface { + LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go new file mode 100644 index 0000000000..4060fcbc2b --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogsink.go @@ -0,0 +1,120 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "log/slog" + "runtime" + "time" +) + +var ( + _ LogSink = &slogSink{} + _ CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} +) + +// Underlier is implemented by the LogSink returned by NewFromLogHandler. +type Underlier interface { + // GetUnderlying returns the Handler used by the LogSink. + GetUnderlying() slog.Handler +} + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" + + // errKey is used to log the error parameter of Error as an additional attribute. + errKey = "err" +) + +type slogSink struct { + callDepth int + name string + handler slog.Handler +} + +func (l *slogSink) Init(info RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *slogSink) GetUnderlying() slog.Handler { + return l.handler +} + +func (l *slogSink) WithCallDepth(depth int) LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *slogSink) Enabled(level int) bool { + return l.handler.Enabled(context.Background(), slog.Level(-level)) +} + +func (l *slogSink) Info(level int, msg string, kvList ...interface{}) { + l.log(nil, msg, slog.Level(-level), kvList...) +} + +func (l *slogSink) Error(err error, msg string, kvList ...interface{}) { + l.log(err, msg, slog.LevelError, kvList...) +} + +func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) { + var pcs [1]uintptr + // skip runtime.Callers, this function, Info/Error, and all helper functions above that. + runtime.Callers(3+l.callDepth, pcs[:]) + + record := slog.NewRecord(time.Now(), level, msg, pcs[0]) + if l.name != "" { + record.AddAttrs(slog.String(nameKey, l.name)) + } + if err != nil { + record.AddAttrs(slog.Any(errKey, err)) + } + record.Add(kvList...) + _ = l.handler.Handle(context.Background(), record) +} + +func (l slogSink) WithName(name string) LogSink { + if l.name != "" { + l.name += "/" + } + l.name += name + return &l +} + +func (l slogSink) WithValues(kvList ...interface{}) LogSink { + l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) + return &l +} + +func kvListToAttrs(kvList ...interface{}) []slog.Attr { + // We don't need the record itself, only its Add method. + record := slog.NewRecord(time.Time{}, 0, "", 0) + record.Add(kvList...) + attrs := make([]slog.Attr, 0, record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + attrs = append(attrs, attr) + return true + }) + return attrs +} diff --git a/vendor/github.com/nutsdb/nutsdb/README-CN.md b/vendor/github.com/nutsdb/nutsdb/README-CN.md index aa8d3cdc60..217d130d16 100644 --- a/vendor/github.com/nutsdb/nutsdb/README-CN.md +++ b/vendor/github.com/nutsdb/nutsdb/README-CN.md @@ -2382,6 +2382,12 @@ nutsDB不会出现“不可重复读”这种情况,当高并发的时候, * 提pull requests * 优化修改README文档 +感谢以下贡献者,感谢你们的付出! 
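An aside on the nutsdb changes just below: tx.go's pending-write bookkeeping moves into a new pending.go, whose toList flattens the per-datastructure, per-bucket entry maps into one pre-sized slice. A standalone sketch of that flattening pattern (types simplified, all names hypothetical):

```go
package main

import "fmt"

// entry stands in for nutsdb's *Entry; the ds and bucket keys
// are simplified to strings here.
type entry struct{ key string }

type pending struct {
	entries map[string]map[string][]entry
	size    int // total entry count, maintained on every insert
}

// toList flattens the nested maps into a single slice without
// reallocation; tracking size on insert is what lets it allocate
// exactly once.
func (p *pending) toList() []entry {
	list := make([]entry, p.size)
	i := 0
	for _, byBucket := range p.entries {
		for _, es := range byBucket {
			for _, e := range es {
				list[i] = e
				i++
			}
		}
	}
	return list
}

func main() {
	p := &pending{
		entries: map[string]map[string][]entry{
			"btree": {"users": {{key: "a"}, {key: "b"}}},
			"set":   {"tags": {{key: "c"}}},
		},
		size: 3,
	}
	fmt.Println(len(p.toList())) // 3
}
```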
+ + + + + #### 代码风格指南参考 https://github.com/golang/go/wiki/CodeReviewComments diff --git a/vendor/github.com/nutsdb/nutsdb/README.md b/vendor/github.com/nutsdb/nutsdb/README.md index 94fe1b0562..34f6d57b1e 100644 --- a/vendor/github.com/nutsdb/nutsdb/README.md +++ b/vendor/github.com/nutsdb/nutsdb/README.md @@ -127,9 +127,13 @@ func main() { - [Benchmark](./docs/user_guides/benchmarks.md) -## Contributing +## Contributors -See [CONTRIBUTING](https://github.com/nutsdb/nutsdb/blob/master/CONTRIBUTING.md) for details on submitting patches and the contribution workflow. +Thank you for considering contributing to NutsDB! The contribution guide can be found in the [CONTRIBUTING](https://github.com/nutsdb/nutsdb/blob/master/CONTRIBUTING.md) for details on submitting patches and the contribution workflow. + + + + ## Acknowledgements diff --git a/vendor/github.com/nutsdb/nutsdb/pending.go b/vendor/github.com/nutsdb/nutsdb/pending.go new file mode 100644 index 0000000000..42f335f6c8 --- /dev/null +++ b/vendor/github.com/nutsdb/nutsdb/pending.go @@ -0,0 +1,38 @@ +package nutsdb + +// pendingBucketList the uncommitted bucket changes in this Tx +type pendingBucketList map[Ds]map[BucketName]*Bucket + +// pendingEntryList the uncommitted Entry changes in this Tx +type pendingEntryList struct { + entries map[Ds]map[BucketName][]*Entry + size int +} + +// rangeBucket input a range handler function f and call it with every bucket in pendingBucketList +func (p pendingBucketList) rangeBucket(f func(bucket *Bucket) error) error { + for _, bucketsInDs := range p { + for _, bucket := range bucketsInDs { + err := f(bucket) + if err != nil { + return err + } + } + } + return nil +} + +// toList collect all the entries in pendingEntryList to a list. +func (ens *pendingEntryList) toList() []*Entry { + list := make([]*Entry, ens.size) + var i int + for _, entriesInDS := range ens.entries { + for _, entries := range entriesInDS { + for _, entry := range entries { + list[i] = entry + i++ + } + } + } + return list +} diff --git a/vendor/github.com/nutsdb/nutsdb/tx.go b/vendor/github.com/nutsdb/nutsdb/tx.go index b5a27e825a..c0f7ce0b0f 100644 --- a/vendor/github.com/nutsdb/nutsdb/tx.go +++ b/vendor/github.com/nutsdb/nutsdb/tx.go @@ -34,15 +34,6 @@ const ( txStatusClosed = 3 ) -// pendingBucketList the uncommitted bucket changes in this Tx -type pendingBucketList map[Ds]map[BucketName]*Bucket - -// pendingEntryList the uncommitted Entry changes in this Tx -type pendingEntryList struct { - entries map[Ds]map[BucketName][]*Entry - size int -} - // Tx represents a transaction. 
type Tx struct { id uint64 @@ -60,20 +51,6 @@ type txnCb struct { err error } -func (ens *pendingEntryList) toList() []*Entry { - list := make([]*Entry, ens.size) - var i int - for _, entriesInDS := range ens.entries { - for _, entries := range entriesInDS { - for _, entry := range entries { - list[i] = entry - i++ - } - } - } - return list -} - func (tx *Tx) submitEntry(e *Entry) error { ds := e.Meta.Ds ens := tx.pendingWrites @@ -814,41 +791,41 @@ func (tx *Tx) getChangeCountInEntriesChanges() int64 { func (tx *Tx) getChangeCountInBucketChanges() int64 { var res int64 - for _, bucketsInDs := range tx.pendingBucketList { - for _, bucket := range bucketsInDs { - bucketId := bucket.Id - if bucket.Meta.Op == BucketDeleteOperation { - switch bucket.Ds { - case DataStructureBTree: - if bTree, ok := tx.db.Index.bTree.idx[bucketId]; ok { - res -= int64(bTree.Count()) - } - case DataStructureSet: - if set, ok := tx.db.Index.set.idx[bucketId]; ok { - for key := range set.M { - res -= int64(set.SCard(key)) - } + var f = func(bucket *Bucket) error { + bucketId := bucket.Id + if bucket.Meta.Op == BucketDeleteOperation { + switch bucket.Ds { + case DataStructureBTree: + if bTree, ok := tx.db.Index.bTree.idx[bucketId]; ok { + res -= int64(bTree.Count()) + } + case DataStructureSet: + if set, ok := tx.db.Index.set.idx[bucketId]; ok { + for key := range set.M { + res -= int64(set.SCard(key)) } - case DataStructureSortedSet: - if sortedSet, ok := tx.db.Index.sortedSet.idx[bucketId]; ok { - for key := range sortedSet.M { - curLen, _ := sortedSet.ZCard(key) - res -= int64(curLen) - } + } + case DataStructureSortedSet: + if sortedSet, ok := tx.db.Index.sortedSet.idx[bucketId]; ok { + for key := range sortedSet.M { + curLen, _ := sortedSet.ZCard(key) + res -= int64(curLen) } - case DataStructureList: - if list, ok := tx.db.Index.list.idx[bucketId]; ok { - for key := range list.Items { - curLen, _ := list.Size(key) - res -= int64(curLen) - } + } + case DataStructureList: + if list, ok := tx.db.Index.list.idx[bucketId]; ok { + for key := range list.Items { + curLen, _ := list.Size(key) + res -= int64(curLen) } - default: - panic(fmt.Sprintf("there is an unexpected data structure that is unimplemented in our database.:%d", bucket.Ds)) } + default: + panic(fmt.Sprintf("there is an unexpected data structure that is unimplemented in our database.:%d", bucket.Ds)) } } + return nil } + _ = tx.pendingBucketList.rangeBucket(f) return res } diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go index 4f506f8791..199c21d27a 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.7 && amd64 && gc && !purego +//go:build amd64 && gc && !purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 353bb7cac5..9ae8206c20 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build go1.7 && amd64 && gc && !purego +//go:build amd64 && gc && !purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go deleted file mode 100644 index 1d0770abba..0000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 && amd64 && gc && !purego - -package blake2b - -import "golang.org/x/sys/cpu" - -func init() { - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - if useSSE4 { - hashBlocksSSE4(h, c, flag, blocks) - } else { - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go index d9fcac3a4d..54e446e1d2 100644 --- a/vendor/golang.org/x/crypto/blake2b/register.go +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.9 - package blake2b import ( diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s index 8fb26aebb2..1f53938861 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -319,9 +319,9 @@ MOVQ rDi, _si(oState); \ MOVQ rDo, _so(oState) \ -// func keccakF1600(state *[25]uint64) +// func keccakF1600(a *[25]uint64) TEXT ·keccakF1600(SB), 0, $200-8 - MOVQ state+0(FP), rpState + MOVQ a+0(FP), rpState // Convert the user state into an internal state NOTQ _be(rpState) diff --git a/vendor/modernc.org/libc/libc_windows.go b/vendor/modernc.org/libc/libc_windows.go index 81f4078b57..a92964009d 100644 --- a/vendor/modernc.org/libc/libc_windows.go +++ b/vendor/modernc.org/libc/libc_windows.go @@ -7307,3 +7307,29 @@ func X__stdio_common_vsscanf(t *TLS, args ...interface{}) int32 { panic("TO func X__stdio_common_vswprintf(t *TLS, args ...interface{}) int32 { panic("TODO") } func X__stdio_common_vswprintf_s(t *TLS, args ...interface{}) int32 { panic("TODO") } func X__stdio_common_vswscanf(t *TLS, args ...interface{}) int32 { panic("TODO") } + +func X_lseeki64(t *TLS, fd int32, offset int64, whence int32) int64 { + if __ccgo_strace { + trc("t=%v fd=%v offset=%v whence=%v, (%v:)", t, fd, offset, whence, origin(2)) + } + + f, ok := fdToFile(fd) + if !ok { + t.setErrno(errno.EBADF) + return -1 + } + + n, err := syscall.Seek(f.Handle, offset, int(whence)) + if err != nil { + if dmesgs { + dmesg("%v: fd %v, off %#x, whence %v: %v", origin(1), f._fd, offset, whenceStr(whence), n) + } + t.setErrno(err) + return -1 + } + + if dmesgs { + dmesg("%v: fd %v, off %#x, whence %v: ok", origin(1), f._fd, offset, whenceStr(whence)) + } + return n +} diff --git a/vendor/modules.txt b/vendor/modules.txt index ec384ec977..c20dbffc42 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -47,7 +47,7 @@ github.com/CortexFoundation/robot/backend # github.com/CortexFoundation/statik v0.0.0-20210315012922-8bb8a7b5dc66 ## explicit; go 1.16 github.com/CortexFoundation/statik -# github.com/CortexFoundation/torrentfs v1.0.56-0.20231216192420-89d0e1363db7 +# github.com/CortexFoundation/torrentfs 
v1.0.56-0.20231221154219-46968989e786 ## explicit; go 1.21 github.com/CortexFoundation/torrentfs github.com/CortexFoundation/torrentfs/backend @@ -57,13 +57,13 @@ github.com/CortexFoundation/torrentfs/compress github.com/CortexFoundation/torrentfs/params github.com/CortexFoundation/torrentfs/tool github.com/CortexFoundation/torrentfs/types -# github.com/CortexFoundation/wormhole v0.0.2-0.20231018202213-693acd0cc941 +# github.com/CortexFoundation/wormhole v0.0.2-0.20231221153655-0321e1fe971c ## explicit; go 1.20 github.com/CortexFoundation/wormhole # github.com/DataDog/zstd v1.5.6-0.20230622172052-ea68dcab66c0 ## explicit; go 1.14 github.com/DataDog/zstd -# github.com/RoaringBitmap/roaring v1.6.0 +# github.com/RoaringBitmap/roaring v1.7.0 ## explicit; go 1.14 github.com/RoaringBitmap/roaring github.com/RoaringBitmap/roaring/BitSliceIndexing @@ -222,10 +222,10 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/config v1.26.1 +# github.com/aws/aws-sdk-go-v2/config v1.26.2 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.16.12 +# github.com/aws/aws-sdk-go-v2/credentials v1.16.13 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -253,7 +253,7 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding # github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/route53 v1.35.5 +# github.com/aws/aws-sdk-go-v2/service/route53 v1.36.0 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/route53 github.com/aws/aws-sdk-go-v2/service/route53/internal/customizations @@ -269,7 +269,7 @@ github.com/aws/aws-sdk-go-v2/service/sso/types github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 +# github.com/aws/aws-sdk-go-v2/service/sts v1.26.6 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints @@ -358,7 +358,7 @@ github.com/cockroachdb/errors/withstack # github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b ## explicit; go 1.16 github.com/cockroachdb/logtags -# github.com/cockroachdb/pebble v0.0.0-20231214172447-ab4952c5f87b +# github.com/cockroachdb/pebble v0.0.0-20231220182916-5be92739e7bb ## explicit; go 1.21 github.com/cockroachdb/pebble github.com/cockroachdb/pebble/bloom @@ -460,7 +460,7 @@ github.com/deckarep/golang-set/v2 ## explicit; go 1.17 github.com/decred/dcrd/dcrec/secp256k1/v4 github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa -# github.com/dgraph-io/badger/v4 v4.2.1-0.20231013074411-fb1b00959581 +# github.com/dgraph-io/badger/v4 v4.2.1-0.20231218065111-7b5baa11879c ## explicit; go 1.19 github.com/dgraph-io/badger/v4 github.com/dgraph-io/badger/v4/fb @@ -542,7 +542,7 @@ github.com/go-llsqlite/adapter/sqlitex github.com/go-llsqlite/crawshaw github.com/go-llsqlite/crawshaw/c github.com/go-llsqlite/crawshaw/sqlitex -# github.com/go-logr/logr v1.3.0 +# github.com/go-logr/logr v1.4.0 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -766,7 +766,7 @@ github.com/naoina/go-stringutil ## explicit 
github.com/naoina/toml github.com/naoina/toml/ast -# github.com/nutsdb/nutsdb v1.0.3-0.20231216104420-099901a09ea1 +# github.com/nutsdb/nutsdb v1.0.3-0.20231219150513-49211584c107 ## explicit; go 1.18 github.com/nutsdb/nutsdb # github.com/nxadm/tail v1.4.11 @@ -990,7 +990,7 @@ github.com/ucwong/filecache # github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb ## explicit; go 1.19 github.com/ucwong/go-ttlmap -# github.com/ucwong/golang-kv v1.0.23-0.20231216115725-4f38a0fd08a6 +# github.com/ucwong/golang-kv v1.0.23-0.20231220222728-54b1adf96ed4 ## explicit; go 1.21 github.com/ucwong/golang-kv github.com/ucwong/golang-kv/badger @@ -1060,7 +1060,7 @@ go.uber.org/automaxprocs go.uber.org/automaxprocs/internal/cgroups go.uber.org/automaxprocs/internal/runtime go.uber.org/automaxprocs/maxprocs -# golang.org/x/crypto v0.16.0 +# golang.org/x/crypto v0.17.0 ## explicit; go 1.18 golang.org/x/crypto/blake2b golang.org/x/crypto/cast5 @@ -1081,7 +1081,7 @@ golang.org/x/crypto/ripemd160 golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 golang.org/x/crypto/ssh/terminal -# golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 +# golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps @@ -1230,7 +1230,7 @@ gopkg.in/urfave/cli.v1 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# modernc.org/libc v1.37.6 +# modernc.org/libc v1.38.0 ## explicit; go 1.20 modernc.org/libc modernc.org/libc/errno
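Finally, circling back to the badger fix near the top of this diff: level_handler.go's sortTables switched from RLock to Lock because sort.Slice mutates the slice, and mutating under a read lock races with concurrent readers. A minimal standalone sketch of the corrected pattern (the type here is a stand-in, not badger's):

```go
package main

import (
	"sort"
	"sync"
)

type levelHandler struct {
	sync.RWMutex
	tables []int // stand-in for []*table.Table, kept ordered by smallest key
}

// sortTables mutates s.tables, so it must take the write lock.
// With RLock instead, a concurrent reader could observe a partially
// sorted slice while the sort swaps elements.
func (s *levelHandler) sortTables() {
	s.Lock()
	defer s.Unlock()
	sort.Ints(s.tables)
}

func main() {
	s := &levelHandler{tables: []int{3, 1, 2}}
	s.sortTables()
}
```

Under the race detector, the RLock version of this pattern would be reported as a data race between the sort's element swaps and any concurrent reader of the slice, which is exactly what the one-line badger change addresses.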