diff --git a/go.mod b/go.mod
index 358799d1f4..964d0ff315 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1
github.com/CortexFoundation/inference v1.0.2-0.20230307032835-9197d586a4e8
github.com/CortexFoundation/statik v0.0.0-20210315012922-8bb8a7b5dc66
- github.com/CortexFoundation/torrentfs v1.0.55-0.20230925132950-bf8f925efaea
+ github.com/CortexFoundation/torrentfs v1.0.55-0.20230928134616-cf4f35fd8ad8
github.com/VictoriaMetrics/fastcache v1.12.1
github.com/arsham/figurine v1.3.0
github.com/aws/aws-sdk-go-v2 v1.21.0
@@ -17,13 +17,13 @@ require (
github.com/cespare/cp v1.1.1
github.com/charmbracelet/bubbletea v0.24.2
github.com/cloudflare/cloudflare-go v0.57.1
- github.com/cockroachdb/pebble v0.0.0-20230922144958-86593692e09f
+ github.com/cockroachdb/pebble v0.0.0-20230927205513-725ebe297867
github.com/consensys/gnark-crypto v0.11.3-0.20230906172141-49815a21349a
github.com/crate-crypto/go-kzg-4844 v0.3.0
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/deckarep/golang-set/v2 v2.3.1
github.com/docker/docker v24.0.6+incompatible
- github.com/dop251/goja v0.0.0-20230828202809-3dbe69dd2b8e
+ github.com/dop251/goja v0.0.0-20230919151941-fc55792775de
github.com/ethereum/c-kzg-4844 v0.3.1
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e
github.com/fjl/memsize v0.0.1
@@ -82,7 +82,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect
github.com/CortexFoundation/cvm-runtime v0.0.0-20221117094012-b5a251885572 // indirect
github.com/CortexFoundation/merkletree v0.0.0-20230724124840-b6e80265a137 // indirect
- github.com/CortexFoundation/robot v1.0.7-0.20230924205950-05c3925242ed // indirect
+ github.com/CortexFoundation/robot v1.0.7-0.20230925095445-ed66eab85b41 // indirect
github.com/CortexFoundation/wormhole v0.0.2-0.20230922082251-f97b53242e48 // indirect
github.com/DataDog/zstd v1.5.6-0.20230622172052-ea68dcab66c0 // indirect
github.com/RoaringBitmap/roaring v1.3.0 // indirect
@@ -101,7 +101,7 @@ require (
github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7 // indirect
github.com/anacrolix/stm v0.5.0 // indirect
github.com/anacrolix/sync v0.4.0 // indirect
- github.com/anacrolix/torrent v1.52.6-0.20230916034836-b84b19cc4c45 // indirect
+ github.com/anacrolix/torrent v1.52.6-0.20230926122046-f009e1d583cb // indirect
github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 // indirect
github.com/anacrolix/utp v0.2.0 // indirect
github.com/antlabs/stl v0.0.1 // indirect
@@ -135,7 +135,7 @@ require (
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
- github.com/dgraph-io/badger/v4 v4.2.1-0.20230909123407-5f004c4ef084 // indirect
+ github.com/dgraph-io/badger/v4 v4.2.1-0.20230927164310-2aea1ca26005 // indirect
github.com/dgraph-io/ristretto v0.1.1 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
@@ -218,7 +218,7 @@ require (
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/ucwong/filecache v1.0.6-0.20230405163841-810d53ced4bd // indirect
github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb // indirect
- github.com/ucwong/golang-kv v1.0.23-0.20230922195406-1f1883da3532 // indirect
+ github.com/ucwong/golang-kv v1.0.23-0.20230928100657-72a7bea7f86f // indirect
github.com/ucwong/shard v1.0.1-0.20230924231639-2ac2d8ab288c // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/xujiajun/mmap-go v1.0.1 // indirect
diff --git a/go.sum b/go.sum
index 7f01c0b2f2..0f7467d383 100644
--- a/go.sum
+++ b/go.sum
@@ -60,15 +60,15 @@ github.com/CortexFoundation/inference v1.0.2-0.20230307032835-9197d586a4e8 h1:W/
github.com/CortexFoundation/inference v1.0.2-0.20230307032835-9197d586a4e8/go.mod h1:Doj3mBNzdjCDvKVwysKaHEPbS20A7RRaQY0bHtEVz88=
github.com/CortexFoundation/merkletree v0.0.0-20230724124840-b6e80265a137 h1:GdrLwJRKPrUSd4V/cBpPyzhNQEo8IDT7Le15hmuC+/4=
github.com/CortexFoundation/merkletree v0.0.0-20230724124840-b6e80265a137/go.mod h1:OwfhC316GcEJ9QVNWPqj6QV7sorcXBnc0P9p8dPfSbM=
-github.com/CortexFoundation/robot v1.0.7-0.20230924205950-05c3925242ed h1:LuwYo3FNfIvy0MDfrQRkW31MTtDz80wikKhaw/I7NXA=
-github.com/CortexFoundation/robot v1.0.7-0.20230924205950-05c3925242ed/go.mod h1:8y8iDdeeXuTGBxQihkt+5KnwL6OtCLeA7HGpHm5+S1w=
+github.com/CortexFoundation/robot v1.0.7-0.20230925095445-ed66eab85b41 h1:wM27dPLQOcOP+5lgqoe/3pfHhN2FpCYrhM+8cE1JJPU=
+github.com/CortexFoundation/robot v1.0.7-0.20230925095445-ed66eab85b41/go.mod h1:8y8iDdeeXuTGBxQihkt+5KnwL6OtCLeA7HGpHm5+S1w=
github.com/CortexFoundation/statik v0.0.0-20210315012922-8bb8a7b5dc66 h1:yJbN4DFvpStCShXOVxNV64aawsPqizLuXZhrnhCr2fY=
github.com/CortexFoundation/statik v0.0.0-20210315012922-8bb8a7b5dc66/go.mod h1:AkjV4OECAskB9m6w+2e84F0Zcx7oZWEmHB3EKoaDXYk=
github.com/CortexFoundation/torrentfs v1.0.13-0.20200623060705-ce027f43f2f8/go.mod h1:Ma+tGhPPvz4CEZHaqEJQMOEGOfHeQBiAoNd1zyc/w3Q=
github.com/CortexFoundation/torrentfs v1.0.14-0.20200703071639-3fcabcabf274/go.mod h1:qnb3YlIJmuetVBtC6Lsejr0Xru+1DNmDCdTqnwy7lhk=
github.com/CortexFoundation/torrentfs v1.0.20-0.20200810031954-d36d26f82fcc/go.mod h1:N5BsicP5ynjXIi/Npl/SRzlJ630n1PJV2sRj0Z0t2HA=
-github.com/CortexFoundation/torrentfs v1.0.55-0.20230925132950-bf8f925efaea h1:2mOtJfaSmXlPsl6ehyHN28ErOk54eoCaDaxg4BJZmnU=
-github.com/CortexFoundation/torrentfs v1.0.55-0.20230925132950-bf8f925efaea/go.mod h1:eGj6kGpTRO5AbYo2z2FkGlXSL1CcK7fG5TIGaXMBGNY=
+github.com/CortexFoundation/torrentfs v1.0.55-0.20230928134616-cf4f35fd8ad8 h1:QDYy+5OBCzu3R4NXhfvtShWPiWBeJ3cKwgqPbeo6Qso=
+github.com/CortexFoundation/torrentfs v1.0.55-0.20230928134616-cf4f35fd8ad8/go.mod h1:TtxTSsJ+MIZOviVV1ecz9hNYC20dxg/gDJR3l90gNpo=
github.com/CortexFoundation/wormhole v0.0.2-0.20230922082251-f97b53242e48 h1:EDrk6U+GjSJ1FdbTrtRDe3LA/Ot6E3xu/HpXAio99B4=
github.com/CortexFoundation/wormhole v0.0.2-0.20230922082251-f97b53242e48/go.mod h1:a2ynt5IqAlGTWLQY0pILqkxYe4AzHLNd+bPmK/r03oE=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
@@ -211,8 +211,8 @@ github.com/anacrolix/torrent v1.15.0/go.mod h1:MFc6KcbpAyfwGqOyRkdarUK9QnKA/FkVg
github.com/anacrolix/torrent v1.15.1-0.20200504230043-cc5d2abe18e5/go.mod h1:QlOfgrCz5kbvhOz8M58dUwHY5SfZ9VbIvReZ0z0MdIk=
github.com/anacrolix/torrent v1.15.1-0.20200619022403-dd51e99b88cc/go.mod h1:wuopQPC5+/M+zHYvhcA2vp5UCTm9rUc+VqjyBa882Q8=
github.com/anacrolix/torrent v1.15.1-0.20200715061614-dd906f8fa72e/go.mod h1:XWo/fJN1oKgcjgxM+pUZpvalHfqHDs27BY5mBZjIQWo=
-github.com/anacrolix/torrent v1.52.6-0.20230916034836-b84b19cc4c45 h1:kdVm5twSlYXonHcLG17LC/D42UhPUDfLOm/F21kslmk=
-github.com/anacrolix/torrent v1.52.6-0.20230916034836-b84b19cc4c45/go.mod h1:6lKyJNzkkY68p+LeSfv62auyyceWn12Uji+kme5cpaI=
+github.com/anacrolix/torrent v1.52.6-0.20230926122046-f009e1d583cb h1:VpGwTQi/NUbXf1syyfxdhW3CBXnWT2vawLtYA1pYe84=
+github.com/anacrolix/torrent v1.52.6-0.20230926122046-f009e1d583cb/go.mod h1:Ma/WtLey9lU97u2i55LUJ8AnXaL2GfEK6pWh7/9v1hI=
github.com/anacrolix/upnp v0.1.1/go.mod h1:LXsbsp5h+WGN7YR+0A7iVXm5BL1LYryDev1zuJMWYQo=
github.com/anacrolix/upnp v0.1.2-0.20200416075019-5e9378ed1425/go.mod h1:Pz94W3kl8rf+wxH3IbCa9Sq+DTJr8OSbV2Q3/y51vYs=
github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 h1:QAVZ3pN/J4/UziniAhJR2OZ9Ox5kOY2053tBbbqUPYA=
@@ -348,8 +348,8 @@ github.com/cockroachdb/errors v1.10.0 h1:lfxS8zZz1+OjtV4MtNWgboi/W5tyLEB6VQZBXN+
github.com/cockroachdb/errors v1.10.0/go.mod h1:lknhIsEVQ9Ss/qKDBQS/UqFSvPQjOwNq2qyKAxtHRqE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
-github.com/cockroachdb/pebble v0.0.0-20230922144958-86593692e09f h1:yD41mae3ywx2xyQm7RhdHBX2EXoDGWxdpylFddICz1M=
-github.com/cockroachdb/pebble v0.0.0-20230922144958-86593692e09f/go.mod h1:nindLFinxeDPjP4qI9LtVHAwDef57/0s5KMfWgdktQc=
+github.com/cockroachdb/pebble v0.0.0-20230927205513-725ebe297867 h1:O/fBxpVLLedWnVw+kRDe2rcybSGFLiLXnnHgyrT/Pr0=
+github.com/cockroachdb/pebble v0.0.0-20230927205513-725ebe297867/go.mod h1:nindLFinxeDPjP4qI9LtVHAwDef57/0s5KMfWgdktQc=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
@@ -398,8 +398,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etly
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
-github.com/dgraph-io/badger/v4 v4.2.1-0.20230909123407-5f004c4ef084 h1:IDcz/RVppqLIFEAbPslSr5ga89psLD94/GW2LWu3Swg=
-github.com/dgraph-io/badger/v4 v4.2.1-0.20230909123407-5f004c4ef084/go.mod h1:p6XWc7eHqfhKDjoLLcqJS/PE0/d6mV1qGYrbZzbKHmE=
+github.com/dgraph-io/badger/v4 v4.2.1-0.20230927164310-2aea1ca26005 h1:BqRNH4EULF9g802jeDz+SQuSJx74C0lzMae3D5KdaSo=
+github.com/dgraph-io/badger/v4 v4.2.1-0.20230927164310-2aea1ca26005/go.mod h1:WhH+o5WylpkniTpKsBkWx15GZUJGwOApGLbvLsNU+DY=
github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
@@ -419,8 +419,8 @@ github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bc
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
-github.com/dop251/goja v0.0.0-20230828202809-3dbe69dd2b8e h1:UvQD6hTSfeM6hhTQ24Dlw2RppP05W7SWbWb6kubJAog=
-github.com/dop251/goja v0.0.0-20230828202809-3dbe69dd2b8e/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
+github.com/dop251/goja v0.0.0-20230919151941-fc55792775de h1:lA38Xtzr1Wo+iQdkN2E11ziKXJYRxLlzK/e2/fdxoEI=
+github.com/dop251/goja v0.0.0-20230919151941-fc55792775de/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -1252,8 +1252,8 @@ github.com/ucwong/filecache v1.0.6-0.20230405163841-810d53ced4bd h1:gBtlvLAsgLk+
github.com/ucwong/filecache v1.0.6-0.20230405163841-810d53ced4bd/go.mod h1:ddwX+NCjMZPdpzcGh1fcEbNTUTCtKgt2hC2rqvmLKgA=
github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb h1:dVZH3AH9f7zB3VBmsjn25B7lfcAyMP4QxdFYTrfj7tg=
github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb/go.mod h1:3yswsBsVuwsOjDvFfC5Na9XSEf4HC7mj3W3g6jvSY/s=
-github.com/ucwong/golang-kv v1.0.23-0.20230922195406-1f1883da3532 h1:12fnD9i+/p08dB+VAa40SCOqG9tMQQ3JhuRfmq6oyFs=
-github.com/ucwong/golang-kv v1.0.23-0.20230922195406-1f1883da3532/go.mod h1:43r9yshOdrC5+EP/qGkZXNtFggL8+niRSbZuS3hwiRs=
+github.com/ucwong/golang-kv v1.0.23-0.20230928100657-72a7bea7f86f h1:Qdn58gT8RlwtDC4bMnwwTDSRTw+qeXr5d+glvVZBXp8=
+github.com/ucwong/golang-kv v1.0.23-0.20230928100657-72a7bea7f86f/go.mod h1:VDnYcNcJ7OiGcEGe4gMrysnDMi+up8uUQXEQTKnr+j8=
github.com/ucwong/golang-set v1.8.1-0.20200419153428-d7b0b1ac2d43/go.mod h1:xu0FaiQFGbBcFZj2o7udZ5rbA8jRTsv47hkPoG5qQNM=
github.com/ucwong/goleveldb v1.0.3-0.20200508074755-578cba616f37/go.mod h1:dgJUTtDxq/ne6/JzZhHzF24OL/uqILz9IWk8HmT4V2g=
github.com/ucwong/goleveldb v1.0.3-0.20200618184106-f1c6bc3a428b/go.mod h1:7Sq6w7AfEZuB/a6mrlvHCSXCSkqojCMMrM3Ei12QAT0=
diff --git a/vendor/github.com/CortexFoundation/robot/config.go b/vendor/github.com/CortexFoundation/robot/config.go
index 912cf50312..2c0b443480 100644
--- a/vendor/github.com/CortexFoundation/robot/config.go
+++ b/vendor/github.com/CortexFoundation/robot/config.go
@@ -17,7 +17,6 @@
package robot
import (
- "github.com/CortexFoundation/CortexTheseus/metrics"
"time"
)
@@ -26,10 +25,3 @@ const (
delay = 12 //params.Delay
timeout = 30 * time.Second
)
-
-var (
- rpcBlockMeter = metrics.NewRegisteredMeter("torrent/block/call", nil)
- rpcCurrentMeter = metrics.NewRegisteredMeter("torrent/current/call", nil)
- rpcUploadMeter = metrics.NewRegisteredMeter("torrent/upload/call", nil)
- rpcReceiptMeter = metrics.NewRegisteredMeter("torrent/receipt/call", nil)
-)
diff --git a/vendor/github.com/CortexFoundation/robot/metrics.go b/vendor/github.com/CortexFoundation/robot/metrics.go
new file mode 100644
index 0000000000..7f2219fce8
--- /dev/null
+++ b/vendor/github.com/CortexFoundation/robot/metrics.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The CortexTheseus Authors
+// This file is part of the CortexTheseus library.
+//
+// The CortexTheseus library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The CortexTheseus library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the CortexTheseus library. If not, see <http://www.gnu.org/licenses/>.
+
+package robot
+
+import (
+ "github.com/CortexFoundation/CortexTheseus/metrics"
+)
+
+var (
+ rpcBlockMeter = metrics.NewRegisteredMeter("torrent/block/call", nil)
+ rpcCurrentMeter = metrics.NewRegisteredMeter("torrent/current/call", nil)
+ rpcUploadMeter = metrics.NewRegisteredMeter("torrent/upload/call", nil)
+ rpcReceiptMeter = metrics.NewRegisteredMeter("torrent/receipt/call", nil)
+)
diff --git a/vendor/github.com/anacrolix/torrent/torrent.go b/vendor/github.com/anacrolix/torrent/torrent.go
index 8b47452188..6385a3fc02 100644
--- a/vendor/github.com/anacrolix/torrent/torrent.go
+++ b/vendor/github.com/anacrolix/torrent/torrent.go
@@ -2143,7 +2143,7 @@ func (t *Torrent) pieceHashed(piece pieceIndex, passed bool, hashIoErr error) {
}
err := p.Storage().MarkComplete()
if err != nil {
- t.logger.Printf("%T: error marking piece complete %d: %s", t.storage, piece, err)
+ t.logger.Levelf(log.Warning, "%T: error marking piece complete %d: %s", t.storage, piece, err)
}
t.cl.lock()
diff --git a/vendor/github.com/anacrolix/torrent/tracker/client.go b/vendor/github.com/anacrolix/torrent/tracker/client.go
index 1aaf256119..3b7e2aba36 100644
--- a/vendor/github.com/anacrolix/torrent/tracker/client.go
+++ b/vendor/github.com/anacrolix/torrent/tracker/client.go
@@ -9,10 +9,12 @@ import (
trHttp "github.com/anacrolix/torrent/tracker/http"
"github.com/anacrolix/torrent/tracker/udp"
+ "github.com/anacrolix/torrent/types/infohash"
)
type Client interface {
Announce(context.Context, AnnounceRequest, AnnounceOpt) (AnnounceResponse, error)
+ Scrape(ctx context.Context, ihs []infohash.T) (out udp.ScrapeResponse, err error)
Close() error
}
diff --git a/vendor/github.com/anacrolix/torrent/tracker/http/scrape.go b/vendor/github.com/anacrolix/torrent/tracker/http/scrape.go
new file mode 100644
index 0000000000..6940370f54
--- /dev/null
+++ b/vendor/github.com/anacrolix/torrent/tracker/http/scrape.go
@@ -0,0 +1,47 @@
+package httpTracker
+
+import (
+ "context"
+ "log"
+ "net/http"
+ "net/url"
+
+ "github.com/anacrolix/torrent/bencode"
+ "github.com/anacrolix/torrent/tracker/udp"
+ "github.com/anacrolix/torrent/types/infohash"
+)
+
+type scrapeResponse struct {
+ Files files `bencode:"files"`
+}
+
+// Bencode should support bencode.Unmarshalers from a string in the dict key position.
+type files = map[string]udp.ScrapeInfohashResult
+
+func (cl Client) Scrape(ctx context.Context, ihs []infohash.T) (out udp.ScrapeResponse, err error) {
+ _url := cl.url_.JoinPath("..", "scrape")
+ query, err := url.ParseQuery(_url.RawQuery)
+ if err != nil {
+ return
+ }
+ for _, ih := range ihs {
+ query.Add("info_hash", ih.AsString())
+ }
+ _url.RawQuery = query.Encode()
+ log.Printf("%q", _url.String())
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, _url.String(), nil)
+ if err != nil {
+ return
+ }
+ resp, err := cl.hc.Do(req)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+ var decodedResp scrapeResponse
+ err = bencode.NewDecoder(resp.Body).Decode(&decodedResp)
+ for _, ih := range ihs {
+ out = append(out, decodedResp.Files[ih.AsString()])
+ }
+ return
+}
diff --git a/vendor/github.com/anacrolix/torrent/tracker/udp.go b/vendor/github.com/anacrolix/torrent/tracker/udp.go
index db486948a6..cf68188751 100644
--- a/vendor/github.com/anacrolix/torrent/tracker/udp.go
+++ b/vendor/github.com/anacrolix/torrent/tracker/udp.go
@@ -4,8 +4,11 @@ import (
"context"
"encoding/binary"
+ "github.com/anacrolix/generics"
+
trHttp "github.com/anacrolix/torrent/tracker/http"
"github.com/anacrolix/torrent/tracker/udp"
+ "github.com/anacrolix/torrent/types/infohash"
)
type udpClient struct {
@@ -13,6 +16,15 @@ type udpClient struct {
requestUri string
}
+func (c *udpClient) Scrape(ctx context.Context, ihs []infohash.T) (out udp.ScrapeResponse, err error) {
+ return c.cl.Client.Scrape(
+ ctx,
+ generics.SliceMap(ihs, func(from infohash.T) udp.InfoHash {
+ return from
+ }),
+ )
+}
+
func (c *udpClient) Close() error {
return c.cl.Close()
}
diff --git a/vendor/github.com/anacrolix/torrent/tracker/udp/scrape.go b/vendor/github.com/anacrolix/torrent/tracker/udp/scrape.go
index 331f109e62..13a69b9919 100644
--- a/vendor/github.com/anacrolix/torrent/tracker/udp/scrape.go
+++ b/vendor/github.com/anacrolix/torrent/tracker/udp/scrape.go
@@ -5,7 +5,9 @@ type ScrapeRequest []InfoHash
type ScrapeResponse []ScrapeInfohashResult
type ScrapeInfohashResult struct {
- Seeders int32
- Completed int32
- Leechers int32
+ // I'm not sure why the fields are named differently for HTTP scrapes.
+ // https://www.bittorrent.org/beps/bep_0048.html
+ Seeders int32 `bencode:"complete"`
+ Completed int32 `bencode:"downloaded"`
+ Leechers int32 `bencode:"incomplete"`
}
diff --git a/vendor/github.com/cockroachdb/pebble/compaction.go b/vendor/github.com/cockroachdb/pebble/compaction.go
index 8382c599f7..8efeb2aaa4 100644
--- a/vendor/github.com/cockroachdb/pebble/compaction.go
+++ b/vendor/github.com/cockroachdb/pebble/compaction.go
@@ -32,7 +32,10 @@ import (
)
var errEmptyTable = errors.New("pebble: empty table")
-var errCancelledCompaction = errors.New("pebble: compaction cancelled by a concurrent operation, will retry compaction")
+
+// ErrCancelledCompaction is returned if a compaction is cancelled by a
+// concurrent excise or ingest-split operation.
+var ErrCancelledCompaction = errors.New("pebble: compaction cancelled by a concurrent operation, will retry compaction")
var compactLabels = pprof.Labels("pebble", "compact")
var flushLabels = pprof.Labels("pebble", "flush")
@@ -1337,7 +1340,7 @@ func (c *compaction) newInputIter(
// initRangeDel, the levelIter will close and forget the range
// deletion iterator when it steps on to a new file. Surfacing range
// deletions to compactions are handled below.
- iters = append(iters, newLevelIter(iterOpts, c.cmp, nil /* split */, newIters,
+ iters = append(iters, newLevelIter(iterOpts, c.comparer, newIters,
level.files.Iter(), l, internalIterOpts{
bytesIterated: &c.bytesIterated,
bufferPool: &c.bufferPool,
@@ -1905,15 +1908,27 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) {
ve := &versionEdit{}
var level int
var err error
+ var fileToSplit *fileMetadata
+ var ingestSplitFiles []ingestSplitFile
for _, file := range c.flushing[0].flushable.(*ingestedFlushable).files {
- level, err = ingestTargetLevel(
- d.newIters, d.tableNewRangeKeyIter, iterOpts, d.cmp,
+ suggestSplit := d.opts.Experimental.IngestSplit != nil && d.opts.Experimental.IngestSplit() &&
+ d.FormatMajorVersion() >= FormatVirtualSSTables
+ level, fileToSplit, err = ingestTargetLevel(
+ d.newIters, d.tableNewRangeKeyIter, iterOpts, d.opts.Comparer,
c.version, baseLevel, d.mu.compact.inProgress, file.FileMetadata,
+ suggestSplit,
)
if err != nil {
return nil, err
}
ve.NewFiles = append(ve.NewFiles, newFileEntry{Level: level, Meta: file.FileMetadata})
+ if fileToSplit != nil {
+ ingestSplitFiles = append(ingestSplitFiles, ingestSplitFile{
+ ingestFile: file.FileMetadata,
+ splitFile: fileToSplit,
+ level: level,
+ })
+ }
levelMetrics := c.metrics[level]
if levelMetrics == nil {
levelMetrics = &LevelMetrics{}
@@ -1923,6 +1938,28 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) {
levelMetrics.TablesIngested++
}
+ updateLevelMetricsOnExcise := func(m *fileMetadata, level int, added []newFileEntry) {
+ levelMetrics := c.metrics[level]
+ if levelMetrics == nil {
+ levelMetrics = &LevelMetrics{}
+ c.metrics[level] = levelMetrics
+ }
+ levelMetrics.NumFiles--
+ levelMetrics.Size -= int64(m.Size)
+ for i := range added {
+ levelMetrics.NumFiles++
+ levelMetrics.Size += int64(added[i].Meta.Size)
+ }
+ }
+
+ if len(ingestSplitFiles) > 0 {
+ ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata)
+ replacedFiles := make(map[base.FileNum][]newFileEntry)
+ if err := d.ingestSplit(ve, updateLevelMetricsOnExcise, ingestSplitFiles, replacedFiles); err != nil {
+ return nil, err
+ }
+ }
+
return ve, nil
}
@@ -2093,6 +2130,24 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) {
metrics.BytesIn += d.mu.mem.queue[i].logSize
}
}
+ } else if len(ve.DeletedFiles) > 0 {
+ // c.kind == compactionKindIngestedFlushable && we have deleted files due
+ // to ingest-time splits.
+ //
+ // Iterate through all other compactions, and check if their inputs have
+ // been replaced due to an ingest-time split. In that case, cancel the
+ // compaction.
+ for c2 := range d.mu.compact.inProgress {
+ for i := range c2.inputs {
+ iter := c2.inputs[i].files.Iter()
+ for f := iter.First(); f != nil; f = iter.Next() {
+ if _, ok := ve.DeletedFiles[deletedFileEntry{FileNum: f.FileNum, Level: c2.inputs[i].level}]; ok {
+ c2.cancel.Store(true)
+ break
+ }
+ }
+ }
+ }
}
err = d.mu.versions.logAndApply(jobID, ve, c.metrics, false, /* forceRotation */
func() []compactionInfo { return d.getInProgressCompactionInfoLocked(c) })
@@ -2654,7 +2709,7 @@ func (d *DB) compact1(c *compaction, errChannel chan error) (err error) {
// the manifest lock, we don't expect this bool to change its value
// as only the holder of the manifest lock will ever write to it.
if c.cancel.Load() {
- err = firstError(err, errCancelledCompaction)
+ err = firstError(err, ErrCancelledCompaction)
}
if err != nil {
// logAndApply calls logUnlock. If we didn't call it, we need to call
@@ -2819,7 +2874,7 @@ func (d *DB) runCompaction(
}
if c.cancel.Load() {
- return ve, nil, stats, errCancelledCompaction
+ return ve, nil, stats, ErrCancelledCompaction
}
// Release the d.mu lock while doing I/O.
@@ -2952,7 +3007,7 @@ func (d *DB) runCompaction(
newOutput := func() error {
// Check if we've been cancelled by a concurrent operation.
if c.cancel.Load() {
- return errCancelledCompaction
+ return ErrCancelledCompaction
}
fileMeta := &fileMetadata{}
d.mu.Lock()
diff --git a/vendor/github.com/cockroachdb/pebble/compaction_picker.go b/vendor/github.com/cockroachdb/pebble/compaction_picker.go
index 1d9467aaea..7df439ecac 100644
--- a/vendor/github.com/cockroachdb/pebble/compaction_picker.go
+++ b/vendor/github.com/cockroachdb/pebble/compaction_picker.go
@@ -90,18 +90,38 @@ func (info compactionInfo) String() string {
return buf.String()
}
-type sortCompactionLevelsDecreasingScore []candidateLevelInfo
+type sortCompactionLevelsByPriority []candidateLevelInfo
-func (s sortCompactionLevelsDecreasingScore) Len() int {
+func (s sortCompactionLevelsByPriority) Len() int {
return len(s)
}
-func (s sortCompactionLevelsDecreasingScore) Less(i, j int) bool {
- if s[i].score != s[j].score {
- return s[i].score > s[j].score
+
+// A level should be picked for compaction if the compensatedScoreRatio is >= the
+// compactionScoreThreshold.
+const compactionScoreThreshold = 1
+
+// Less should return true if s[i] must be placed earlier than s[j] in the final
+// sorted list. The candidateLevelInfo for the level placed earlier is more likely
+// to be picked for a compaction.
+func (s sortCompactionLevelsByPriority) Less(i, j int) bool {
+ iShouldCompact := s[i].compensatedScoreRatio >= compactionScoreThreshold
+ jShouldCompact := s[j].compensatedScoreRatio >= compactionScoreThreshold
+ // Ordering is defined as decreasing on (shouldCompact, uncompensatedScoreRatio)
+ // where shouldCompact is 1 for true and 0 for false.
+ if iShouldCompact && !jShouldCompact {
+ return true
+ }
+ if !iShouldCompact && jShouldCompact {
+ return false
+ }
+
+ if s[i].uncompensatedScoreRatio != s[j].uncompensatedScoreRatio {
+ return s[i].uncompensatedScoreRatio > s[j].uncompensatedScoreRatio
}
return s[i].level < s[j].level
}
-func (s sortCompactionLevelsDecreasingScore) Swap(i, j int) {
+
+func (s sortCompactionLevelsByPriority) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
@@ -153,6 +173,7 @@ func generateSublevelInfo(cmp base.Compare, levelFiles manifest.LevelSlice) []su
// compactionPickerMetrics holds metrics related to the compaction picking process
type compactionPickerMetrics struct {
+ // scores contains the compensatedScoreRatio from the candidateLevelInfo.
scores []float64
singleLevelOverlappingRatio float64
multiLevelOverlappingRatio float64
@@ -164,7 +185,8 @@ type compactionPickerMetrics struct {
// created.
type pickedCompaction struct {
cmp Compare
- // score of the chosen compaction. Taken from candidateLevelInfo.
+ // score of the chosen compaction. This is the same as the
+ // compensatedScoreRatio in the candidateLevelInfo.
score float64
// kind indicates the kind of compaction.
kind compactionKind
@@ -664,16 +686,23 @@ func newCompactionPicker(
// Information about a candidate compaction level that has been identified by
// the compaction picker.
type candidateLevelInfo struct {
- // The score of the level to be compacted, with compensated file sizes and
- // adjustments.
- score float64
- // The original score of the level to be compacted, before adjusting
- // according to other levels' sizes.
- origScore float64
- // The raw score of the level to be compacted, calculated using
- // uncompensated file sizes and without any adjustments.
- rawScore float64
- level int
+ // The compensatedScore of the level after adjusting according to the other
+ // levels' sizes. For L0, the compensatedScoreRatio is equivalent to the
+ // uncompensatedScoreRatio as we don't account for level size compensation in
+ // L0.
+ compensatedScoreRatio float64
+ // The score of the level after accounting for level size compensation before
+ // adjusting according to other levels' sizes. For L0, the compensatedScore
+ // is equivalent to the uncompensatedScore as we don't account for level
+ // size compensation in L0.
+ compensatedScore float64
+ // The score of the level to be compacted, calculated using uncompensated file
+ // sizes and without any adjustments.
+ uncompensatedScore float64
+ // uncompensatedScoreRatio is the uncompensatedScore adjusted according to
+ // the other levels' sizes.
+ uncompensatedScoreRatio float64
+ level int
// The level to compact to.
outputLevel int
// The file in level that will be compacted. Additional files may be
@@ -682,6 +711,10 @@ type candidateLevelInfo struct {
file manifest.LevelFile
}
+func (c *candidateLevelInfo) shouldCompact() bool {
+ return c.compensatedScoreRatio >= compactionScoreThreshold
+}
+
func fileCompensation(f *fileMetadata) uint64 {
return uint64(f.Stats.PointDeletionsBytesEstimate) + f.Stats.RangeDeletionsBytesEstimate
}
@@ -759,7 +792,7 @@ var _ compactionPicker = &compactionPickerByScore{}
func (p *compactionPickerByScore) getScores(inProgress []compactionInfo) [numLevels]float64 {
var scores [numLevels]float64
for _, info := range p.calculateLevelScores(inProgress) {
- scores[info.level] = info.score
+ scores[info.level] = info.compensatedScoreRatio
}
return scores
}
@@ -978,68 +1011,83 @@ func (p *compactionPickerByScore) calculateLevelScores(
scores[i].level = i
scores[i].outputLevel = i + 1
}
+ l0UncompensatedScore := calculateL0UncompensatedScore(p.vers, p.opts, inProgressCompactions)
scores[0] = candidateLevelInfo{
- outputLevel: p.baseLevel,
- score: calculateL0Score(p.vers, p.opts, inProgressCompactions),
+ outputLevel: p.baseLevel,
+ uncompensatedScore: l0UncompensatedScore,
+ compensatedScore: l0UncompensatedScore, /* No level size compensation for L0 */
}
sizeAdjust := calculateSizeAdjust(inProgressCompactions)
for level := 1; level < numLevels; level++ {
compensatedLevelSize := levelCompensatedSize(p.vers.Levels[level]) + sizeAdjust[level].compensated()
- scores[level].score = float64(compensatedLevelSize) / float64(p.levelMaxBytes[level])
- scores[level].origScore = scores[level].score
-
- // In addition to the compensated score, we calculate a separate score
- // that uses actual file sizes, not compensated sizes. This is used
- // during score smoothing down below to prevent excessive
- // prioritization of reclaiming disk space.
- scores[level].rawScore = float64(p.vers.Levels[level].Size()+sizeAdjust[level].actual()) / float64(p.levelMaxBytes[level])
+ scores[level].compensatedScore = float64(compensatedLevelSize) / float64(p.levelMaxBytes[level])
+ scores[level].uncompensatedScore = float64(p.vers.Levels[level].Size()+sizeAdjust[level].actual()) / float64(p.levelMaxBytes[level])
}
- // Adjust each level's score by the score of the next level. If the next
- // level has a high score, and is thus a priority for compaction, this
- // reduces the priority for compacting the current level. If the next level
- // has a low score (i.e. it is below its target size), this increases the
- // priority for compacting the current level.
+ // Adjust each level's {compensated, uncompensated}Score by the uncompensatedScore
+ // of the next level to get a {compensated, uncompensated}ScoreRatio. If the
+ // next level has a high uncompensatedScore, and is thus a priority for compaction,
+ // this reduces the priority for compacting the current level. If the next level
+ // has a low uncompensatedScore (i.e. it is below its target size), this increases
+ // the priority for compacting the current level.
//
// The effect of this adjustment is to help prioritize compactions in lower
- // levels. The following shows the new score and original score. In this
- // scenario, L0 has 68 sublevels. L3 (a.k.a. Lbase) is significantly above
- // its target size. The original score prioritizes compactions from those two
- // levels, but doing so ends up causing a future problem: data piles up in
- // the higher levels, starving L5->L6 compactions, and to a lesser degree
- // starving L4->L5 compactions.
+ // levels. The following example shows the compensatedScoreRatio and the
+ // compensatedScore. In this scenario, L0 has 68 sublevels. L3 (a.k.a. Lbase)
+ // is significantly above its target size. The original score prioritizes
+ // compactions from those two levels, but doing so ends up causing a future
+ // problem: data piles up in the higher levels, starving L5->L6 compactions,
+ // and to a lesser degree starving L4->L5 compactions.
+ //
+ // Note that in the example shown there is no level size compensation so the
+ // compensatedScore and the uncompensatedScore is the same for each level.
//
- // adjusted original
- // score score size max-size
- // L0 3.2 68.0 2.2 G -
- // L3 3.2 21.1 1.3 G 64 M
- // L4 3.4 6.7 3.1 G 467 M
- // L5 3.4 2.0 6.6 G 3.3 G
- // L6 0.6 0.6 14 G 24 G
+ // compensatedScoreRatio compensatedScore uncompensatedScore size max-size
+ // L0 3.2 68.0 68.0 2.2 G -
+ // L3 3.2 21.1 21.1 1.3 G 64 M
+ // L4 3.4 6.7 6.7 3.1 G 467 M
+ // L5 3.4 2.0 2.0 6.6 G 3.3 G
+ // L6 0.6 0.6 0.6 14 G 24 G
var prevLevel int
for level := p.baseLevel; level < numLevels; level++ {
- if scores[prevLevel].score >= 1 {
- // Avoid absurdly large scores by placing a floor on the score that we'll
- // adjust a level by. The value of 0.01 was chosen somewhat arbitrarily
- const minScore = 0.01
- if scores[level].rawScore >= minScore {
- scores[prevLevel].score /= scores[level].rawScore
+ // The compensated scores, and uncompensated scores will be turned into
+ // ratios as they're adjusted according to other levels' sizes.
+ scores[prevLevel].compensatedScoreRatio = scores[prevLevel].compensatedScore
+ scores[prevLevel].uncompensatedScoreRatio = scores[prevLevel].uncompensatedScore
+
+ // Avoid absurdly large scores by placing a floor on the score that we'll
+ // adjust a level by. The value of 0.01 was chosen somewhat arbitrarily.
+ const minScore = 0.01
+ if scores[prevLevel].compensatedScoreRatio >= compactionScoreThreshold {
+ if scores[level].uncompensatedScore >= minScore {
+ scores[prevLevel].compensatedScoreRatio /= scores[level].uncompensatedScore
+ } else {
+ scores[prevLevel].compensatedScoreRatio /= minScore
+ }
+ }
+ if scores[prevLevel].uncompensatedScoreRatio >= compactionScoreThreshold {
+ if scores[level].uncompensatedScore >= minScore {
+ scores[prevLevel].uncompensatedScoreRatio /= scores[level].uncompensatedScore
} else {
- scores[prevLevel].score /= minScore
+ scores[prevLevel].uncompensatedScoreRatio /= minScore
}
}
prevLevel = level
}
+ // Set the score ratios for the lowest level.
+ // INVARIANT: prevLevel == numLevels-1
+ scores[prevLevel].compensatedScoreRatio = scores[prevLevel].compensatedScore
+ scores[prevLevel].uncompensatedScoreRatio = scores[prevLevel].uncompensatedScore
- sort.Sort(sortCompactionLevelsDecreasingScore(scores[:]))
+ sort.Sort(sortCompactionLevelsByPriority(scores[:]))
return scores
}
-// calculateL0Score calculates a float score representing the relative priority
-// of compacting L0. Level L0 is special in that files within L0 may overlap one
-// another, so a different set of heuristics that take into account
-// read amplification apply.
-func calculateL0Score(
+// calculateL0UncompensatedScore calculates a float score representing the
+// relative priority of compacting L0. Level L0 is special in that files within
+// L0 may overlap one another, so a different set of heuristics that take into
+// account read amplification apply.
+func calculateL0UncompensatedScore(
vers *version, opts *Options, inProgressCompactions []compactionInfo,
) float64 {
// Use the sublevel count to calculate the score. The base vs intra-L0
@@ -1242,8 +1290,9 @@ func (p *compactionPickerByScore) pickAuto(env compactionEnv) (pc *pickedCompact
if pc.startLevel.level == info.level {
marker = "*"
}
- fmt.Fprintf(&buf, " %sL%d: %5.1f %5.1f %5.1f %8s %8s",
- marker, info.level, info.score, info.origScore, info.rawScore,
+ fmt.Fprintf(&buf, " %sL%d: %5.1f %5.1f %5.1f %5.1f %8s %8s",
+ marker, info.level, info.compensatedScoreRatio, info.compensatedScore,
+ info.uncompensatedScoreRatio, info.uncompensatedScore,
humanize.Bytes.Int64(int64(totalCompensatedSize(
p.vers.Levels[info.level].Iter(),
))),
@@ -1273,12 +1322,12 @@ func (p *compactionPickerByScore) pickAuto(env compactionEnv) (pc *pickedCompact
pc.startLevel.level, pc.outputLevel.level, buf.String())
}
- // Check for a score-based compaction. "scores" has been sorted in order of
- // decreasing score. For each level with a score >= 1, we attempt to find a
- // compaction anchored at at that level.
+ // Check for a score-based compaction. candidateLevelInfos are first sorted
+ // by whether they should be compacted, so if we find a level which shouldn't
+ // be compacted, we can break early.
for i := range scores {
info := &scores[i]
- if info.score < 1 {
+ if !info.shouldCompact() {
break
}
if info.level == numLevels-1 {
@@ -1291,7 +1340,7 @@ func (p *compactionPickerByScore) pickAuto(env compactionEnv) (pc *pickedCompact
// concurrently.
if pc != nil && !inputRangeAlreadyCompacting(env, pc) {
p.addScoresToPickedCompactionMetrics(pc, scores)
- pc.score = info.score
+ pc.score = info.compensatedScoreRatio
// TODO(bananabrick): Create an EventListener for logCompaction.
if false {
logCompaction(pc)
@@ -1312,7 +1361,7 @@ func (p *compactionPickerByScore) pickAuto(env compactionEnv) (pc *pickedCompact
// Fail-safe to protect against compacting the same sstable concurrently.
if pc != nil && !inputRangeAlreadyCompacting(env, pc) {
p.addScoresToPickedCompactionMetrics(pc, scores)
- pc.score = info.score
+ pc.score = info.compensatedScoreRatio
// TODO(bananabrick): Create an EventListener for logCompaction.
if false {
logCompaction(pc)
@@ -1388,7 +1437,7 @@ func (p *compactionPickerByScore) addScoresToPickedCompactionMetrics(
inputIdx := 0
for i := range infoByLevel {
if pc.inputs[inputIdx].level == infoByLevel[i].level {
- pc.pickerMetrics.scores[inputIdx] = infoByLevel[i].score
+ pc.pickerMetrics.scores[inputIdx] = infoByLevel[i].compensatedScoreRatio
inputIdx++
}
if inputIdx == len(pc.inputs) {
diff --git a/vendor/github.com/cockroachdb/pebble/db.go b/vendor/github.com/cockroachdb/pebble/db.go
index ac9372a781..80bef7ee16 100644
--- a/vendor/github.com/cockroachdb/pebble/db.go
+++ b/vendor/github.com/cockroachdb/pebble/db.go
@@ -559,9 +559,7 @@ func (d *DB) getInternal(key []byte, b *Batch, s *Snapshot) ([]byte, io.Closer,
get := &buf.get
*get = getIter{
logger: d.opts.Logger,
- cmp: d.cmp,
- equal: d.equal,
- split: d.split,
+ comparer: d.opts.Comparer,
newIters: d.newIters,
snapshot: seqNum,
key: key,
@@ -1418,8 +1416,7 @@ func (i *Iterator) constructPointIter(
addLevelIterForFiles := func(files manifest.LevelIterator, level manifest.Level) {
li := &levels[levelsIndex]
- li.init(
- ctx, i.opts, i.comparer.Compare, i.comparer.Split, i.newIters, files, level, internalOpts)
+ li.init(ctx, i.opts, &i.comparer, i.newIters, files, level, internalOpts)
li.initRangeDel(&mlevels[mlevelsIndex].rangeDelIter)
li.initBoundaryContext(&mlevels[mlevelsIndex].levelIterBoundaryContext)
li.initCombinedIterState(&i.lazyCombinedIter.combinedIterState)
diff --git a/vendor/github.com/cockroachdb/pebble/flushable.go b/vendor/github.com/cockroachdb/pebble/flushable.go
index cdc679923f..09abee385b 100644
--- a/vendor/github.com/cockroachdb/pebble/flushable.go
+++ b/vendor/github.com/cockroachdb/pebble/flushable.go
@@ -117,8 +117,7 @@ type flushableList []*flushableEntry
// ingesting sstables which are added to the flushable list.
type ingestedFlushable struct {
files []physicalMeta
- cmp Compare
- split Split
+ comparer *Comparer
newIters tableNewIters
newRangeKeyIters keyspan.TableNewSpanIter
@@ -131,8 +130,7 @@ type ingestedFlushable struct {
func newIngestedFlushable(
files []*fileMetadata,
- cmp Compare,
- split Split,
+ comparer *Comparer,
newIters tableNewIters,
newRangeKeyIters keyspan.TableNewSpanIter,
) *ingestedFlushable {
@@ -147,12 +145,11 @@ func newIngestedFlushable(
ret := &ingestedFlushable{
files: physicalFiles,
- cmp: cmp,
- split: split,
+ comparer: comparer,
newIters: newIters,
newRangeKeyIters: newRangeKeyIters,
// slice is immutable and can be set once and used many times.
- slice: manifest.NewLevelSliceKeySorted(cmp, files),
+ slice: manifest.NewLevelSliceKeySorted(comparer.Compare, files),
hasRangeKeys: hasRangeKeys,
}
@@ -173,7 +170,7 @@ func (s *ingestedFlushable) newIter(o *IterOptions) internalIterator {
// aren't truly levels in the lsm. Right now, the encoding only supports
// L0 sublevels, and the rest of the levels in the lsm.
return newLevelIter(
- opts, s.cmp, s.split, s.newIters, s.slice.Iter(), manifest.Level(0), internalIterOpts{},
+ opts, s.comparer, s.newIters, s.slice.Iter(), manifest.Level(0), internalIterOpts{},
)
}
@@ -206,7 +203,7 @@ func (s *ingestedFlushable) constructRangeDelIter(
// surface range deletes is more efficient.
func (s *ingestedFlushable) newRangeDelIter(_ *IterOptions) keyspan.FragmentIterator {
return keyspan.NewLevelIter(
- keyspan.SpanIterOptions{}, s.cmp,
+ keyspan.SpanIterOptions{}, s.comparer.Compare,
s.constructRangeDelIter, s.slice.Iter(), manifest.Level(0),
manifest.KeyTypePoint,
)
@@ -219,7 +216,7 @@ func (s *ingestedFlushable) newRangeKeyIter(o *IterOptions) keyspan.FragmentIter
}
return keyspan.NewLevelIter(
- keyspan.SpanIterOptions{}, s.cmp, s.newRangeKeyIters,
+ keyspan.SpanIterOptions{}, s.comparer.Compare, s.newRangeKeyIters,
s.slice.Iter(), manifest.Level(0), manifest.KeyTypeRange,
)
}
diff --git a/vendor/github.com/cockroachdb/pebble/get_iter.go b/vendor/github.com/cockroachdb/pebble/get_iter.go
index ad814188d9..99c5d7c52c 100644
--- a/vendor/github.com/cockroachdb/pebble/get_iter.go
+++ b/vendor/github.com/cockroachdb/pebble/get_iter.go
@@ -19,9 +19,7 @@ import (
// lazily.
type getIter struct {
logger Logger
- cmp Compare
- equal Equal
- split Split
+ comparer *Comparer
newIters tableNewIters
snapshot uint64
key []byte
@@ -84,7 +82,7 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) {
// key. Every call to levelIter.Next() potentially switches to a new
// table and thus reinitializes rangeDelIter.
if g.rangeDelIter != nil {
- g.tombstone = keyspan.Get(g.cmp, g.rangeDelIter, g.key)
+ g.tombstone = keyspan.Get(g.comparer.Compare, g.rangeDelIter, g.key)
if g.err = g.rangeDelIter.Close(); g.err != nil {
return nil, base.LazyValue{}
}
@@ -102,7 +100,7 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) {
g.iter = nil
return nil, base.LazyValue{}
}
- if g.equal(g.key, key.UserKey) {
+ if g.comparer.Equal(g.key, key.UserKey) {
if !key.Visible(g.snapshot, base.InternalKeySeqNumMax) {
g.iterKey, g.iterValue = g.iter.Next()
continue
@@ -160,7 +158,7 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) {
files := g.l0[n-1].Iter()
g.l0 = g.l0[:n-1]
iterOpts := IterOptions{logger: g.logger, snapshotForHideObsoletePoints: g.snapshot}
- g.levelIter.init(context.Background(), iterOpts, g.cmp, g.split, g.newIters,
+ g.levelIter.init(context.Background(), iterOpts, g.comparer, g.newIters,
files, manifest.L0Sublevel(n), internalIterOpts{})
g.levelIter.initRangeDel(&g.rangeDelIter)
bc := levelIterBoundaryContext{}
@@ -170,8 +168,8 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) {
// Compute the key prefix for bloom filtering if split function is
// specified, or use the user key as default.
prefix := g.key
- if g.split != nil {
- prefix = g.key[:g.split(g.key)]
+ if g.comparer.Split != nil {
+ prefix = g.key[:g.comparer.Split(g.key)]
}
g.iterKey, g.iterValue = g.iter.SeekPrefixGE(prefix, g.key, base.SeekGEFlagsNone)
if bc.isSyntheticIterBoundsKey || bc.isIgnorableBoundaryKey {
@@ -192,7 +190,7 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) {
}
iterOpts := IterOptions{logger: g.logger, snapshotForHideObsoletePoints: g.snapshot}
- g.levelIter.init(context.Background(), iterOpts, g.cmp, g.split, g.newIters,
+ g.levelIter.init(context.Background(), iterOpts, g.comparer, g.newIters,
g.version.Levels[g.level].Iter(), manifest.Level(g.level), internalIterOpts{})
g.levelIter.initRangeDel(&g.rangeDelIter)
bc := levelIterBoundaryContext{}
@@ -203,8 +201,8 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) {
// Compute the key prefix for bloom filtering if split function is
// specified, or use the user key as default.
prefix := g.key
- if g.split != nil {
- prefix = g.key[:g.split(g.key)]
+ if g.comparer.Split != nil {
+ prefix = g.key[:g.comparer.Split(g.key)]
}
g.iterKey, g.iterValue = g.iter.SeekPrefixGE(prefix, g.key, base.SeekGEFlagsNone)
if bc.isSyntheticIterBoundsKey || bc.isIgnorableBoundaryKey {
diff --git a/vendor/github.com/cockroachdb/pebble/ingest.go b/vendor/github.com/cockroachdb/pebble/ingest.go
index e650ac7702..da34783442 100644
--- a/vendor/github.com/cockroachdb/pebble/ingest.go
+++ b/vendor/github.com/cockroachdb/pebble/ingest.go
@@ -797,16 +797,21 @@ func overlapWithIterator(
return computeOverlapWithSpans(*rangeDelIter)
}
+// ingestTargetLevel returns the target level for a file being ingested.
+// If suggestSplit is true, it accounts for ingest-time splitting as part of
+// its target level calculation, and if a split candidate is found, that file
+// is returned as the splitFile.
func ingestTargetLevel(
newIters tableNewIters,
newRangeKeyIter keyspan.TableNewSpanIter,
iterOps IterOptions,
- cmp Compare,
+ comparer *Comparer,
v *version,
baseLevel int,
compactions map[*compaction]struct{},
meta *fileMetadata,
-) (int, error) {
+ suggestSplit bool,
+) (targetLevel int, splitFile *fileMetadata, err error) {
// Find the lowest level which does not have any files which overlap meta. We
// search from L0 to L6 looking for whether there are any files in the level
// which overlap meta. We want the "lowest" level (where lower means
@@ -821,6 +826,14 @@ func ingestTargetLevel(
// violate the sequence number invariant.
// - no file boundary overlap with level i, since that will violate the
// invariant that files do not overlap in levels i > 0.
+ // - if there is only a file overlap at a given level, and no data overlap,
+ // we can still slot a file at that level. We return the fileMetadata with
+ // which we have file boundary overlap (must be only one file, as sstable
+ // bounds are usually tight on user keys) and the caller is expected to split
+ // that sstable into two virtual sstables, allowing this file to go into that
+ // level. Note that if we have file boundary overlap with two files, which
+ // should only happen on rare occasions, we treat it as data overlap and
+ // don't use this optimization.
//
// The file boundary overlap check is simpler to conceptualize. Consider the
// following example, in which the ingested file lies completely before or
@@ -865,16 +878,14 @@ func ingestTargetLevel(
// existing point that falls within the ingested table bounds as being "data
// overlap".
- targetLevel := 0
-
// This assertion implicitly checks that we have the current version of
// the metadata.
if v.L0Sublevels == nil {
- return 0, errors.AssertionFailedf("could not read L0 sublevels")
+ return 0, nil, errors.AssertionFailedf("could not read L0 sublevels")
}
// Check for overlap over the keys of L0 by iterating over the sublevels.
for subLevel := 0; subLevel < len(v.L0SublevelFiles); subLevel++ {
- iter := newLevelIter(iterOps, cmp, nil /* split */, newIters,
+ iter := newLevelIter(iterOps, comparer, newIters,
v.L0Sublevels.Levels[subLevel].Iter(), manifest.Level(0), internalIterOpts{})
var rangeDelIter keyspan.FragmentIterator
@@ -884,7 +895,7 @@ func ingestTargetLevel(
levelIter := keyspan.LevelIter{}
levelIter.Init(
- keyspan.SpanIterOptions{}, cmp, newRangeKeyIter,
+ keyspan.SpanIterOptions{}, comparer.Compare, newRangeKeyIter,
v.L0Sublevels.Levels[subLevel].Iter(), manifest.Level(0), manifest.KeyTypeRange,
)
@@ -892,20 +903,20 @@ func ingestTargetLevel(
smallest: meta.Smallest,
largest: meta.Largest,
}
- overlap := overlapWithIterator(iter, &rangeDelIter, &levelIter, kr, cmp)
+ overlap := overlapWithIterator(iter, &rangeDelIter, &levelIter, kr, comparer.Compare)
err := iter.Close() // Closes range del iter as well.
err = firstError(err, levelIter.Close())
if err != nil {
- return 0, err
+ return 0, nil, err
}
if overlap {
- return targetLevel, nil
+ return targetLevel, nil, nil
}
}
level := baseLevel
for ; level < numLevels; level++ {
- levelIter := newLevelIter(iterOps, cmp, nil /* split */, newIters,
+ levelIter := newLevelIter(iterOps, comparer, newIters,
v.Levels[level].Iter(), manifest.Level(level), internalIterOpts{})
var rangeDelIter keyspan.FragmentIterator
// Pass in a non-nil pointer to rangeDelIter so that levelIter.findFileGE
@@ -914,7 +925,7 @@ func ingestTargetLevel(
rkeyLevelIter := &keyspan.LevelIter{}
rkeyLevelIter.Init(
- keyspan.SpanIterOptions{}, cmp, newRangeKeyIter,
+ keyspan.SpanIterOptions{}, comparer.Compare, newRangeKeyIter,
v.Levels[level].Iter(), manifest.Level(level), manifest.KeyTypeRange,
)
@@ -922,46 +933,68 @@ func ingestTargetLevel(
smallest: meta.Smallest,
largest: meta.Largest,
}
- overlap := overlapWithIterator(levelIter, &rangeDelIter, rkeyLevelIter, kr, cmp)
+ overlap := overlapWithIterator(levelIter, &rangeDelIter, rkeyLevelIter, kr, comparer.Compare)
err := levelIter.Close() // Closes range del iter as well.
err = firstError(err, rkeyLevelIter.Close())
if err != nil {
- return 0, err
+ return 0, nil, err
}
if overlap {
- return targetLevel, nil
+ return targetLevel, splitFile, nil
}
// Check boundary overlap.
- boundaryOverlaps := v.Overlaps(level, cmp, meta.Smallest.UserKey,
+ var candidateSplitFile *fileMetadata
+ boundaryOverlaps := v.Overlaps(level, comparer.Compare, meta.Smallest.UserKey,
meta.Largest.UserKey, meta.Largest.IsExclusiveSentinel())
if !boundaryOverlaps.Empty() {
- continue
+ // We are already guaranteed to not have any data overlaps with files
+ // in boundaryOverlaps, otherwise we'd have returned in the above if
+ // statements. Use this, plus boundaryOverlaps.Len() == 1 to detect for
+ // the case where we can slot this file into the current level despite
+ // a boundary overlap, by splitting one existing file into two virtual
+ // sstables.
+ if suggestSplit && boundaryOverlaps.Len() == 1 {
+ iter := boundaryOverlaps.Iter()
+ candidateSplitFile = iter.First()
+ } else {
+ // We either don't want to suggest ingest-time splits (i.e.
+ // !suggestSplit), or we boundary-overlapped with more than one file.
+ continue
+ }
}
- // Check boundary overlap with any ongoing compactions.
+ // Check boundary overlap with any ongoing compactions. We consider an
+ // overlapping compaction that's writing files to an output level as
+ // equivalent to boundary overlap with files in that output level.
//
- // We cannot check for data overlap with the new SSTs compaction will
- // produce since compaction hasn't been done yet. However, there's no need
- // to check since all keys in them will either be from c.startLevel or
- // c.outputLevel, both levels having their data overlap already tested
- // negative (else we'd have returned earlier).
+ // We cannot check for data overlap with the new SSTs compaction will produce
+ // since compaction hasn't been done yet. However, there's no need to check
+ // since all keys in them will be from levels in [c.startLevel,
+ // c.outputLevel], and all those levels have already had their data overlap
+ // tested negative (else we'd have returned earlier).
+ //
+ // An alternative approach would be to cancel these compactions and proceed
+ // with an ingest-time split on this level if necessary. However, compaction
+ // cancellation can result in significant wasted effort and is best avoided
+ // unless necessary.
overlaps := false
for c := range compactions {
if c.outputLevel == nil || level != c.outputLevel.level {
continue
}
- if cmp(meta.Smallest.UserKey, c.largest.UserKey) <= 0 &&
- cmp(meta.Largest.UserKey, c.smallest.UserKey) >= 0 {
+ if comparer.Compare(meta.Smallest.UserKey, c.largest.UserKey) <= 0 &&
+ comparer.Compare(meta.Largest.UserKey, c.smallest.UserKey) >= 0 {
overlaps = true
break
}
}
if !overlaps {
targetLevel = level
+ splitFile = candidateSplitFile
}
}
- return targetLevel, nil
+ return targetLevel, splitFile, nil
}
// Ingest ingests a set of sstables into the DB. Ingestion of the files is
@@ -1143,7 +1176,7 @@ func (d *DB) newIngestedFlushableEntry(
return nil, err
}
- f := newIngestedFlushable(meta, d.cmp, d.split, d.newIters, d.tableNewRangeKeyIter)
+ f := newIngestedFlushable(meta, d.opts.Comparer, d.newIters, d.tableNewRangeKeyIter)
// NB: The logNum/seqNum are the WAL number which we're writing this entry
// to and the sequence number within the WAL which we'll write this entry
@@ -1751,7 +1784,8 @@ func (d *DB) excise(
smallestPointKey.UserKey = firstRangeDel
rightFile.ExtendPointKeyBounds(d.cmp, smallestPointKey, largestPointKey)
}
- } else if m.HasRangeKeys && !exciseSpan.Contains(d.cmp, m.LargestRangeKey) {
+ }
+ if m.HasRangeKeys && !exciseSpan.Contains(d.cmp, m.LargestRangeKey) {
// This file will contain range keys.
largestRangeKey := m.LargestRangeKey
if rangeKeyIter == nil {
@@ -1815,12 +1849,129 @@ type ingestTargetLevelFunc func(
newIters tableNewIters,
newRangeKeyIter keyspan.TableNewSpanIter,
iterOps IterOptions,
- cmp Compare,
+ comparer *Comparer,
v *version,
baseLevel int,
compactions map[*compaction]struct{},
meta *fileMetadata,
-) (int, error)
+ suggestSplit bool,
+) (int, *fileMetadata, error)
+
+type ingestSplitFile struct {
+ // ingestFile is the file being ingested.
+ ingestFile *fileMetadata
+ // splitFile is the file that needs to be split to allow ingestFile to slot
+ // into `level` level.
+ splitFile *fileMetadata
+ // The level where ingestFile will go (and where splitFile already is).
+ level int
+}
+
+// ingestSplit splits files specified in `files` and updates ve in-place to
+// account for existing files getting split into two virtual sstables. The map
+// `replacedFiles` contains an in-progress map of all files that have been
+// replaced with new virtual sstables in this version edit so far, which is also
+// updated in-place.
+//
+// d.mu as well as the manifest lock must be held when calling this method.
+func (d *DB) ingestSplit(
+ ve *versionEdit,
+ updateMetrics func(*fileMetadata, int, []newFileEntry),
+ files []ingestSplitFile,
+ replacedFiles map[base.FileNum][]newFileEntry,
+) error {
+ for _, s := range files {
+ // replacedFiles can be thought of as a tree, where we start iterating with
+ // s.splitFile and run its fileNum through replacedFiles, then find which of
+ // the replaced files overlaps with s.ingestFile, which becomes the new
+ // splitFile, then we check splitFile's replacements in replacedFiles again
+ // for overlap with s.ingestFile, and so on until we either can't find the
+ // current splitFile in replacedFiles (i.e. that's the file that now needs to
+ // be split), or we don't find a file that overlaps with s.ingestFile, which
+ // means a prior ingest split already produced enough room for s.ingestFile
+ // to go into this level without necessitating another ingest split.
+ splitFile := s.splitFile
+ for splitFile != nil {
+ replaced, ok := replacedFiles[splitFile.FileNum]
+ if !ok {
+ break
+ }
+ updatedSplitFile := false
+ for i := range replaced {
+ if replaced[i].Meta.Overlaps(d.cmp, s.ingestFile.Smallest.UserKey, s.ingestFile.Largest.UserKey, s.ingestFile.Largest.IsExclusiveSentinel()) {
+ if updatedSplitFile {
+ // This should never happen because the earlier ingestTargetLevel
+ // function only finds split file candidates that are guaranteed to
+ // have no data overlap, only boundary overlap. See the comments
+ // in that method to see the definitions of data vs boundary
+ // overlap. That, plus the fact that files in `replaced` are
+ // guaranteed to have file bounds that are tight on user keys
+ // (as that's what `d.excise` produces), means that the only case
+ // where we overlap with two or more files in `replaced` is if we
+ // actually had data overlap all along, or if the ingestion files
+ // were overlapping, either of which is an invariant violation.
+ panic("updated with two files in ingestSplit")
+ }
+ splitFile = replaced[i].Meta
+ updatedSplitFile = true
+ }
+ }
+ if !updatedSplitFile {
+ // None of the replaced files overlapped with the file being ingested.
+ // This can happen if we've already excised a span overlapping with
+ // this file, or if we have consecutive ingested files that can slide
+ // within the same gap between keys in an existing file. For instance,
+ // if an existing file has keys a and g and we're ingesting b-c, d-e,
+ // the first loop iteration will split the existing file into one that
+ // ends in a and another that starts at g, and the second iteration will
+ // fall into this case and require no splitting.
+ //
+ // No splitting necessary.
+ splitFile = nil
+ }
+ }
+ if splitFile == nil {
+ continue
+ }
+ // NB: excise operates on [start, end). We're splitting at [start, end]
+ // (assuming !s.ingestFile.Largest.IsExclusiveSentinel()). The conflation
+ // of exclusive vs inclusive end bounds should not make a difference here
+ // as we're guaranteed to not have any data overlap between splitFile and
+ // s.ingestFile, so panic if we do see a newly added file with an endKey
+ // equalling s.ingestFile.Largest, and !s.ingestFile.Largest.IsExclusiveSentinel()
+ added, err := d.excise(KeyRange{Start: s.ingestFile.Smallest.UserKey, End: s.ingestFile.Largest.UserKey}, splitFile, ve, s.level)
+ if err != nil {
+ return err
+ }
+ if _, ok := ve.DeletedFiles[deletedFileEntry{
+ Level: s.level,
+ FileNum: splitFile.FileNum,
+ }]; !ok {
+ panic("did not split file that was expected to be split")
+ }
+ replacedFiles[splitFile.FileNum] = added
+ for i := range added {
+ if s.ingestFile.Overlaps(d.cmp, added[i].Meta.Smallest.UserKey, added[i].Meta.Largest.UserKey, added[i].Meta.Largest.IsExclusiveSentinel()) {
+ panic("ingest-time split produced a file that overlaps with ingested file")
+ }
+ }
+ updateMetrics(splitFile, s.level, added)
+ }
+ // Flatten the version edit by removing any entries from ve.NewFiles that
+ // are also in ve.DeletedFiles.
+ newNewFiles := ve.NewFiles[:0]
+ for i := range ve.NewFiles {
+ fn := ve.NewFiles[i].Meta.FileNum
+ deEntry := deletedFileEntry{Level: ve.NewFiles[i].Level, FileNum: fn}
+ if _, ok := ve.DeletedFiles[deEntry]; ok {
+ delete(ve.DeletedFiles, deEntry)
+ } else {
+ newNewFiles = append(newNewFiles, ve.NewFiles[i])
+ }
+ }
+ ve.NewFiles = newNewFiles
+ return nil
+}
func (d *DB) ingestApply(
jobID int,
@@ -1835,7 +1986,7 @@ func (d *DB) ingestApply(
ve := &versionEdit{
NewFiles: make([]newFileEntry, lr.fileCount),
}
- if exciseSpan.Valid() {
+ if exciseSpan.Valid() || (d.opts.Experimental.IngestSplit != nil && d.opts.Experimental.IngestSplit()) {
ve.DeletedFiles = map[manifest.DeletedFileEntry]*manifest.FileMetadata{}
}
metrics := make(map[int]*LevelMetrics)
@@ -1860,9 +2011,17 @@ func (d *DB) ingestApply(
}
}
+ shouldIngestSplit := d.opts.Experimental.IngestSplit != nil &&
+ d.opts.Experimental.IngestSplit() && d.FormatMajorVersion() >= FormatVirtualSSTables
current := d.mu.versions.currentVersion()
baseLevel := d.mu.versions.picker.getBaseLevel()
iterOps := IterOptions{logger: d.opts.Logger}
+ // filesToSplit is a list where each element is a pair consisting of a file
+ // being ingested and a file being split to make room for an ingestion into
+ // that level. Each ingested file will appear at most once in this list. It
+ // is possible for split files to appear twice in this list.
+ filesToSplit := make([]ingestSplitFile, 0)
+ checkCompactions := false
for i := 0; i < lr.fileCount; i++ {
// Determine the lowest level in the LSM for which the sstable doesn't
// overlap any existing files in the level.
@@ -1895,6 +2054,7 @@ func (d *DB) ingestApply(
if externalFile {
ve.CreatedBackingTables = append(ve.CreatedBackingTables, m.FileBacking)
}
+ var splitFile *fileMetadata
if exciseSpan.Valid() && exciseSpan.Contains(d.cmp, m.Smallest) && exciseSpan.Contains(d.cmp, m.Largest) {
// This file fits perfectly within the excise span. We can slot it at
// L6, or sharedLevelsStart - 1 if we have shared files.
@@ -1907,7 +2067,31 @@ func (d *DB) ingestApply(
f.Level = 6
}
} else {
- f.Level, err = findTargetLevel(d.newIters, d.tableNewRangeKeyIter, iterOps, d.cmp, current, baseLevel, d.mu.compact.inProgress, m)
+ // TODO(bilal): findTargetLevel does disk IO (reading files for data
+ // overlap) even though we're holding onto d.mu. Consider unlocking
+ // d.mu while we do this. We already hold versions.logLock so we should
+ // not see any version applications while we're at this. The one
+ // complication here would be pulling out the mu.compact.inProgress
+ // check from findTargetLevel, as that requires d.mu to be held.
+ f.Level, splitFile, err = findTargetLevel(
+ d.newIters, d.tableNewRangeKeyIter, iterOps, d.opts.Comparer, current, baseLevel, d.mu.compact.inProgress, m, shouldIngestSplit)
+ }
+
+ if splitFile != nil {
+ if invariants.Enabled {
+ if lf := current.Levels[f.Level].Find(d.cmp, splitFile); lf == nil {
+ panic("splitFile returned is not in level it should be")
+ }
+ }
+ // We take advantage of the fact that we won't drop the db mutex
+ // between now and the call to logAndApply. So, no files should
+ // get added to a new in-progress compaction at this point. We can
+ // avoid having to iterate on in-progress compactions to cancel them
+ // if none of the files being split have a compacting state.
+ if splitFile.IsCompacting() {
+ checkCompactions = true
+ }
+ filesToSplit = append(filesToSplit, ingestSplitFile{ingestFile: m, splitFile: splitFile, level: f.Level})
}
}
if err != nil {
@@ -1925,6 +2109,26 @@ func (d *DB) ingestApply(
levelMetrics.BytesIngested += m.Size
levelMetrics.TablesIngested++
}
+ // replacedFiles maps files excised due to exciseSpan (or splitFiles returned
+ // by ingestTargetLevel), to files that were created to replace it. This map
+ // is used to resolve references to split files in filesToSplit, as it is
+ // possible for a file that we want to split to no longer exist or have a
+ // newer fileMetadata due to a split induced by another ingestion file, or an
+ // excise.
+ replacedFiles := make(map[base.FileNum][]newFileEntry)
+ updateLevelMetricsOnExcise := func(m *fileMetadata, level int, added []newFileEntry) {
+ levelMetrics := metrics[level]
+ if levelMetrics == nil {
+ levelMetrics = &LevelMetrics{}
+ metrics[level] = levelMetrics
+ }
+ levelMetrics.NumFiles--
+ levelMetrics.Size -= int64(m.Size)
+ for i := range added {
+ levelMetrics.NumFiles++
+ levelMetrics.Size += int64(added[i].Meta.Size)
+ }
+ }
if exciseSpan.Valid() {
// Iterate through all levels and find files that intersect with exciseSpan.
//
@@ -1946,7 +2150,7 @@ func (d *DB) ingestApply(
iter := overlaps.Iter()
for m := iter.First(); m != nil; m = iter.Next() {
- excised, err := d.excise(exciseSpan, m, ve, level)
+ newFiles, err := d.excise(exciseSpan, m, ve, level)
if err != nil {
return nil, err
}
@@ -1958,19 +2162,19 @@ func (d *DB) ingestApply(
// We did not excise this file.
continue
}
- levelMetrics := metrics[level]
- if levelMetrics == nil {
- levelMetrics = &LevelMetrics{}
- metrics[level] = levelMetrics
- }
- levelMetrics.NumFiles--
- levelMetrics.Size -= int64(m.Size)
- for i := range excised {
- levelMetrics.NumFiles++
- levelMetrics.Size += int64(excised[i].Meta.Size)
- }
+ replacedFiles[m.FileNum] = newFiles
+ updateLevelMetricsOnExcise(m, level, newFiles)
}
}
+ }
+ if len(filesToSplit) > 0 {
+ // For the same reasons as the above call to excise, we hold the db mutex
+ // while calling this method.
+ if err := d.ingestSplit(ve, updateLevelMetricsOnExcise, filesToSplit, replacedFiles); err != nil {
+ return nil, err
+ }
+ }
+ if len(filesToSplit) > 0 || exciseSpan.Valid() {
for c := range d.mu.compact.inProgress {
if c.versionEditApplied {
continue
@@ -1985,22 +2189,41 @@ func (d *DB) ingestApply(
if exciseSpan.OverlapsInternalKeyRange(d.cmp, c.smallest, c.largest) {
c.cancel.Store(true)
}
+ // Check if this compaction's inputs have been replaced due to an
+ // ingest-time split. In that case, cancel the compaction as a newly picked
+ // compaction would need to include any new files that slid in between
+ // previously-existing files. Note that we cancel any compaction that has a
+ // file that was ingest-split as an input, even if it started before this
+ // ingestion.
+ if checkCompactions {
+ for i := range c.inputs {
+ iter := c.inputs[i].files.Iter()
+ for f := iter.First(); f != nil; f = iter.Next() {
+ if _, ok := replacedFiles[f.FileNum]; ok {
+ c.cancel.Store(true)
+ break
+ }
+ }
+ }
+ }
}
// Check for any EventuallyFileOnlySnapshots that could be watching for
// an excise on this span.
- for s := d.mu.snapshots.root.next; s != &d.mu.snapshots.root; s = s.next {
- if s.efos == nil {
- continue
- }
- efos := s.efos
- // TODO(bilal): We can make this faster by taking advantage of the sorted
- // nature of protectedRanges to do a sort.Search, or even maintaining a
- // global list of all protected ranges instead of having to peer into every
- // snapshot.
- for i := range efos.protectedRanges {
- if efos.protectedRanges[i].OverlapsKeyRange(d.cmp, exciseSpan) {
- efos.excised.Store(true)
- break
+ if exciseSpan.Valid() {
+ for s := d.mu.snapshots.root.next; s != &d.mu.snapshots.root; s = s.next {
+ if s.efos == nil {
+ continue
+ }
+ efos := s.efos
+ // TODO(bilal): We can make this faster by taking advantage of the sorted
+ // nature of protectedRanges to do a sort.Search, or even maintaining a
+ // global list of all protected ranges instead of having to peer into every
+ // snapshot.
+ for i := range efos.protectedRanges {
+ if efos.protectedRanges[i].OverlapsKeyRange(d.cmp, exciseSpan) {
+ efos.excised.Store(true)
+ break
+ }
}
}
}
diff --git a/vendor/github.com/cockroachdb/pebble/internal/manifest/version.go b/vendor/github.com/cockroachdb/pebble/internal/manifest/version.go
index 0c75b13e92..265bcabd56 100644
--- a/vendor/github.com/cockroachdb/pebble/internal/manifest/version.go
+++ b/vendor/github.com/cockroachdb/pebble/internal/manifest/version.go
@@ -49,6 +49,9 @@ type TableStats struct {
// The number of point and range deletion entries in the table.
NumDeletions uint64
// NumRangeKeySets is the total number of range key sets in the table.
+ //
+ // NB: If there's a chance that the sstable contains any range key sets,
+ // then NumRangeKeySets must be > 0.
NumRangeKeySets uint64
// Estimate of the total disk space that may be dropped by this table's
// point deletions by compacting them.
@@ -173,11 +176,9 @@ type FileMetadata struct {
// Size is the size of the file, in bytes. Size is an approximate value for
// virtual sstables.
//
- // INVARIANT: when !FileMetadata.Virtual, Size == FileBacking.Size.
- //
- // TODO(bananabrick): Size is currently used in metrics, and for many key
- // Pebble level heuristics. Make sure that the heuristics will still work
- // appropriately with an approximate value of size.
+ // INVARIANTS:
+ // - When !FileMetadata.Virtual, Size == FileBacking.Size.
+ // - Size should be non-zero. Size 0 virtual sstables must not be created.
Size uint64
// File creation time in seconds since the epoch (1970-01-01 00:00:00
// UTC). For ingested sstables, this corresponds to the time the file was
@@ -302,6 +303,7 @@ type PhysicalFileMeta struct {
// The underlying file's size is stored in FileBacking.Size, though it could
// also be estimated or could correspond to just the referenced portion of
// a file (eg. if the file originated on another node).
+// - Size must be > 0.
// - SmallestSeqNum and LargestSeqNum are loose bounds for virtual sstables.
// This means that all keys in the virtual sstable must have seqnums within
// [SmallestSeqNum, LargestSeqNum], however there's no guarantee that there's
diff --git a/vendor/github.com/cockroachdb/pebble/iterator.go b/vendor/github.com/cockroachdb/pebble/iterator.go
index 8e74c1773f..d6117a5569 100644
--- a/vendor/github.com/cockroachdb/pebble/iterator.go
+++ b/vendor/github.com/cockroachdb/pebble/iterator.go
@@ -550,6 +550,21 @@ func (i *Iterator) findNextEntry(limit []byte) {
return
}
+ // If the user has configured a SkipPoint function, invoke it to see
+ // whether we should skip over the current user key.
+ if i.opts.SkipPoint != nil && key.Kind() != InternalKeyKindRangeKeySet && i.opts.SkipPoint(i.iterKey.UserKey) {
+ // NB: We could call nextUserKey, but in some cases the SkipPoint
+ // predicate function might be cheaper than nextUserKey's key copy
+ // and key comparison. This should be the case for MVCC suffix
+ // comparisons, for example. In the future, we could expand the
+ // SkipPoint interface to give the implementor more control over
+ // whether we skip over just the internal key, the user key, or even
+ // the key prefix.
+ i.stats.ForwardStepCount[InternalIterCall]++
+ i.iterKey, i.iterValue = i.iter.Next()
+ continue
+ }
+
switch key.Kind() {
case InternalKeyKindRangeKeySet:
// Save the current key.
@@ -619,6 +634,13 @@ func (i *Iterator) findNextEntry(limit []byte) {
}
func (i *Iterator) nextPointCurrentUserKey() bool {
+ // If the user has configured a SkipPoint function and the current user key
+ // would be skipped by it, there's no need to step forward looking for a
+ // point key. If we were to find one, it should be skipped anyways.
+ if i.opts.SkipPoint != nil && i.opts.SkipPoint(i.key) {
+ return false
+ }
+
i.pos = iterPosCurForward
i.iterKey, i.iterValue = i.iter.Next()
@@ -911,6 +933,26 @@ func (i *Iterator) findPrevEntry(limit []byte) {
}
}
+ // If the user has configured a SkipPoint function, invoke it to see
+ // whether we should skip over the current user key.
+ if i.opts.SkipPoint != nil && key.Kind() != InternalKeyKindRangeKeySet && i.opts.SkipPoint(key.UserKey) {
+ // NB: We could call prevUserKey, but in some cases the SkipPoint
+ // predicate function might be cheaper than prevUserKey's key copy
+ // and key comparison. This should be the case for MVCC suffix
+ // comparisons, for example. In the future, we could expand the
+ // SkipPoint interface to give the implementor more control over
+ // whether we skip over just the internal key, the user key, or even
+ // the key prefix.
+ i.stats.ReverseStepCount[InternalIterCall]++
+ i.iterKey, i.iterValue = i.iter.Prev()
+ if limit != nil && i.iterKey != nil && i.cmp(limit, i.iterKey.UserKey) > 0 && !i.rangeKeyWithinLimit(limit) {
+ i.iterValidityState = IterAtLimit
+ i.pos = iterPosCurReversePaused
+ return
+ }
+ continue
+ }
+
switch key.Kind() {
case InternalKeyKindRangeKeySet:
// Range key start boundary markers are interleaved with the maximum
@@ -948,12 +990,12 @@ func (i *Iterator) findPrevEntry(limit []byte) {
// Compare with the limit. We could optimize by only checking when
// we step to the previous user key, but detecting that requires a
// comparison too. Note that this position may already passed a
- // number of versions of this user key, but they are all deleted,
- // so the fact that a subsequent Prev*() call will not see them is
+ // number of versions of this user key, but they are all deleted, so
+ // the fact that a subsequent Prev*() call will not see them is
// harmless. Also note that this is the only place in the loop,
- // other than the firstLoopIter case above, where we could step
- // to a different user key and start processing it for returning
- // to the caller.
+ // other than the firstLoopIter and SkipPoint cases above, where we
+ // could step to a different user key and start processing it for
+ // returning to the caller.
if limit != nil && i.iterKey != nil && i.cmp(limit, i.iterKey.UserKey) > 0 && !i.rangeKeyWithinLimit(limit) {
i.iterValidityState = IterAtLimit
i.pos = iterPosCurReversePaused
@@ -2428,7 +2470,8 @@ func (i *Iterator) SetOptions(o *IterOptions) {
// If either options specify block property filters for an iterator stack,
// reconstruct it.
if i.pointIter != nil && (closeBoth || len(o.PointKeyFilters) > 0 || len(i.opts.PointKeyFilters) > 0 ||
- o.RangeKeyMasking.Filter != nil || i.opts.RangeKeyMasking.Filter != nil) {
+ o.RangeKeyMasking.Filter != nil || i.opts.RangeKeyMasking.Filter != nil || o.SkipPoint != nil ||
+ i.opts.SkipPoint != nil) {
i.err = firstError(i.err, i.pointIter.Close())
i.pointIter = nil
}
diff --git a/vendor/github.com/cockroachdb/pebble/level_checker.go b/vendor/github.com/cockroachdb/pebble/level_checker.go
index 4cec440c30..2901c4559f 100644
--- a/vendor/github.com/cockroachdb/pebble/level_checker.go
+++ b/vendor/github.com/cockroachdb/pebble/level_checker.go
@@ -345,7 +345,7 @@ func iterateAndCheckTombstones(
type checkConfig struct {
logger Logger
- cmp Compare
+ comparer *Comparer
readState *readState
newIters tableNewIters
seqNum uint64
@@ -354,6 +354,9 @@ type checkConfig struct {
formatKey base.FormatKey
}
+// cmp is shorthand for comparer.Compare.
+func (c *checkConfig) cmp(a, b []byte) int { return c.comparer.Compare(a, b) }
+
func checkRangeTombstones(c *checkConfig) error {
var level int
var tombstones []tombstoneWithLevel
@@ -571,7 +574,7 @@ func (d *DB) CheckLevels(stats *CheckLevelsStats) error {
checkConfig := &checkConfig{
logger: d.opts.Logger,
- cmp: d.cmp,
+ comparer: d.opts.Comparer,
readState: readState,
newIters: d.newIters,
seqNum: seqNum,
@@ -639,7 +642,7 @@ func checkLevelsInternal(c *checkConfig) (err error) {
manifestIter := current.L0SublevelFiles[sublevel].Iter()
iterOpts := IterOptions{logger: c.logger}
li := &levelIter{}
- li.init(context.Background(), iterOpts, c.cmp, nil /* split */, c.newIters, manifestIter,
+ li.init(context.Background(), iterOpts, c.comparer, c.newIters, manifestIter,
manifest.L0Sublevel(sublevel), internalIterOpts{})
li.initRangeDel(&mlevelAlloc[0].rangeDelIter)
li.initBoundaryContext(&mlevelAlloc[0].levelIterBoundaryContext)
@@ -653,7 +656,7 @@ func checkLevelsInternal(c *checkConfig) (err error) {
iterOpts := IterOptions{logger: c.logger}
li := &levelIter{}
- li.init(context.Background(), iterOpts, c.cmp, nil /* split */, c.newIters,
+ li.init(context.Background(), iterOpts, c.comparer, c.newIters,
current.Levels[level].Iter(), manifest.Level(level), internalIterOpts{})
li.initRangeDel(&mlevelAlloc[0].rangeDelIter)
li.initBoundaryContext(&mlevelAlloc[0].levelIterBoundaryContext)
diff --git a/vendor/github.com/cockroachdb/pebble/level_iter.go b/vendor/github.com/cockroachdb/pebble/level_iter.go
index b54c6f2107..ae6b045341 100644
--- a/vendor/github.com/cockroachdb/pebble/level_iter.go
+++ b/vendor/github.com/cockroachdb/pebble/level_iter.go
@@ -82,10 +82,11 @@ type levelIter struct {
// short-lived (since they pin sstables), (b) plumbing a context into every
// method is very painful, (c) they do not (yet) respect context
// cancellation and are only used for tracing.
- ctx context.Context
- logger Logger
- cmp Compare
- split Split
+ ctx context.Context
+ logger Logger
+ comparer *Comparer
+ cmp Compare
+ split Split
// The lower/upper bounds for iteration as specified at creation or the most
// recent call to SetBounds.
lower []byte
@@ -241,15 +242,14 @@ var _ base.InternalIterator = (*levelIter)(nil)
// parameter if the caller is never going to call SeekPrefixGE.
func newLevelIter(
opts IterOptions,
- cmp Compare,
- split Split,
+ comparer *Comparer,
newIters tableNewIters,
files manifest.LevelIterator,
level manifest.Level,
internalOpts internalIterOpts,
) *levelIter {
l := &levelIter{}
- l.init(context.Background(), opts, cmp, split, newIters, files, level,
+ l.init(context.Background(), opts, comparer, newIters, files, level,
internalOpts)
return l
}
@@ -257,8 +257,7 @@ func newLevelIter(
func (l *levelIter) init(
ctx context.Context,
opts IterOptions,
- cmp Compare,
- split Split,
+ comparer *Comparer,
newIters tableNewIters,
files manifest.LevelIterator,
level manifest.Level,
@@ -278,8 +277,9 @@ func (l *levelIter) init(
l.tableOpts.UseL6Filters = opts.UseL6Filters
l.tableOpts.level = l.level
l.tableOpts.snapshotForHideObsoletePoints = opts.snapshotForHideObsoletePoints
- l.cmp = cmp
- l.split = split
+ l.comparer = comparer
+ l.cmp = comparer.Compare
+ l.split = comparer.Split
l.iterFile = nil
l.newIters = newIters
l.files = files
diff --git a/vendor/github.com/cockroachdb/pebble/metrics.go b/vendor/github.com/cockroachdb/pebble/metrics.go
index e10fdf75a2..ccaa5a9d74 100644
--- a/vendor/github.com/cockroachdb/pebble/metrics.go
+++ b/vendor/github.com/cockroachdb/pebble/metrics.go
@@ -46,7 +46,8 @@ type LevelMetrics struct {
NumFiles int64
// The total size in bytes of the files in the level.
Size int64
- // The level's compaction score.
+ // The level's compaction score. This is the compensatedScoreRatio in the
+ // candidateLevelInfo.
Score float64
// The number of incoming bytes from other levels read during
// compactions. This excludes bytes moved and bytes ingested. For L0 this is
diff --git a/vendor/github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache/shared_cache.go b/vendor/github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache/shared_cache.go
index 84f79350d6..6d6409e9b2 100644
--- a/vendor/github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache/shared_cache.go
+++ b/vendor/github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache/shared_cache.go
@@ -130,9 +130,10 @@ func Open(
sizeBytes int64,
numShards int,
) (*Cache, error) {
- min := shardingBlockSize * int64(numShards)
- if sizeBytes < min {
- return nil, errors.Errorf("cache size %d lower than min %d", sizeBytes, min)
+ if minSize := shardingBlockSize * int64(numShards); sizeBytes < minSize {
+ // Up the size so that we have one block per shard. In practice, this should
+ // only happen in tests.
+ sizeBytes = minSize
}
c := &Cache{
diff --git a/vendor/github.com/cockroachdb/pebble/open.go b/vendor/github.com/cockroachdb/pebble/open.go
index 8da3e2ae51..7997087161 100644
--- a/vendor/github.com/cockroachdb/pebble/open.go
+++ b/vendor/github.com/cockroachdb/pebble/open.go
@@ -260,7 +260,7 @@ func Open(dirname string, opts *Options) (db *DB, _ error) {
}
// Create the DB.
- if err := d.mu.versions.create(jobID, dirname, opts, manifestMarker, setCurrent, &d.mu.Mutex); err != nil {
+ if err := d.mu.versions.create(jobID, dirname, opts, manifestMarker, setCurrent, d.FormatMajorVersion, &d.mu.Mutex); err != nil {
return nil, err
}
} else {
@@ -268,7 +268,7 @@ func Open(dirname string, opts *Options) (db *DB, _ error) {
return nil, errors.Wrapf(ErrDBAlreadyExists, "dirname=%q", dirname)
}
// Load the version set.
- if err := d.mu.versions.load(dirname, opts, manifestFileNum.FileNum(), manifestMarker, setCurrent, &d.mu.Mutex); err != nil {
+ if err := d.mu.versions.load(dirname, opts, manifestFileNum.FileNum(), manifestMarker, setCurrent, d.FormatMajorVersion, &d.mu.Mutex); err != nil {
return nil, err
}
if opts.ErrorIfNotPristine {
diff --git a/vendor/github.com/cockroachdb/pebble/options.go b/vendor/github.com/cockroachdb/pebble/options.go
index 7f1da0924f..eee41c9741 100644
--- a/vendor/github.com/cockroachdb/pebble/options.go
+++ b/vendor/github.com/cockroachdb/pebble/options.go
@@ -119,6 +119,16 @@ type IterOptions struct {
// false to skip scanning. This function must be thread-safe since the same
// function can be used by multiple iterators, if the iterator is cloned.
TableFilter func(userProps map[string]string) bool
+ // SkipPoint may be used to skip over point keys that don't match an
+ // arbitrary predicate during iteration. If set, the Iterator invokes
+ // SkipPoint for keys encountered. If SkipPoint returns true, the iterator
+ // will skip the key without yielding it to the iterator operation in
+ // progress.
+ //
+ // SkipPoint must be a pure function and always return the same result when
+ // provided the same arguments. The iterator may call SkipPoint multiple
+ // times for the same user key.
+ SkipPoint func(userKey []byte) bool
// PointKeyFilters can be used to avoid scanning tables and blocks in tables
// when iterating over point keys. This slice represents an intersection
// across all filters, i.e., all filters must indicate that the block is
@@ -540,6 +550,11 @@ type Options struct {
// concurrency slots as determined by the two options is chosen.
CompactionDebtConcurrency uint64
+ // IngestSplit, if it returns true, allows for ingest-time splitting of
+ // existing sstables into two virtual sstables to allow ingestion sstables to
+ // slot into a lower level than they otherwise would have.
+ IngestSplit func() bool
+
// ReadCompactionRate controls the frequency of read triggered
// compactions by adjusting `AllowedSeeks` in manifest.FileMetadata:
//
diff --git a/vendor/github.com/cockroachdb/pebble/scan_internal.go b/vendor/github.com/cockroachdb/pebble/scan_internal.go
index 721181c5cd..5eec6c20b7 100644
--- a/vendor/github.com/cockroachdb/pebble/scan_internal.go
+++ b/vendor/github.com/cockroachdb/pebble/scan_internal.go
@@ -763,7 +763,7 @@ func (i *scanInternalIterator) constructPointIter(memtables flushableList, buf *
rli := &rangeDelLevels[levelsIndex]
li.init(
- context.Background(), i.opts.IterOptions, i.comparer.Compare, i.comparer.Split, i.newIters, files, level,
+ context.Background(), i.opts.IterOptions, i.comparer, i.newIters, files, level,
internalIterOpts{})
li.initBoundaryContext(&mlevels[mlevelsIndex].levelIterBoundaryContext)
mlevels[mlevelsIndex].iter = li
diff --git a/vendor/github.com/cockroachdb/pebble/sstable/properties.go b/vendor/github.com/cockroachdb/pebble/sstable/properties.go
index 004d14ad35..6b224d62a9 100644
--- a/vendor/github.com/cockroachdb/pebble/sstable/properties.go
+++ b/vendor/github.com/cockroachdb/pebble/sstable/properties.go
@@ -25,10 +25,20 @@ var propBoolFalse = []byte{'0'}
var propOffsetTagMap = make(map[uintptr]string)
-func init() {
- t := reflect.TypeOf(Properties{})
+func generateTagMaps(t reflect.Type) {
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
+ if f.Type.Kind() == reflect.Struct {
+ if tag := f.Tag.Get("prop"); i == 0 && tag == "pebble.embbeded_common_properties" {
+ // CommonProperties struct embedded in Properties. Note that since
+ // CommonProperties is placed at the top of properties we can use
+ // the offsets of the fields within CommonProperties to determine
+ // the offsets of those fields within Properties.
+ generateTagMaps(f.Type)
+ continue
+ }
+ panic("pebble: unknown struct type in Properties")
+ }
if tag := f.Tag.Get("prop"); tag != "" {
switch f.Type.Kind() {
case reflect.Bool:
@@ -44,10 +54,75 @@ func init() {
}
}
+func init() {
+ t := reflect.TypeOf(Properties{})
+ generateTagMaps(t)
+}
+
+// CommonProperties holds properties for either a virtual or a physical sstable. This
+// can be used by code which doesn't care to make the distinction between physical
+// and virtual sstables properties.
+//
+// For virtual sstables, fields are constructed through extrapolation upon virtual
+// reader construction. See MakeVirtualReader for implementation details.
+//
+// NB: The values of these properties can affect correctness. For example,
+// if NumRangeKeySets == 0, but the sstable actually contains range keys, then
+// the iterators will behave incorrectly.
+type CommonProperties struct {
+ // The number of entries in this table.
+ NumEntries uint64 `prop:"rocksdb.num.entries"`
+ // Total raw key size.
+ RawKeySize uint64 `prop:"rocksdb.raw.key.size"`
+ // Total raw value size.
+ RawValueSize uint64 `prop:"rocksdb.raw.value.size"`
+ // Total raw key size of point deletion tombstones. This value is comparable
+ // to RawKeySize.
+ RawPointTombstoneKeySize uint64 `prop:"pebble.raw.point-tombstone.key.size"`
+ // Sum of the raw value sizes carried by point deletion tombstones
+ // containing size estimates. See the DeleteSized key kind. This value is
+ // comparable to Raw{Key,Value}Size.
+ RawPointTombstoneValueSize uint64 `prop:"pebble.raw.point-tombstone.value.size"`
+ // The number of point deletion entries ("tombstones") in this table that
+ // carry a size hint indicating the size of the value the tombstone deletes.
+ NumSizedDeletions uint64 `prop:"pebble.num.deletions.sized"`
+ // The number of deletion entries in this table, including both point and
+ // range deletions.
+ NumDeletions uint64 `prop:"rocksdb.deleted.keys"`
+ // The number of range deletions in this table.
+ NumRangeDeletions uint64 `prop:"rocksdb.num.range-deletions"`
+ // The number of RANGEKEYDELs in this table.
+ NumRangeKeyDels uint64 `prop:"pebble.num.range-key-dels"`
+ // The number of RANGEKEYSETs in this table.
+ NumRangeKeySets uint64 `prop:"pebble.num.range-key-sets"`
+ // Total size of value blocks and value index block. Only serialized if > 0.
+ ValueBlocksSize uint64 `prop:"pebble.value-blocks.size"`
+}
+
+// String is only used for testing purposes.
+func (c *CommonProperties) String() string {
+ var buf bytes.Buffer
+ v := reflect.ValueOf(*c)
+ loaded := make(map[uintptr]struct{})
+ writeProperties(loaded, v, &buf)
+ return buf.String()
+}
+
+// NumPointDeletions is the number of point deletions in the sstable. For virtual
+// sstables, this is an estimate.
+func (c *CommonProperties) NumPointDeletions() uint64 {
+ return c.NumDeletions - c.NumRangeDeletions
+}
+
// Properties holds the sstable property values. The properties are
// automatically populated during sstable creation and load from the properties
// meta block when an sstable is opened.
type Properties struct {
+ // CommonProperties needs to be at the top of the Properties struct so that the
+ // offsets of the fields in CommonProperties match the offsets of the embedded
+ // fields of CommonProperties in Properties.
+ CommonProperties `prop:"pebble.embbeded_common_properties"`
+
// The name of the comparer used in this table.
ComparerName string `prop:"rocksdb.comparator"`
// The compression algorithm used to compress blocks.
@@ -81,22 +156,8 @@ type Properties struct {
MergerName string `prop:"rocksdb.merge.operator"`
// The number of blocks in this table.
NumDataBlocks uint64 `prop:"rocksdb.num.data.blocks"`
- // The number of deletion entries in this table, including both point and
- // range deletions.
- NumDeletions uint64 `prop:"rocksdb.deleted.keys"`
- // The number of point deletion entries ("tombstones") in this table that
- // carry a size hint indicating the size of the value the tombstone deletes.
- NumSizedDeletions uint64 `prop:"pebble.num.deletions.sized"`
- // The number of entries in this table.
- NumEntries uint64 `prop:"rocksdb.num.entries"`
// The number of merge operands in the table.
NumMergeOperands uint64 `prop:"rocksdb.merge.operands"`
- // The number of range deletions in this table.
- NumRangeDeletions uint64 `prop:"rocksdb.num.range-deletions"`
- // The number of RANGEKEYDELs in this table.
- NumRangeKeyDels uint64 `prop:"pebble.num.range-key-dels"`
- // The number of RANGEKEYSETs in this table.
- NumRangeKeySets uint64 `prop:"pebble.num.range-key-sets"`
// The number of RANGEKEYUNSETs in this table.
NumRangeKeyUnsets uint64 `prop:"pebble.num.range-key-unsets"`
// The number of value blocks in this table. Only serialized if > 0.
@@ -111,21 +172,10 @@ type Properties struct {
// A comma separated list of names of the property collectors used in this
// table.
PropertyCollectorNames string `prop:"rocksdb.property.collectors"`
- // Total raw key size.
- RawKeySize uint64 `prop:"rocksdb.raw.key.size"`
- // Total raw key size of point deletion tombstones. This value is comparable
- // to RawKeySize.
- RawPointTombstoneKeySize uint64 `prop:"pebble.raw.point-tombstone.key.size"`
- // Sum of the raw value sizes carried by point deletion tombstones
- // containing size estimates. See the DeleteSized key kind. This value is
- // comparable to Raw{Key,Value}Size.
- RawPointTombstoneValueSize uint64 `prop:"pebble.raw.point-tombstone.value.size"`
// Total raw rangekey key size.
RawRangeKeyKeySize uint64 `prop:"pebble.raw.range-key.key.size"`
// Total raw rangekey value size.
RawRangeKeyValueSize uint64 `prop:"pebble.raw.range-key.value.size"`
- // Total raw value size.
- RawValueSize uint64 `prop:"rocksdb.raw.value.size"`
// The total number of keys in this table that were pinned by open snapshots.
SnapshotPinnedKeys uint64 `prop:"pebble.num.snapshot-pinned-keys"`
// The cumulative bytes of keys in this table that were pinned by
@@ -138,8 +188,6 @@ type Properties struct {
TopLevelIndexSize uint64 `prop:"rocksdb.top-level.index.size"`
// User collected properties.
UserProperties map[string]string
- // Total size of value blocks and value index block. Only serialized if > 0.
- ValueBlocksSize uint64 `prop:"pebble.value-blocks.size"`
// If filtering is enabled, was the filter created on the whole key.
WholeKeyFiltering bool `prop:"rocksdb.block.based.table.whole.key.filtering"`
@@ -160,12 +208,15 @@ func (p *Properties) NumRangeKeys() uint64 {
return p.NumRangeKeyDels + p.NumRangeKeySets + p.NumRangeKeyUnsets
}
-func (p *Properties) String() string {
- var buf bytes.Buffer
- v := reflect.ValueOf(*p)
+func writeProperties(loaded map[uintptr]struct{}, v reflect.Value, buf *bytes.Buffer) {
vt := v.Type()
for i := 0; i < v.NumField(); i++ {
ft := vt.Field(i)
+ if ft.Type.Kind() == reflect.Struct {
+ // Embedded struct within the properties.
+ writeProperties(loaded, v.Field(i), buf)
+ continue
+ }
tag := ft.Tag.Get("prop")
if tag == "" {
continue
@@ -175,25 +226,33 @@ func (p *Properties) String() string {
// TODO(peter): Use f.IsZero() when we can rely on go1.13.
if zero := reflect.Zero(f.Type()); zero.Interface() == f.Interface() {
// Skip printing of zero values which were not loaded from disk.
- if _, ok := p.Loaded[ft.Offset]; !ok {
+ if _, ok := loaded[ft.Offset]; !ok {
continue
}
}
- fmt.Fprintf(&buf, "%s: ", tag)
+ fmt.Fprintf(buf, "%s: ", tag)
switch ft.Type.Kind() {
case reflect.Bool:
- fmt.Fprintf(&buf, "%t\n", f.Bool())
+ fmt.Fprintf(buf, "%t\n", f.Bool())
case reflect.Uint32:
- fmt.Fprintf(&buf, "%d\n", f.Uint())
+ fmt.Fprintf(buf, "%d\n", f.Uint())
case reflect.Uint64:
- fmt.Fprintf(&buf, "%d\n", f.Uint())
+ fmt.Fprintf(buf, "%d\n", f.Uint())
case reflect.String:
- fmt.Fprintf(&buf, "%s\n", f.String())
+ fmt.Fprintf(buf, "%s\n", f.String())
default:
panic("not reached")
}
}
+}
+
+func (p *Properties) String() string {
+ var buf bytes.Buffer
+ v := reflect.ValueOf(*p)
+ writeProperties(p.Loaded, v, &buf)
+
+ // Write the UserProperties.
keys := make([]string, 0, len(p.UserProperties))
for key := range p.UserProperties {
keys = append(keys, key)
@@ -217,7 +276,7 @@ func (p *Properties) load(
for valid := i.First(); valid; valid = i.Next() {
if f, ok := propTagMap[string(i.Key().UserKey)]; ok {
p.Loaded[f.Offset] = struct{}{}
- field := v.FieldByIndex(f.Index)
+ field := v.FieldByName(f.Name)
switch f.Type.Kind() {
case reflect.Bool:
field.SetBool(bytes.Equal(i.Value(), propBoolTrue))
diff --git a/vendor/github.com/cockroachdb/pebble/sstable/reader.go b/vendor/github.com/cockroachdb/pebble/sstable/reader.go
index 1090227108..0cf865933a 100644
--- a/vendor/github.com/cockroachdb/pebble/sstable/reader.go
+++ b/vendor/github.com/cockroachdb/pebble/sstable/reader.go
@@ -174,6 +174,28 @@ func init() {
private.SSTableRawTombstonesOpt = rawTombstonesOpt{}
}
+// CommonReader abstracts functionality over a Reader or a VirtualReader. This
+// can be used by code which doesn't care to distinguish between a reader and a
+// virtual reader.
+type CommonReader interface {
+ NewRawRangeKeyIter() (keyspan.FragmentIterator, error)
+ NewRawRangeDelIter() (keyspan.FragmentIterator, error)
+ NewIterWithBlockPropertyFiltersAndContextEtc(
+ ctx context.Context, lower, upper []byte,
+ filterer *BlockPropertiesFilterer,
+ hideObsoletePoints, useFilterBlock bool,
+ stats *base.InternalIteratorStats,
+ rp ReaderProvider,
+ ) (Iterator, error)
+ NewCompactionIter(
+ bytesIterated *uint64,
+ rp ReaderProvider,
+ bufferPool *BufferPool,
+ ) (Iterator, error)
+ EstimateDiskUsage(start, end []byte) (uint64, error)
+ CommonProperties() *CommonProperties
+}
+
// Reader is a table reader.
type Reader struct {
readable objstorage.Readable
@@ -927,6 +949,11 @@ func (r *Reader) ValidateBlockChecksums() error {
return nil
}
+// CommonProperties implemented the CommonReader interface.
+func (r *Reader) CommonProperties() *CommonProperties {
+ return &r.Properties.CommonProperties
+}
+
// EstimateDiskUsage returns the total size of data blocks overlapping the range
// `[start, end]`. Even if a data block partially overlaps, or we cannot
// determine overlap due to abbreviated index keys, the full data block size is
diff --git a/vendor/github.com/cockroachdb/pebble/sstable/reader_virtual.go b/vendor/github.com/cockroachdb/pebble/sstable/reader_virtual.go
index 8ad5503b35..4fe905d5ca 100644
--- a/vendor/github.com/cockroachdb/pebble/sstable/reader_virtual.go
+++ b/vendor/github.com/cockroachdb/pebble/sstable/reader_virtual.go
@@ -22,13 +22,7 @@ import (
type VirtualReader struct {
vState virtualState
reader *Reader
- Properties struct {
- // RawKeySize, RawValueSize are set upon construction of a
- // VirtualReader. The values of the fields is extrapolated. See
- // MakeVirtualReader for implementation details.
- RawKeySize uint64
- RawValueSize uint64
- }
+ Properties CommonProperties
}
// Lightweight virtual sstable state which can be passed to sstable iterators.
@@ -39,6 +33,10 @@ type virtualState struct {
Compare Compare
}
+func ceilDiv(a, b uint64) uint64 {
+ return (a + b - 1) / b
+}
+
// MakeVirtualReader is used to contruct a reader which can read from virtual
// sstables.
func MakeVirtualReader(reader *Reader, meta manifest.VirtualFileMeta) VirtualReader {
@@ -57,11 +55,21 @@ func MakeVirtualReader(reader *Reader, meta manifest.VirtualFileMeta) VirtualRea
reader: reader,
}
- v.Properties.RawKeySize =
- (reader.Properties.RawKeySize * meta.Size) / meta.FileBacking.Size
- v.Properties.RawValueSize =
- (reader.Properties.RawValueSize * meta.Size) / meta.FileBacking.Size
-
+ v.Properties.RawKeySize = ceilDiv(reader.Properties.RawKeySize*meta.Size, meta.FileBacking.Size)
+ v.Properties.RawValueSize = ceilDiv(reader.Properties.RawValueSize*meta.Size, meta.FileBacking.Size)
+ v.Properties.NumEntries = ceilDiv(reader.Properties.NumEntries*meta.Size, meta.FileBacking.Size)
+ v.Properties.NumDeletions = ceilDiv(reader.Properties.NumDeletions*meta.Size, meta.FileBacking.Size)
+ v.Properties.NumRangeDeletions = ceilDiv(reader.Properties.NumRangeDeletions*meta.Size, meta.FileBacking.Size)
+ v.Properties.NumRangeKeyDels = ceilDiv(reader.Properties.NumRangeKeyDels*meta.Size, meta.FileBacking.Size)
+
+ // Note that we rely on NumRangeKeySets for correctness. If the sstable may
+ // contain range keys, then NumRangeKeySets must be > 0. ceilDiv works because
+ // meta.Size will not be 0 for virtual sstables.
+ v.Properties.NumRangeKeySets = ceilDiv(reader.Properties.NumRangeKeySets*meta.Size, meta.FileBacking.Size)
+ v.Properties.ValueBlocksSize = ceilDiv(reader.Properties.ValueBlocksSize*meta.Size, meta.FileBacking.Size)
+ v.Properties.NumSizedDeletions = ceilDiv(reader.Properties.NumSizedDeletions*meta.Size, meta.FileBacking.Size)
+ v.Properties.RawPointTombstoneKeySize = ceilDiv(reader.Properties.RawPointTombstoneKeySize*meta.Size, meta.FileBacking.Size)
+ v.Properties.RawPointTombstoneValueSize = ceilDiv(reader.Properties.RawPointTombstoneValueSize*meta.Size, meta.FileBacking.Size)
return v
}
@@ -187,3 +195,8 @@ func (v *VirtualReader) EstimateDiskUsage(start, end []byte) (uint64, error) {
_, f, l := v.vState.constrainBounds(start, end, true /* endInclusive */)
return v.reader.EstimateDiskUsage(f, l)
}
+
+// CommonProperties implements the CommonReader interface.
+func (v *VirtualReader) CommonProperties() *CommonProperties {
+ return &v.Properties
+}
diff --git a/vendor/github.com/cockroachdb/pebble/table_cache.go b/vendor/github.com/cockroachdb/pebble/table_cache.go
index 6bdef93687..a29f3cb7a4 100644
--- a/vendor/github.com/cockroachdb/pebble/table_cache.go
+++ b/vendor/github.com/cockroachdb/pebble/table_cache.go
@@ -194,6 +194,30 @@ func (c *tableCacheContainer) estimateSize(
return size, nil
}
+func createCommonReader(v *tableCacheValue, file *fileMetadata) sstable.CommonReader {
+ // TODO(bananabrick): We suffer an allocation if file is a virtual sstable.
+ var cr sstable.CommonReader = v.reader
+ if file.Virtual {
+ virtualReader := sstable.MakeVirtualReader(
+ v.reader, file.VirtualMeta(),
+ )
+ cr = &virtualReader
+ }
+ return cr
+}
+
+func (c *tableCacheContainer) withCommonReader(
+ meta *fileMetadata, fn func(sstable.CommonReader) error,
+) error {
+ s := c.tableCache.getShard(meta.FileBacking.DiskFileNum)
+ v := s.findNode(meta, &c.dbOpts)
+ defer s.unrefValue(v)
+ if v.err != nil {
+ return v.err
+ }
+ return fn(createCommonReader(v, meta))
+}
+
func (c *tableCacheContainer) withReader(meta physicalMeta, fn func(*sstable.Reader) error) error {
s := c.tableCache.getShard(meta.FileBacking.DiskFileNum)
v := s.findNode(meta.FileMetadata, &c.dbOpts)
@@ -432,24 +456,8 @@ func (c *tableCacheShard) newIters(
return nil, nil, err
}
- type iterCreator interface {
- NewRawRangeDelIter() (keyspan.FragmentIterator, error)
- NewIterWithBlockPropertyFiltersAndContextEtc(ctx context.Context, lower, upper []byte, filterer *sstable.BlockPropertiesFilterer, hideObsoletePoints, useFilterBlock bool, stats *base.InternalIteratorStats, rp sstable.ReaderProvider) (sstable.Iterator, error)
- NewCompactionIter(
- bytesIterated *uint64,
- rp sstable.ReaderProvider,
- bufferPool *sstable.BufferPool,
- ) (sstable.Iterator, error)
- }
-
- // TODO(bananabrick): We suffer an allocation if file is a virtual sstable.
- var ic iterCreator = v.reader
- if file.Virtual {
- virtualReader := sstable.MakeVirtualReader(
- v.reader, file.VirtualMeta(),
- )
- ic = &virtualReader
- }
+ // Note: This suffers an allocation for virtual sstables.
+ cr := createCommonReader(v, file)
provider := dbOpts.objProvider
// Check if this file is a foreign file.
@@ -460,7 +468,7 @@ func (c *tableCacheShard) newIters(
// NB: range-del iterator does not maintain a reference to the table, nor
// does it need to read from it after creation.
- rangeDelIter, err := ic.NewRawRangeDelIter()
+ rangeDelIter, err := cr.NewRawRangeDelIter()
if err != nil {
c.unrefValue(v)
return nil, nil, err
@@ -504,9 +512,9 @@ func (c *tableCacheShard) newIters(
hideObsoletePoints = true
}
if internalOpts.bytesIterated != nil {
- iter, err = ic.NewCompactionIter(internalOpts.bytesIterated, rp, internalOpts.bufferPool)
+ iter, err = cr.NewCompactionIter(internalOpts.bytesIterated, rp, internalOpts.bufferPool)
} else {
- iter, err = ic.NewIterWithBlockPropertyFiltersAndContextEtc(
+ iter, err = cr.NewIterWithBlockPropertyFiltersAndContextEtc(
ctx, opts.GetLowerBound(), opts.GetUpperBound(), filterer, hideObsoletePoints, useFilter,
internalOpts.stats, rp)
}
diff --git a/vendor/github.com/cockroachdb/pebble/table_stats.go b/vendor/github.com/cockroachdb/pebble/table_stats.go
index f20dbf7af3..26af3420b7 100644
--- a/vendor/github.com/cockroachdb/pebble/table_stats.go
+++ b/vendor/github.com/cockroachdb/pebble/table_stats.go
@@ -186,14 +186,9 @@ func (d *DB) loadNewFileStats(
continue
}
- if nf.Meta.Virtual {
- // cannot load virtual table stats
- continue
- }
-
stats, newHints, err := d.loadTableStats(
rs.current, nf.Level,
- nf.Meta.PhysicalMeta(),
+ nf.Meta,
)
if err != nil {
d.opts.EventListener.BackgroundError(err)
@@ -222,12 +217,6 @@ func (d *DB) scanReadStateTableStats(
for l, levelMetadata := range rs.current.Levels {
iter := levelMetadata.Iter()
for f := iter.First(); f != nil; f = iter.Next() {
- if f.Virtual {
- // TODO(bananabrick): Support stats collection for virtual
- // sstables.
- continue
- }
-
// NB: We're not holding d.mu which protects f.Stats, but only the
// active stats collection job updates f.Stats for active files,
// and we ensure only one goroutine runs it at a time through
@@ -236,11 +225,6 @@ func (d *DB) scanReadStateTableStats(
if f.StatsValid() {
continue
}
- // TODO(bilal): Remove this guard when table stats collection is
- // implemented for virtual sstables.
- if f.Virtual {
- continue
- }
// Limit how much work we do per read state. The older the read
// state is, the higher the likelihood files are no longer being
@@ -253,7 +237,7 @@ func (d *DB) scanReadStateTableStats(
}
stats, newHints, err := d.loadTableStats(
- rs.current, l, f.PhysicalMeta(),
+ rs.current, l, f,
)
if err != nil {
// Set `moreRemain` so we'll try again.
@@ -271,25 +255,22 @@ func (d *DB) scanReadStateTableStats(
return fill, hints, moreRemain
}
-// loadTableStats currently only supports stats collection for physical
-// sstables.
-//
-// TODO(bananabrick): Support stats collection for virtual sstables.
func (d *DB) loadTableStats(
- v *version, level int, meta physicalMeta,
+ v *version, level int, meta *fileMetadata,
) (manifest.TableStats, []deleteCompactionHint, error) {
var stats manifest.TableStats
var compactionHints []deleteCompactionHint
- err := d.tableCache.withReader(
- meta, func(r *sstable.Reader) (err error) {
- stats.NumEntries = r.Properties.NumEntries
- stats.NumDeletions = r.Properties.NumDeletions
- if r.Properties.NumPointDeletions() > 0 {
- if err = d.loadTablePointKeyStats(r, v, level, meta, &stats); err != nil {
+ err := d.tableCache.withCommonReader(
+ meta, func(r sstable.CommonReader) (err error) {
+ props := r.CommonProperties()
+ stats.NumEntries = props.NumEntries
+ stats.NumDeletions = props.NumDeletions
+ if props.NumPointDeletions() > 0 {
+ if err = d.loadTablePointKeyStats(props, v, level, meta, &stats); err != nil {
return
}
}
- if r.Properties.NumRangeDeletions > 0 || r.Properties.NumRangeKeyDels > 0 {
+ if props.NumRangeDeletions > 0 || props.NumRangeKeyDels > 0 {
if compactionHints, err = d.loadTableRangeDelStats(
r, v, level, meta, &stats,
); err != nil {
@@ -299,8 +280,8 @@ func (d *DB) loadTableStats(
// TODO(travers): Once we have real-world data, consider collecting
// additional stats that may provide improved heuristics for compaction
// picking.
- stats.NumRangeKeySets = r.Properties.NumRangeKeySets
- stats.ValueBlocksSize = r.Properties.ValueBlocksSize
+ stats.NumRangeKeySets = props.NumRangeKeySets
+ stats.ValueBlocksSize = props.ValueBlocksSize
return
})
if err != nil {
@@ -312,7 +293,11 @@ func (d *DB) loadTableStats(
// loadTablePointKeyStats calculates the point key statistics for the given
// table. The provided manifest.TableStats are updated.
func (d *DB) loadTablePointKeyStats(
- r *sstable.Reader, v *version, level int, meta physicalMeta, stats *manifest.TableStats,
+ props *sstable.CommonProperties,
+ v *version,
+ level int,
+ meta *fileMetadata,
+ stats *manifest.TableStats,
) error {
// TODO(jackson): If the file has a wide keyspace, the average
// value size beneath the entire file might not be representative
@@ -320,21 +305,21 @@ func (d *DB) loadTablePointKeyStats(
// We could write the ranges of 'clusters' of point tombstones to
// a sstable property and call averageValueSizeBeneath for each of
// these narrower ranges to improve the estimate.
- avgValLogicalSize, compressionRatio, err := d.estimateSizesBeneath(v, level, meta)
+ avgValLogicalSize, compressionRatio, err := d.estimateSizesBeneath(v, level, meta, props)
if err != nil {
return err
}
stats.PointDeletionsBytesEstimate =
- pointDeletionsBytesEstimate(meta.Size, &r.Properties, avgValLogicalSize, compressionRatio)
+ pointDeletionsBytesEstimate(meta.Size, props, avgValLogicalSize, compressionRatio)
return nil
}
// loadTableRangeDelStats calculates the range deletion and range key deletion
// statistics for the given table.
func (d *DB) loadTableRangeDelStats(
- r *sstable.Reader, v *version, level int, meta physicalMeta, stats *manifest.TableStats,
+ r sstable.CommonReader, v *version, level int, meta *fileMetadata, stats *manifest.TableStats,
) ([]deleteCompactionHint, error) {
- iter, err := newCombinedDeletionKeyspanIter(d.opts.Comparer, r, meta.FileMetadata)
+ iter, err := newCombinedDeletionKeyspanIter(d.opts.Comparer, r, meta)
if err != nil {
return nil, err
}
@@ -423,7 +408,7 @@ func (d *DB) loadTableRangeDelStats(
hintType: hintType,
start: make([]byte, len(start)),
end: make([]byte, len(end)),
- tombstoneFile: meta.FileMetadata,
+ tombstoneFile: meta,
tombstoneLevel: level,
tombstoneLargestSeqNum: s.LargestSeqNum(),
tombstoneSmallestSeqNum: s.SmallestSeqNum(),
@@ -437,12 +422,21 @@ func (d *DB) loadTableRangeDelStats(
}
func (d *DB) estimateSizesBeneath(
- v *version, level int, meta physicalMeta,
+ v *version, level int, meta *fileMetadata, fileProps *sstable.CommonProperties,
) (avgValueLogicalSize, compressionRatio float64, err error) {
// Find all files in lower levels that overlap with meta,
// summing their value sizes and entry counts.
- file := meta.FileMetadata
+ file := meta
var fileSum, keySum, valSum, entryCount uint64
+ // Include the file itself. This is important because in some instances, the
+ // computed compression ratio is applied to the tombstones contained within
+ // `meta` itself. If there are no files beneath `meta` in the LSM, we would
+ // calculate a compression ratio of 0 which is not accurate for the file's
+ // own tombstones.
+ fileSum += file.Size
+ entryCount += fileProps.NumEntries
+ keySum += fileProps.RawKeySize
+ valSum += fileProps.RawValueSize
addPhysicalTableStats := func(r *sstable.Reader) (err error) {
fileSum += file.Size
@@ -459,15 +453,6 @@ func (d *DB) estimateSizesBeneath(
return nil
}
- // Include the file itself. This is important because in some instances, the
- // computed compression ratio is applied to the tombstones contained within
- // `meta` itself. If there are no files beneath `meta` in the LSM, we would
- // calculate a compression ratio of 0 which is not accurate for the file's
- // own tombstones.
- if err = d.tableCache.withReader(meta, addPhysicalTableStats); err != nil {
- return 0, 0, err
- }
-
for l := level + 1; l < numLevels; l++ {
overlaps := v.Overlaps(l, d.cmp, meta.Smallest.UserKey,
meta.Largest.UserKey, meta.Largest.IsExclusiveSentinel())
@@ -637,8 +622,9 @@ func maybeSetStatsFromProperties(meta physicalMeta, props *sstable.Properties) b
// doesn't require any additional IO and since the number of point
// deletions in the file is low, the error introduced by this crude
// estimate is expected to be small.
- avgValSize, compressionRatio := estimatePhysicalSizes(meta.Size, props)
- pointEstimate = pointDeletionsBytesEstimate(meta.Size, props, avgValSize, compressionRatio)
+ commonProps := &props.CommonProperties
+ avgValSize, compressionRatio := estimatePhysicalSizes(meta.Size, commonProps)
+ pointEstimate = pointDeletionsBytesEstimate(meta.Size, commonProps, avgValSize, compressionRatio)
}
meta.Stats.NumEntries = props.NumEntries
@@ -652,7 +638,7 @@ func maybeSetStatsFromProperties(meta physicalMeta, props *sstable.Properties) b
}
func pointDeletionsBytesEstimate(
- fileSize uint64, props *sstable.Properties, avgValLogicalSize, compressionRatio float64,
+ fileSize uint64, props *sstable.CommonProperties, avgValLogicalSize, compressionRatio float64,
) (estimate uint64) {
if props.NumEntries == 0 {
return 0
@@ -675,7 +661,7 @@ func pointDeletionsBytesEstimate(
// tombstones' encoded values.
//
// For un-sized point tombstones (DELs), we estimate assuming that each
- // point tombstone on average covers 1 key and using average vaue sizes.
+ // point tombstone on average covers 1 key and using average value sizes.
// This is almost certainly an overestimate, but that's probably okay
// because point tombstones can slow range iterations even when they don't
// cover a key.
@@ -739,7 +725,7 @@ func pointDeletionsBytesEstimate(
}
func estimatePhysicalSizes(
- fileSize uint64, props *sstable.Properties,
+ fileSize uint64, props *sstable.CommonProperties,
) (avgValLogicalSize, compressionRatio float64) {
// RawKeySize and RawValueSize are uncompressed totals. Scale according to
// the data size to account for compression, index blocks and metadata
@@ -812,7 +798,7 @@ func estimatePhysicalSizes(
// corresponding to the largest and smallest sequence numbers encountered across
// the range deletes and range keys deletes that comprised the merged spans.
func newCombinedDeletionKeyspanIter(
- comparer *base.Comparer, r *sstable.Reader, m *fileMetadata,
+ comparer *base.Comparer, cr sstable.CommonReader, m *fileMetadata,
) (keyspan.FragmentIterator, error) {
// The range del iter and range key iter are each wrapped in their own
// defragmenting iter. For each iter, abutting spans can always be merged.
@@ -874,7 +860,7 @@ func newCombinedDeletionKeyspanIter(
})
mIter.Init(comparer.Compare, transform, new(keyspan.MergingBuffers))
- iter, err := r.NewRawRangeDelIter()
+ iter, err := cr.NewRawRangeDelIter()
if err != nil {
return nil, err
}
@@ -891,7 +877,7 @@ func newCombinedDeletionKeyspanIter(
mIter.AddLevel(iter)
}
- iter, err = r.NewRawRangeKeyIter()
+ iter, err = cr.NewRawRangeKeyIter()
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/cockroachdb/pebble/version_set.go b/vendor/github.com/cockroachdb/pebble/version_set.go
index 2a466b1f92..c5e1a8b439 100644
--- a/vendor/github.com/cockroachdb/pebble/version_set.go
+++ b/vendor/github.com/cockroachdb/pebble/version_set.go
@@ -110,9 +110,10 @@ type versionSet struct {
manifestFileNum FileNum
manifestMarker *atomicfs.Marker
- manifestFile vfs.File
- manifest *record.Writer
- setCurrent func(FileNum) error
+ manifestFile vfs.File
+ manifest *record.Writer
+ setCurrent func(FileNum) error
+ getFormatMajorVersion func() FormatMajorVersion
writing bool
writerCond sync.Cond
@@ -125,6 +126,7 @@ func (vs *versionSet) init(
opts *Options,
marker *atomicfs.Marker,
setCurrent func(FileNum) error,
+ getFMV func() FormatMajorVersion,
mu *sync.Mutex,
) {
vs.dirname = dirname
@@ -142,6 +144,7 @@ func (vs *versionSet) init(
vs.nextFileNum = 1
vs.manifestMarker = marker
vs.setCurrent = setCurrent
+ vs.getFormatMajorVersion = getFMV
}
// create creates a version set for a fresh DB.
@@ -151,9 +154,10 @@ func (vs *versionSet) create(
opts *Options,
marker *atomicfs.Marker,
setCurrent func(FileNum) error,
+ getFormatMajorVersion func() FormatMajorVersion,
mu *sync.Mutex,
) error {
- vs.init(dirname, opts, marker, setCurrent, mu)
+ vs.init(dirname, opts, marker, setCurrent, getFormatMajorVersion, mu)
newVersion := &version{}
vs.append(newVersion)
var err error
@@ -199,9 +203,10 @@ func (vs *versionSet) load(
manifestFileNum FileNum,
marker *atomicfs.Marker,
setCurrent func(FileNum) error,
+ getFormatMajorVersion func() FormatMajorVersion,
mu *sync.Mutex,
) error {
- vs.init(dirname, opts, marker, setCurrent, mu)
+ vs.init(dirname, opts, marker, setCurrent, getFormatMajorVersion, mu)
vs.manifestFileNum = manifestFileNum
manifestPath := base.MakeFilepath(opts.FS, dirname, fileTypeManifest, vs.manifestFileNum.DiskFileNum())
@@ -484,7 +489,7 @@ func (vs *versionSet) logAndApply(
defer vs.mu.Lock()
var err error
- if vs.opts.FormatMajorVersion < FormatVirtualSSTables && len(ve.CreatedBackingTables) > 0 {
+ if vs.getFormatMajorVersion() < FormatVirtualSSTables && len(ve.CreatedBackingTables) > 0 {
return errors.AssertionFailedf("MANIFEST cannot contain virtual sstable records due to format major version")
}
newVersion, zombies, err = manifest.AccumulateIncompleteAndApplySingleVE(
diff --git a/vendor/github.com/dgraph-io/badger/v4/README.md b/vendor/github.com/dgraph-io/badger/v4/README.md
index 29de9cbbf6..38a834f9ee 100644
--- a/vendor/github.com/dgraph-io/badger/v4/README.md
+++ b/vendor/github.com/dgraph-io/badger/v4/README.md
@@ -39,7 +39,7 @@ Please consult the [Changelog] for more detailed information on releases.
For more details on our version naming schema please read [Choosing a version](#choosing-a-version).
-[Changelog]:https://github.com/dgraph-io/badger/blob/master/CHANGELOG.md
+[Changelog]:https://github.com/dgraph-io/badger/blob/main/CHANGELOG.md
## Table of Contents
- [BadgerDB](#badgerdb)
@@ -62,7 +62,7 @@ For more details on our version naming schema please read [Choosing a version](#
## Getting Started
### Installing
-To start using Badger, install Go 1.19 or above. Badger v3 needs go modules. From your project, run the following command
+To start using Badger, install Go 1.19 or above. Badger v3 and above needs go modules. From your project, run the following command
```sh
$ go get github.com/dgraph-io/badger/v4
diff --git a/vendor/github.com/dgraph-io/badger/v4/table/builder.go b/vendor/github.com/dgraph-io/badger/v4/table/builder.go
index 5c9e065e0c..bf0ac319a8 100644
--- a/vendor/github.com/dgraph-io/badger/v4/table/builder.go
+++ b/vendor/github.com/dgraph-io/badger/v4/table/builder.go
@@ -25,8 +25,8 @@ import (
"unsafe"
"github.com/golang/protobuf/proto"
- "github.com/golang/snappy"
fbs "github.com/google/flatbuffers/go"
+ "github.com/klauspost/compress/s2"
"github.com/pkg/errors"
"github.com/dgraph-io/badger/v4/fb"
@@ -159,7 +159,7 @@ func NewTableBuilder(opts Options) *Builder {
func maxEncodedLen(ctype options.CompressionType, sz int) int {
switch ctype {
case options.Snappy:
- return snappy.MaxEncodedLen(sz)
+ return s2.MaxEncodedLen(sz)
case options.ZSTD:
return y.ZSTDCompressBound(sz)
}
@@ -523,9 +523,9 @@ func (b *Builder) compressData(data []byte) ([]byte, error) {
case options.None:
return data, nil
case options.Snappy:
- sz := snappy.MaxEncodedLen(len(data))
+ sz := s2.MaxEncodedLen(len(data))
dst := b.alloc.Allocate(sz)
- return snappy.Encode(dst, data), nil
+ return s2.EncodeSnappy(dst, data), nil
case options.ZSTD:
sz := y.ZSTDCompressBound(len(data))
dst := b.alloc.Allocate(sz)
diff --git a/vendor/github.com/dgraph-io/badger/v4/table/table.go b/vendor/github.com/dgraph-io/badger/v4/table/table.go
index 0bbc910892..010cbd1cf9 100644
--- a/vendor/github.com/dgraph-io/badger/v4/table/table.go
+++ b/vendor/github.com/dgraph-io/badger/v4/table/table.go
@@ -32,7 +32,8 @@ import (
"unsafe"
"github.com/golang/protobuf/proto"
- "github.com/golang/snappy"
+ "github.com/klauspost/compress/snappy"
+ "github.com/klauspost/compress/zstd"
"github.com/pkg/errors"
"github.com/dgraph-io/badger/v4/fb"
@@ -818,6 +819,11 @@ func (t *Table) decompress(b *block) error {
}
case options.ZSTD:
sz := int(float64(t.opt.BlockSize) * 1.2)
+ // Get frame content size from header.
+ var hdr zstd.Header
+ if err := hdr.Decode(b.data); err == nil && hdr.HasFCS && hdr.FrameContentSize < uint64(t.opt.BlockSize*2) {
+ sz = int(hdr.FrameContentSize)
+ }
dst = z.Calloc(sz, "Table.Decompress")
b.data, err = y.ZSTDDecompress(dst, b.data)
if err != nil {
diff --git a/vendor/github.com/dop251/goja/array.go b/vendor/github.com/dop251/goja/array.go
index 8b09df8b53..7a67a47c16 100644
--- a/vendor/github.com/dop251/goja/array.go
+++ b/vendor/github.com/dop251/goja/array.go
@@ -332,6 +332,18 @@ func (a *arrayObject) hasOwnPropertyIdx(idx valueInt) bool {
return a.baseObject.hasOwnPropertyStr(idx.string())
}
+func (a *arrayObject) hasPropertyIdx(idx valueInt) bool {
+ if a.hasOwnPropertyIdx(idx) {
+ return true
+ }
+
+ if a.prototype != nil {
+ return a.prototype.self.hasPropertyIdx(idx)
+ }
+
+ return false
+}
+
func (a *arrayObject) expand(idx uint32) bool {
targetLen := idx + 1
if targetLen > uint32(len(a.values)) {
@@ -509,7 +521,7 @@ func (a *arrayObject) exportType() reflect.Type {
func (a *arrayObject) exportToArrayOrSlice(dst reflect.Value, typ reflect.Type, ctx *objectExportCtx) error {
r := a.val.runtime
- if iter := a.getSym(SymIterator, nil); iter == r.global.arrayValues || iter == nil {
+ if iter := a.getSym(SymIterator, nil); iter == r.getArrayValues() || iter == nil {
l := toIntStrict(int64(a.length))
if typ.Kind() == reflect.Array {
if dst.Len() != l {
diff --git a/vendor/github.com/dop251/goja/array_sparse.go b/vendor/github.com/dop251/goja/array_sparse.go
index 201bc6faa1..f99afd7e6d 100644
--- a/vendor/github.com/dop251/goja/array_sparse.go
+++ b/vendor/github.com/dop251/goja/array_sparse.go
@@ -302,6 +302,18 @@ func (a *sparseArrayObject) hasOwnPropertyIdx(idx valueInt) bool {
return a.baseObject.hasOwnPropertyStr(idx.string())
}
+func (a *sparseArrayObject) hasPropertyIdx(idx valueInt) bool {
+ if a.hasOwnPropertyIdx(idx) {
+ return true
+ }
+
+ if a.prototype != nil {
+ return a.prototype.self.hasPropertyIdx(idx)
+ }
+
+ return false
+}
+
func (a *sparseArrayObject) expand(idx uint32) bool {
if l := len(a.items); l >= 1024 {
if ii := a.items[l-1].idx; ii > idx {
@@ -458,7 +470,7 @@ func (a *sparseArrayObject) exportType() reflect.Type {
func (a *sparseArrayObject) exportToArrayOrSlice(dst reflect.Value, typ reflect.Type, ctx *objectExportCtx) error {
r := a.val.runtime
- if iter := a.getSym(SymIterator, nil); iter == r.global.arrayValues || iter == nil {
+ if iter := a.getSym(SymIterator, nil); iter == r.getArrayValues() || iter == nil {
l := toIntStrict(int64(a.length))
if typ.Kind() == reflect.Array {
if dst.Len() != l {
diff --git a/vendor/github.com/dop251/goja/ast/node.go b/vendor/github.com/dop251/goja/ast/node.go
index 176c1f3b03..3bec89db49 100644
--- a/vendor/github.com/dop251/goja/ast/node.go
+++ b/vendor/github.com/dop251/goja/ast/node.go
@@ -384,9 +384,10 @@ type (
}
DoWhileStatement struct {
- Do file.Idx
- Test Expression
- Body Statement
+ Do file.Idx
+ Test Expression
+ Body Statement
+ RightParenthesis file.Idx
}
EmptyStatement struct {
@@ -442,6 +443,7 @@ type (
Discriminant Expression
Default int
Body []*CaseStatement
+ RightBrace file.Idx
}
ThrowStatement struct {
@@ -669,8 +671,13 @@ func (self *TemplateElement) Idx0() file.Idx { return self.Idx }
func (self *TemplateLiteral) Idx0() file.Idx { return self.OpenQuote }
func (self *ThisExpression) Idx0() file.Idx { return self.Idx }
func (self *SuperExpression) Idx0() file.Idx { return self.Idx }
-func (self *UnaryExpression) Idx0() file.Idx { return self.Idx }
-func (self *MetaProperty) Idx0() file.Idx { return self.Idx }
+func (self *UnaryExpression) Idx0() file.Idx {
+ if self.Postfix {
+ return self.Operand.Idx0()
+ }
+ return self.Idx
+}
+func (self *MetaProperty) Idx0() file.Idx { return self.Idx }
func (self *BadStatement) Idx0() file.Idx { return self.From }
func (self *BlockStatement) Idx0() file.Idx { return self.LeftBrace }
@@ -728,7 +735,7 @@ func (self *BinaryExpression) Idx1() file.Idx { return self.Right.Idx1() }
func (self *BooleanLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) }
func (self *BracketExpression) Idx1() file.Idx { return self.RightBracket + 1 }
func (self *CallExpression) Idx1() file.Idx { return self.RightParenthesis + 1 }
-func (self *ConditionalExpression) Idx1() file.Idx { return self.Test.Idx1() }
+func (self *ConditionalExpression) Idx1() file.Idx { return self.Alternate.Idx1() }
func (self *DotExpression) Idx1() file.Idx { return self.Identifier.Idx1() }
func (self *PrivateDotExpression) Idx1() file.Idx { return self.Identifier.Idx1() }
func (self *FunctionLiteral) Idx1() file.Idx { return self.Body.Idx1() }
@@ -764,13 +771,18 @@ func (self *MetaProperty) Idx1() file.Idx {
return self.Property.Idx1()
}
-func (self *BadStatement) Idx1() file.Idx { return self.To }
-func (self *BlockStatement) Idx1() file.Idx { return self.RightBrace + 1 }
-func (self *BranchStatement) Idx1() file.Idx { return self.Idx }
+func (self *BadStatement) Idx1() file.Idx { return self.To }
+func (self *BlockStatement) Idx1() file.Idx { return self.RightBrace + 1 }
+func (self *BranchStatement) Idx1() file.Idx {
+ if self.Label == nil {
+ return file.Idx(int(self.Idx) + len(self.Token.String()))
+ }
+ return self.Label.Idx1()
+}
func (self *CaseStatement) Idx1() file.Idx { return self.Consequent[len(self.Consequent)-1].Idx1() }
func (self *CatchStatement) Idx1() file.Idx { return self.Body.Idx1() }
func (self *DebuggerStatement) Idx1() file.Idx { return self.Debugger + 8 }
-func (self *DoWhileStatement) Idx1() file.Idx { return self.Test.Idx1() }
+func (self *DoWhileStatement) Idx1() file.Idx { return self.RightParenthesis + 1 }
func (self *EmptyStatement) Idx1() file.Idx { return self.Semicolon + 1 }
func (self *ExpressionStatement) Idx1() file.Idx { return self.Expression.Idx1() }
func (self *ForInStatement) Idx1() file.Idx { return self.Body.Idx1() }
@@ -782,11 +794,16 @@ func (self *IfStatement) Idx1() file.Idx {
}
return self.Consequent.Idx1()
}
-func (self *LabelledStatement) Idx1() file.Idx { return self.Colon + 1 }
+func (self *LabelledStatement) Idx1() file.Idx { return self.Statement.Idx1() }
func (self *Program) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() }
-func (self *ReturnStatement) Idx1() file.Idx { return self.Return + 6 }
-func (self *SwitchStatement) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() }
-func (self *ThrowStatement) Idx1() file.Idx { return self.Argument.Idx1() }
+func (self *ReturnStatement) Idx1() file.Idx {
+ if self.Argument != nil {
+ return self.Argument.Idx1()
+ }
+ return self.Return + 6
+}
+func (self *SwitchStatement) Idx1() file.Idx { return self.RightBrace + 1 }
+func (self *ThrowStatement) Idx1() file.Idx { return self.Argument.Idx1() }
func (self *TryStatement) Idx1() file.Idx {
if self.Finally != nil {
return self.Finally.Idx1()
diff --git a/vendor/github.com/dop251/goja/builtin_array.go b/vendor/github.com/dop251/goja/builtin_array.go
index 6ff244b6dc..6ba8802c28 100644
--- a/vendor/github.com/dop251/goja/builtin_array.go
+++ b/vendor/github.com/dop251/goja/builtin_array.go
@@ -3,6 +3,7 @@ package goja
import (
"math"
"sort"
+ "sync"
)
func (r *Runtime) newArray(prototype *Object) (a *arrayObject) {
@@ -19,7 +20,7 @@ func (r *Runtime) newArray(prototype *Object) (a *arrayObject) {
}
func (r *Runtime) newArrayObject() *arrayObject {
- return r.newArray(r.global.ArrayPrototype)
+ return r.newArray(r.getArrayPrototype())
}
func setArrayValues(a *arrayObject, values []Value) *arrayObject {
@@ -96,7 +97,7 @@ func (r *Runtime) builtin_newArray(args []Value, proto *Object) *Object {
if float64(al) == float64(f) {
return r.newArrayLength(al)
} else {
- panic(r.newError(r.global.RangeError, "Invalid array length"))
+ panic(r.newError(r.getRangeError(), "Invalid array length"))
}
}
return setArrayValues(r.newArray(proto), []Value{args[0]}).val
@@ -1259,7 +1260,7 @@ func (r *Runtime) checkStdArray(v Value) *arrayObject {
func (r *Runtime) checkStdArrayIter(v Value) *arrayObject {
if arr := r.checkStdArray(v); arr != nil &&
- arr.getSym(SymIterator, nil) == r.global.arrayValues {
+ arr.getSym(SymIterator, nil) == r.getArrayValues() {
return arr
}
@@ -1398,80 +1399,110 @@ func (r *Runtime) arrayIterProto_next(call FunctionCall) Value {
panic(r.NewTypeError("Method Array Iterator.prototype.next called on incompatible receiver %s", r.objectproto_toString(FunctionCall{This: thisObj})))
}
-func (r *Runtime) createArrayProto(val *Object) objectImpl {
- o := &arrayObject{
- baseObject: baseObject{
- class: classArray,
- val: val,
- extensible: true,
- prototype: r.global.ObjectPrototype,
- },
- }
- o.init()
-
- o._putProp("at", r.newNativeFunc(r.arrayproto_at, nil, "at", nil, 1), true, false, true)
- o._putProp("constructor", r.global.Array, true, false, true)
- o._putProp("concat", r.newNativeFunc(r.arrayproto_concat, nil, "concat", nil, 1), true, false, true)
- o._putProp("copyWithin", r.newNativeFunc(r.arrayproto_copyWithin, nil, "copyWithin", nil, 2), true, false, true)
- o._putProp("entries", r.newNativeFunc(r.arrayproto_entries, nil, "entries", nil, 0), true, false, true)
- o._putProp("every", r.newNativeFunc(r.arrayproto_every, nil, "every", nil, 1), true, false, true)
- o._putProp("fill", r.newNativeFunc(r.arrayproto_fill, nil, "fill", nil, 1), true, false, true)
- o._putProp("filter", r.newNativeFunc(r.arrayproto_filter, nil, "filter", nil, 1), true, false, true)
- o._putProp("find", r.newNativeFunc(r.arrayproto_find, nil, "find", nil, 1), true, false, true)
- o._putProp("findIndex", r.newNativeFunc(r.arrayproto_findIndex, nil, "findIndex", nil, 1), true, false, true)
- o._putProp("findLast", r.newNativeFunc(r.arrayproto_findLast, nil, "findLast", nil, 1), true, false, true)
- o._putProp("findLastIndex", r.newNativeFunc(r.arrayproto_findLastIndex, nil, "findLastIndex", nil, 1), true, false, true)
- o._putProp("flat", r.newNativeFunc(r.arrayproto_flat, nil, "flat", nil, 0), true, false, true)
- o._putProp("flatMap", r.newNativeFunc(r.arrayproto_flatMap, nil, "flatMap", nil, 1), true, false, true)
- o._putProp("forEach", r.newNativeFunc(r.arrayproto_forEach, nil, "forEach", nil, 1), true, false, true)
- o._putProp("includes", r.newNativeFunc(r.arrayproto_includes, nil, "includes", nil, 1), true, false, true)
- o._putProp("indexOf", r.newNativeFunc(r.arrayproto_indexOf, nil, "indexOf", nil, 1), true, false, true)
- o._putProp("join", r.newNativeFunc(r.arrayproto_join, nil, "join", nil, 1), true, false, true)
- o._putProp("keys", r.newNativeFunc(r.arrayproto_keys, nil, "keys", nil, 0), true, false, true)
- o._putProp("lastIndexOf", r.newNativeFunc(r.arrayproto_lastIndexOf, nil, "lastIndexOf", nil, 1), true, false, true)
- o._putProp("map", r.newNativeFunc(r.arrayproto_map, nil, "map", nil, 1), true, false, true)
- o._putProp("pop", r.newNativeFunc(r.arrayproto_pop, nil, "pop", nil, 0), true, false, true)
- o._putProp("push", r.newNativeFunc(r.arrayproto_push, nil, "push", nil, 1), true, false, true)
- o._putProp("reduce", r.newNativeFunc(r.arrayproto_reduce, nil, "reduce", nil, 1), true, false, true)
- o._putProp("reduceRight", r.newNativeFunc(r.arrayproto_reduceRight, nil, "reduceRight", nil, 1), true, false, true)
- o._putProp("reverse", r.newNativeFunc(r.arrayproto_reverse, nil, "reverse", nil, 0), true, false, true)
- o._putProp("shift", r.newNativeFunc(r.arrayproto_shift, nil, "shift", nil, 0), true, false, true)
- o._putProp("slice", r.newNativeFunc(r.arrayproto_slice, nil, "slice", nil, 2), true, false, true)
- o._putProp("some", r.newNativeFunc(r.arrayproto_some, nil, "some", nil, 1), true, false, true)
- o._putProp("sort", r.newNativeFunc(r.arrayproto_sort, nil, "sort", nil, 1), true, false, true)
- o._putProp("splice", r.newNativeFunc(r.arrayproto_splice, nil, "splice", nil, 2), true, false, true)
- o._putProp("toLocaleString", r.newNativeFunc(r.arrayproto_toLocaleString, nil, "toLocaleString", nil, 0), true, false, true)
- o._putProp("toString", r.global.arrayToString, true, false, true)
- o._putProp("unshift", r.newNativeFunc(r.arrayproto_unshift, nil, "unshift", nil, 1), true, false, true)
- o._putProp("values", r.global.arrayValues, true, false, true)
-
- o._putSym(SymIterator, valueProp(r.global.arrayValues, true, false, true))
-
- bl := r.newBaseObject(nil, classObject)
- bl.setOwnStr("copyWithin", valueTrue, true)
- bl.setOwnStr("entries", valueTrue, true)
- bl.setOwnStr("fill", valueTrue, true)
- bl.setOwnStr("find", valueTrue, true)
- bl.setOwnStr("findIndex", valueTrue, true)
- bl.setOwnStr("findLast", valueTrue, true)
- bl.setOwnStr("findLastIndex", valueTrue, true)
- bl.setOwnStr("flat", valueTrue, true)
- bl.setOwnStr("flatMap", valueTrue, true)
- bl.setOwnStr("includes", valueTrue, true)
- bl.setOwnStr("keys", valueTrue, true)
- bl.setOwnStr("values", valueTrue, true)
- bl.setOwnStr("groupBy", valueTrue, true)
- bl.setOwnStr("groupByToMap", valueTrue, true)
- o._putSym(SymUnscopables, valueProp(bl.val, false, false, true))
+func createArrayProtoTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.global.ObjectPrototype
+ }
+
+ t.putStr("length", func(r *Runtime) Value { return valueProp(_positiveZero, true, false, false) })
+
+ t.putStr("constructor", func(r *Runtime) Value { return valueProp(r.getArray(), true, false, true) })
+
+ t.putStr("at", func(r *Runtime) Value { return r.methodProp(r.arrayproto_at, "at", 1) })
+ t.putStr("concat", func(r *Runtime) Value { return r.methodProp(r.arrayproto_concat, "concat", 1) })
+ t.putStr("copyWithin", func(r *Runtime) Value { return r.methodProp(r.arrayproto_copyWithin, "copyWithin", 2) })
+ t.putStr("entries", func(r *Runtime) Value { return r.methodProp(r.arrayproto_entries, "entries", 0) })
+ t.putStr("every", func(r *Runtime) Value { return r.methodProp(r.arrayproto_every, "every", 1) })
+ t.putStr("fill", func(r *Runtime) Value { return r.methodProp(r.arrayproto_fill, "fill", 1) })
+ t.putStr("filter", func(r *Runtime) Value { return r.methodProp(r.arrayproto_filter, "filter", 1) })
+ t.putStr("find", func(r *Runtime) Value { return r.methodProp(r.arrayproto_find, "find", 1) })
+ t.putStr("findIndex", func(r *Runtime) Value { return r.methodProp(r.arrayproto_findIndex, "findIndex", 1) })
+ t.putStr("findLast", func(r *Runtime) Value { return r.methodProp(r.arrayproto_findLast, "findLast", 1) })
+ t.putStr("findLastIndex", func(r *Runtime) Value { return r.methodProp(r.arrayproto_findLastIndex, "findLastIndex", 1) })
+ t.putStr("flat", func(r *Runtime) Value { return r.methodProp(r.arrayproto_flat, "flat", 0) })
+ t.putStr("flatMap", func(r *Runtime) Value { return r.methodProp(r.arrayproto_flatMap, "flatMap", 1) })
+ t.putStr("forEach", func(r *Runtime) Value { return r.methodProp(r.arrayproto_forEach, "forEach", 1) })
+ t.putStr("includes", func(r *Runtime) Value { return r.methodProp(r.arrayproto_includes, "includes", 1) })
+ t.putStr("indexOf", func(r *Runtime) Value { return r.methodProp(r.arrayproto_indexOf, "indexOf", 1) })
+ t.putStr("join", func(r *Runtime) Value { return r.methodProp(r.arrayproto_join, "join", 1) })
+ t.putStr("keys", func(r *Runtime) Value { return r.methodProp(r.arrayproto_keys, "keys", 0) })
+ t.putStr("lastIndexOf", func(r *Runtime) Value { return r.methodProp(r.arrayproto_lastIndexOf, "lastIndexOf", 1) })
+ t.putStr("map", func(r *Runtime) Value { return r.methodProp(r.arrayproto_map, "map", 1) })
+ t.putStr("pop", func(r *Runtime) Value { return r.methodProp(r.arrayproto_pop, "pop", 0) })
+ t.putStr("push", func(r *Runtime) Value { return r.methodProp(r.arrayproto_push, "push", 1) })
+ t.putStr("reduce", func(r *Runtime) Value { return r.methodProp(r.arrayproto_reduce, "reduce", 1) })
+ t.putStr("reduceRight", func(r *Runtime) Value { return r.methodProp(r.arrayproto_reduceRight, "reduceRight", 1) })
+ t.putStr("reverse", func(r *Runtime) Value { return r.methodProp(r.arrayproto_reverse, "reverse", 0) })
+ t.putStr("shift", func(r *Runtime) Value { return r.methodProp(r.arrayproto_shift, "shift", 0) })
+ t.putStr("slice", func(r *Runtime) Value { return r.methodProp(r.arrayproto_slice, "slice", 2) })
+ t.putStr("some", func(r *Runtime) Value { return r.methodProp(r.arrayproto_some, "some", 1) })
+ t.putStr("sort", func(r *Runtime) Value { return r.methodProp(r.arrayproto_sort, "sort", 1) })
+ t.putStr("splice", func(r *Runtime) Value { return r.methodProp(r.arrayproto_splice, "splice", 2) })
+ t.putStr("toLocaleString", func(r *Runtime) Value { return r.methodProp(r.arrayproto_toLocaleString, "toLocaleString", 0) })
+ t.putStr("toString", func(r *Runtime) Value { return valueProp(r.getArrayToString(), true, false, true) })
+ t.putStr("unshift", func(r *Runtime) Value { return r.methodProp(r.arrayproto_unshift, "unshift", 1) })
+ t.putStr("values", func(r *Runtime) Value { return valueProp(r.getArrayValues(), true, false, true) })
+
+ t.putSym(SymIterator, func(r *Runtime) Value { return valueProp(r.getArrayValues(), true, false, true) })
+ t.putSym(SymUnscopables, func(r *Runtime) Value {
+ bl := r.newBaseObject(nil, classObject)
+ bl.setOwnStr("copyWithin", valueTrue, true)
+ bl.setOwnStr("entries", valueTrue, true)
+ bl.setOwnStr("fill", valueTrue, true)
+ bl.setOwnStr("find", valueTrue, true)
+ bl.setOwnStr("findIndex", valueTrue, true)
+ bl.setOwnStr("findLast", valueTrue, true)
+ bl.setOwnStr("findLastIndex", valueTrue, true)
+ bl.setOwnStr("flat", valueTrue, true)
+ bl.setOwnStr("flatMap", valueTrue, true)
+ bl.setOwnStr("includes", valueTrue, true)
+ bl.setOwnStr("keys", valueTrue, true)
+ bl.setOwnStr("values", valueTrue, true)
+ bl.setOwnStr("groupBy", valueTrue, true)
+ bl.setOwnStr("groupByToMap", valueTrue, true)
+
+ return valueProp(bl.val, false, false, true)
+ })
- return o
+ return t
+}
+
+var arrayProtoTemplate *objectTemplate
+var arrayProtoTemplateOnce sync.Once
+
+func getArrayProtoTemplate() *objectTemplate {
+ arrayProtoTemplateOnce.Do(func() {
+ arrayProtoTemplate = createArrayProtoTemplate()
+ })
+ return arrayProtoTemplate
+}
+
+func (r *Runtime) getArrayPrototype() *Object {
+ ret := r.global.ArrayPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.ArrayPrototype = ret
+ r.newTemplatedArrayObject(getArrayProtoTemplate(), ret)
+ }
+ return ret
+}
+
+func (r *Runtime) getArray() *Object {
+ ret := r.global.Array
+ if ret == nil {
+ ret = &Object{runtime: r}
+ ret.self = r.createArray(ret)
+ r.global.Array = ret
+ }
+ return ret
}
func (r *Runtime) createArray(val *Object) objectImpl {
- o := r.newNativeFuncConstructObj(val, r.builtin_newArray, "Array", r.global.ArrayPrototype, 1)
- o._putProp("from", r.newNativeFunc(r.array_from, nil, "from", nil, 1), true, false, true)
- o._putProp("isArray", r.newNativeFunc(r.array_isArray, nil, "isArray", nil, 1), true, false, true)
- o._putProp("of", r.newNativeFunc(r.array_of, nil, "of", nil, 0), true, false, true)
+ o := r.newNativeFuncConstructObj(val, r.builtin_newArray, "Array", r.getArrayPrototype(), 1)
+ o._putProp("from", r.newNativeFunc(r.array_from, "from", 1), true, false, true)
+ o._putProp("isArray", r.newNativeFunc(r.array_isArray, "isArray", 1), true, false, true)
+ o._putProp("of", r.newNativeFunc(r.array_of, "of", 0), true, false, true)
r.putSpeciesReturnThis(o)
return o
@@ -1480,20 +1511,28 @@ func (r *Runtime) createArray(val *Object) objectImpl {
func (r *Runtime) createArrayIterProto(val *Object) objectImpl {
o := newBaseObjectObj(val, r.getIteratorPrototype(), classObject)
- o._putProp("next", r.newNativeFunc(r.arrayIterProto_next, nil, "next", nil, 0), true, false, true)
+ o._putProp("next", r.newNativeFunc(r.arrayIterProto_next, "next", 0), true, false, true)
o._putSym(SymToStringTag, valueProp(asciiString(classArrayIterator), false, false, true))
return o
}
-func (r *Runtime) initArray() {
- r.global.arrayValues = r.newNativeFunc(r.arrayproto_values, nil, "values", nil, 0)
- r.global.arrayToString = r.newNativeFunc(r.arrayproto_toString, nil, "toString", nil, 0)
-
- r.global.ArrayPrototype = r.newLazyObject(r.createArrayProto)
+func (r *Runtime) getArrayValues() *Object {
+ ret := r.global.arrayValues
+ if ret == nil {
+ ret = r.newNativeFunc(r.arrayproto_values, "values", 0)
+ r.global.arrayValues = ret
+ }
+ return ret
+}
- r.global.Array = r.newLazyObject(r.createArray)
- r.addToGlobal("Array", r.global.Array)
+func (r *Runtime) getArrayToString() *Object {
+ ret := r.global.arrayToString
+ if ret == nil {
+ ret = r.newNativeFunc(r.arrayproto_toString, "toString", 0)
+ r.global.arrayToString = ret
+ }
+ return ret
}
func (r *Runtime) getArrayIteratorPrototype() *Object {
diff --git a/vendor/github.com/dop251/goja/builtin_boolean.go b/vendor/github.com/dop251/goja/builtin_boolean.go
index b065615d28..8476328511 100644
--- a/vendor/github.com/dop251/goja/builtin_boolean.go
+++ b/vendor/github.com/dop251/goja/builtin_boolean.go
@@ -49,12 +49,27 @@ func (r *Runtime) booleanproto_valueOf(call FunctionCall) Value {
return nil
}
-func (r *Runtime) initBoolean() {
- r.global.BooleanPrototype = r.newPrimitiveObject(valueFalse, r.global.ObjectPrototype, classBoolean)
- o := r.global.BooleanPrototype.self
- o._putProp("toString", r.newNativeFunc(r.booleanproto_toString, nil, "toString", nil, 0), true, false, true)
- o._putProp("valueOf", r.newNativeFunc(r.booleanproto_valueOf, nil, "valueOf", nil, 0), true, false, true)
+func (r *Runtime) getBooleanPrototype() *Object {
+ ret := r.global.BooleanPrototype
+ if ret == nil {
+ ret = r.newPrimitiveObject(valueFalse, r.global.ObjectPrototype, classBoolean)
+ r.global.BooleanPrototype = ret
+ o := ret.self
+ o._putProp("toString", r.newNativeFunc(r.booleanproto_toString, "toString", 0), true, false, true)
+ o._putProp("valueOf", r.newNativeFunc(r.booleanproto_valueOf, "valueOf", 0), true, false, true)
+ o._putProp("constructor", r.getBoolean(), true, false, true)
+ }
+ return ret
+}
- r.global.Boolean = r.newNativeFunc(r.builtin_Boolean, r.builtin_newBoolean, "Boolean", r.global.BooleanPrototype, 1)
- r.addToGlobal("Boolean", r.global.Boolean)
+func (r *Runtime) getBoolean() *Object {
+ ret := r.global.Boolean
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Boolean = ret
+ proto := r.getBooleanPrototype()
+ r.newNativeFuncAndConstruct(ret, r.builtin_Boolean,
+ r.wrapNativeConstruct(r.builtin_newBoolean, ret, proto), proto, "Boolean", intToValue(1))
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_date.go b/vendor/github.com/dop251/goja/builtin_date.go
index 50070a2906..84a80ac02b 100644
--- a/vendor/github.com/dop251/goja/builtin_date.go
+++ b/vendor/github.com/dop251/goja/builtin_date.go
@@ -3,6 +3,7 @@ package goja
import (
"fmt"
"math"
+ "sync"
"time"
)
@@ -133,7 +134,7 @@ func (r *Runtime) dateproto_toISOString(call FunctionCall) Value {
// extended year
return asciiString(fmt.Sprintf("%+06d-", year) + utc.Format(isoDateTimeLayout[5:]))
} else {
- panic(r.newError(r.global.RangeError, "Invalid time value"))
+ panic(r.newError(r.getRangeError(), "Invalid time value"))
}
}
panic(r.NewTypeError("Method Date.prototype.toISOString is called on incompatible receiver"))
@@ -938,78 +939,120 @@ func (r *Runtime) dateproto_setUTCFullYear(call FunctionCall) Value {
panic(r.NewTypeError("Method Date.prototype.setUTCFullYear is called on incompatible receiver"))
}
-func (r *Runtime) createDateProto(val *Object) objectImpl {
- o := &baseObject{
- class: classObject,
- val: val,
- extensible: true,
- prototype: r.global.ObjectPrototype,
- }
- o.init()
-
- o._putProp("constructor", r.global.Date, true, false, true)
- o._putProp("toString", r.newNativeFunc(r.dateproto_toString, nil, "toString", nil, 0), true, false, true)
- o._putProp("toDateString", r.newNativeFunc(r.dateproto_toDateString, nil, "toDateString", nil, 0), true, false, true)
- o._putProp("toTimeString", r.newNativeFunc(r.dateproto_toTimeString, nil, "toTimeString", nil, 0), true, false, true)
- o._putProp("toLocaleString", r.newNativeFunc(r.dateproto_toLocaleString, nil, "toLocaleString", nil, 0), true, false, true)
- o._putProp("toLocaleDateString", r.newNativeFunc(r.dateproto_toLocaleDateString, nil, "toLocaleDateString", nil, 0), true, false, true)
- o._putProp("toLocaleTimeString", r.newNativeFunc(r.dateproto_toLocaleTimeString, nil, "toLocaleTimeString", nil, 0), true, false, true)
- o._putProp("valueOf", r.newNativeFunc(r.dateproto_valueOf, nil, "valueOf", nil, 0), true, false, true)
- o._putProp("getTime", r.newNativeFunc(r.dateproto_getTime, nil, "getTime", nil, 0), true, false, true)
- o._putProp("getFullYear", r.newNativeFunc(r.dateproto_getFullYear, nil, "getFullYear", nil, 0), true, false, true)
- o._putProp("getUTCFullYear", r.newNativeFunc(r.dateproto_getUTCFullYear, nil, "getUTCFullYear", nil, 0), true, false, true)
- o._putProp("getMonth", r.newNativeFunc(r.dateproto_getMonth, nil, "getMonth", nil, 0), true, false, true)
- o._putProp("getUTCMonth", r.newNativeFunc(r.dateproto_getUTCMonth, nil, "getUTCMonth", nil, 0), true, false, true)
- o._putProp("getDate", r.newNativeFunc(r.dateproto_getDate, nil, "getDate", nil, 0), true, false, true)
- o._putProp("getUTCDate", r.newNativeFunc(r.dateproto_getUTCDate, nil, "getUTCDate", nil, 0), true, false, true)
- o._putProp("getDay", r.newNativeFunc(r.dateproto_getDay, nil, "getDay", nil, 0), true, false, true)
- o._putProp("getUTCDay", r.newNativeFunc(r.dateproto_getUTCDay, nil, "getUTCDay", nil, 0), true, false, true)
- o._putProp("getHours", r.newNativeFunc(r.dateproto_getHours, nil, "getHours", nil, 0), true, false, true)
- o._putProp("getUTCHours", r.newNativeFunc(r.dateproto_getUTCHours, nil, "getUTCHours", nil, 0), true, false, true)
- o._putProp("getMinutes", r.newNativeFunc(r.dateproto_getMinutes, nil, "getMinutes", nil, 0), true, false, true)
- o._putProp("getUTCMinutes", r.newNativeFunc(r.dateproto_getUTCMinutes, nil, "getUTCMinutes", nil, 0), true, false, true)
- o._putProp("getSeconds", r.newNativeFunc(r.dateproto_getSeconds, nil, "getSeconds", nil, 0), true, false, true)
- o._putProp("getUTCSeconds", r.newNativeFunc(r.dateproto_getUTCSeconds, nil, "getUTCSeconds", nil, 0), true, false, true)
- o._putProp("getMilliseconds", r.newNativeFunc(r.dateproto_getMilliseconds, nil, "getMilliseconds", nil, 0), true, false, true)
- o._putProp("getUTCMilliseconds", r.newNativeFunc(r.dateproto_getUTCMilliseconds, nil, "getUTCMilliseconds", nil, 0), true, false, true)
- o._putProp("getTimezoneOffset", r.newNativeFunc(r.dateproto_getTimezoneOffset, nil, "getTimezoneOffset", nil, 0), true, false, true)
- o._putProp("setTime", r.newNativeFunc(r.dateproto_setTime, nil, "setTime", nil, 1), true, false, true)
- o._putProp("setMilliseconds", r.newNativeFunc(r.dateproto_setMilliseconds, nil, "setMilliseconds", nil, 1), true, false, true)
- o._putProp("setUTCMilliseconds", r.newNativeFunc(r.dateproto_setUTCMilliseconds, nil, "setUTCMilliseconds", nil, 1), true, false, true)
- o._putProp("setSeconds", r.newNativeFunc(r.dateproto_setSeconds, nil, "setSeconds", nil, 2), true, false, true)
- o._putProp("setUTCSeconds", r.newNativeFunc(r.dateproto_setUTCSeconds, nil, "setUTCSeconds", nil, 2), true, false, true)
- o._putProp("setMinutes", r.newNativeFunc(r.dateproto_setMinutes, nil, "setMinutes", nil, 3), true, false, true)
- o._putProp("setUTCMinutes", r.newNativeFunc(r.dateproto_setUTCMinutes, nil, "setUTCMinutes", nil, 3), true, false, true)
- o._putProp("setHours", r.newNativeFunc(r.dateproto_setHours, nil, "setHours", nil, 4), true, false, true)
- o._putProp("setUTCHours", r.newNativeFunc(r.dateproto_setUTCHours, nil, "setUTCHours", nil, 4), true, false, true)
- o._putProp("setDate", r.newNativeFunc(r.dateproto_setDate, nil, "setDate", nil, 1), true, false, true)
- o._putProp("setUTCDate", r.newNativeFunc(r.dateproto_setUTCDate, nil, "setUTCDate", nil, 1), true, false, true)
- o._putProp("setMonth", r.newNativeFunc(r.dateproto_setMonth, nil, "setMonth", nil, 2), true, false, true)
- o._putProp("setUTCMonth", r.newNativeFunc(r.dateproto_setUTCMonth, nil, "setUTCMonth", nil, 2), true, false, true)
- o._putProp("setFullYear", r.newNativeFunc(r.dateproto_setFullYear, nil, "setFullYear", nil, 3), true, false, true)
- o._putProp("setUTCFullYear", r.newNativeFunc(r.dateproto_setUTCFullYear, nil, "setUTCFullYear", nil, 3), true, false, true)
- o._putProp("toUTCString", r.newNativeFunc(r.dateproto_toUTCString, nil, "toUTCString", nil, 0), true, false, true)
- o._putProp("toISOString", r.newNativeFunc(r.dateproto_toISOString, nil, "toISOString", nil, 0), true, false, true)
- o._putProp("toJSON", r.newNativeFunc(r.dateproto_toJSON, nil, "toJSON", nil, 1), true, false, true)
-
- o._putSym(SymToPrimitive, valueProp(r.newNativeFunc(r.dateproto_toPrimitive, nil, "[Symbol.toPrimitive]", nil, 1), false, false, true))
-
- return o
-}
-
-func (r *Runtime) createDate(val *Object) objectImpl {
- o := r.newNativeFuncObj(val, r.builtin_date, r.builtin_newDate, "Date", r.global.DatePrototype, intToValue(7))
-
- o._putProp("parse", r.newNativeFunc(r.date_parse, nil, "parse", nil, 1), true, false, true)
- o._putProp("UTC", r.newNativeFunc(r.date_UTC, nil, "UTC", nil, 7), true, false, true)
- o._putProp("now", r.newNativeFunc(r.date_now, nil, "now", nil, 0), true, false, true)
-
- return o
-}
-
-func (r *Runtime) initDate() {
- r.global.DatePrototype = r.newLazyObject(r.createDateProto)
-
- r.global.Date = r.newLazyObject(r.createDate)
- r.addToGlobal("Date", r.global.Date)
+var dateTemplate *objectTemplate
+var dateTemplateOnce sync.Once
+
+func getDateTemplate() *objectTemplate {
+ dateTemplateOnce.Do(func() {
+ dateTemplate = createDateTemplate()
+ })
+ return dateTemplate
+}
+
+func createDateTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.getFunctionPrototype()
+ }
+
+ t.putStr("name", func(r *Runtime) Value { return valueProp(asciiString("Date"), false, false, true) })
+ t.putStr("length", func(r *Runtime) Value { return valueProp(intToValue(7), false, false, true) })
+
+ t.putStr("prototype", func(r *Runtime) Value { return valueProp(r.getDatePrototype(), false, false, false) })
+
+ t.putStr("parse", func(r *Runtime) Value { return r.methodProp(r.date_parse, "parse", 1) })
+ t.putStr("UTC", func(r *Runtime) Value { return r.methodProp(r.date_UTC, "UTC", 7) })
+ t.putStr("now", func(r *Runtime) Value { return r.methodProp(r.date_now, "now", 0) })
+
+ return t
+}
+
+func (r *Runtime) getDate() *Object {
+ ret := r.global.Date
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Date = ret
+ r.newTemplatedFuncObject(getDateTemplate(), ret, r.builtin_date,
+ r.wrapNativeConstruct(r.builtin_newDate, ret, r.getDatePrototype()))
+ }
+ return ret
+}
+
+func createDateProtoTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.global.ObjectPrototype
+ }
+
+ t.putStr("constructor", func(r *Runtime) Value { return valueProp(r.getDate(), true, false, true) })
+
+ t.putStr("toString", func(r *Runtime) Value { return r.methodProp(r.dateproto_toString, "toString", 0) })
+ t.putStr("toDateString", func(r *Runtime) Value { return r.methodProp(r.dateproto_toDateString, "toDateString", 0) })
+ t.putStr("toTimeString", func(r *Runtime) Value { return r.methodProp(r.dateproto_toTimeString, "toTimeString", 0) })
+ t.putStr("toLocaleString", func(r *Runtime) Value { return r.methodProp(r.dateproto_toLocaleString, "toLocaleString", 0) })
+ t.putStr("toLocaleDateString", func(r *Runtime) Value { return r.methodProp(r.dateproto_toLocaleDateString, "toLocaleDateString", 0) })
+ t.putStr("toLocaleTimeString", func(r *Runtime) Value { return r.methodProp(r.dateproto_toLocaleTimeString, "toLocaleTimeString", 0) })
+ t.putStr("valueOf", func(r *Runtime) Value { return r.methodProp(r.dateproto_valueOf, "valueOf", 0) })
+ t.putStr("getTime", func(r *Runtime) Value { return r.methodProp(r.dateproto_getTime, "getTime", 0) })
+ t.putStr("getFullYear", func(r *Runtime) Value { return r.methodProp(r.dateproto_getFullYear, "getFullYear", 0) })
+ t.putStr("getUTCFullYear", func(r *Runtime) Value { return r.methodProp(r.dateproto_getUTCFullYear, "getUTCFullYear", 0) })
+ t.putStr("getMonth", func(r *Runtime) Value { return r.methodProp(r.dateproto_getMonth, "getMonth", 0) })
+ t.putStr("getUTCMonth", func(r *Runtime) Value { return r.methodProp(r.dateproto_getUTCMonth, "getUTCMonth", 0) })
+ t.putStr("getDate", func(r *Runtime) Value { return r.methodProp(r.dateproto_getDate, "getDate", 0) })
+ t.putStr("getUTCDate", func(r *Runtime) Value { return r.methodProp(r.dateproto_getUTCDate, "getUTCDate", 0) })
+ t.putStr("getDay", func(r *Runtime) Value { return r.methodProp(r.dateproto_getDay, "getDay", 0) })
+ t.putStr("getUTCDay", func(r *Runtime) Value { return r.methodProp(r.dateproto_getUTCDay, "getUTCDay", 0) })
+ t.putStr("getHours", func(r *Runtime) Value { return r.methodProp(r.dateproto_getHours, "getHours", 0) })
+ t.putStr("getUTCHours", func(r *Runtime) Value { return r.methodProp(r.dateproto_getUTCHours, "getUTCHours", 0) })
+ t.putStr("getMinutes", func(r *Runtime) Value { return r.methodProp(r.dateproto_getMinutes, "getMinutes", 0) })
+ t.putStr("getUTCMinutes", func(r *Runtime) Value { return r.methodProp(r.dateproto_getUTCMinutes, "getUTCMinutes", 0) })
+ t.putStr("getSeconds", func(r *Runtime) Value { return r.methodProp(r.dateproto_getSeconds, "getSeconds", 0) })
+ t.putStr("getUTCSeconds", func(r *Runtime) Value { return r.methodProp(r.dateproto_getUTCSeconds, "getUTCSeconds", 0) })
+ t.putStr("getMilliseconds", func(r *Runtime) Value { return r.methodProp(r.dateproto_getMilliseconds, "getMilliseconds", 0) })
+ t.putStr("getUTCMilliseconds", func(r *Runtime) Value { return r.methodProp(r.dateproto_getUTCMilliseconds, "getUTCMilliseconds", 0) })
+ t.putStr("getTimezoneOffset", func(r *Runtime) Value { return r.methodProp(r.dateproto_getTimezoneOffset, "getTimezoneOffset", 0) })
+ t.putStr("setTime", func(r *Runtime) Value { return r.methodProp(r.dateproto_setTime, "setTime", 1) })
+ t.putStr("setMilliseconds", func(r *Runtime) Value { return r.methodProp(r.dateproto_setMilliseconds, "setMilliseconds", 1) })
+ t.putStr("setUTCMilliseconds", func(r *Runtime) Value { return r.methodProp(r.dateproto_setUTCMilliseconds, "setUTCMilliseconds", 1) })
+ t.putStr("setSeconds", func(r *Runtime) Value { return r.methodProp(r.dateproto_setSeconds, "setSeconds", 2) })
+ t.putStr("setUTCSeconds", func(r *Runtime) Value { return r.methodProp(r.dateproto_setUTCSeconds, "setUTCSeconds", 2) })
+ t.putStr("setMinutes", func(r *Runtime) Value { return r.methodProp(r.dateproto_setMinutes, "setMinutes", 3) })
+ t.putStr("setUTCMinutes", func(r *Runtime) Value { return r.methodProp(r.dateproto_setUTCMinutes, "setUTCMinutes", 3) })
+ t.putStr("setHours", func(r *Runtime) Value { return r.methodProp(r.dateproto_setHours, "setHours", 4) })
+ t.putStr("setUTCHours", func(r *Runtime) Value { return r.methodProp(r.dateproto_setUTCHours, "setUTCHours", 4) })
+ t.putStr("setDate", func(r *Runtime) Value { return r.methodProp(r.dateproto_setDate, "setDate", 1) })
+ t.putStr("setUTCDate", func(r *Runtime) Value { return r.methodProp(r.dateproto_setUTCDate, "setUTCDate", 1) })
+ t.putStr("setMonth", func(r *Runtime) Value { return r.methodProp(r.dateproto_setMonth, "setMonth", 2) })
+ t.putStr("setUTCMonth", func(r *Runtime) Value { return r.methodProp(r.dateproto_setUTCMonth, "setUTCMonth", 2) })
+ t.putStr("setFullYear", func(r *Runtime) Value { return r.methodProp(r.dateproto_setFullYear, "setFullYear", 3) })
+ t.putStr("setUTCFullYear", func(r *Runtime) Value { return r.methodProp(r.dateproto_setUTCFullYear, "setUTCFullYear", 3) })
+ t.putStr("toUTCString", func(r *Runtime) Value { return r.methodProp(r.dateproto_toUTCString, "toUTCString", 0) })
+ t.putStr("toISOString", func(r *Runtime) Value { return r.methodProp(r.dateproto_toISOString, "toISOString", 0) })
+ t.putStr("toJSON", func(r *Runtime) Value { return r.methodProp(r.dateproto_toJSON, "toJSON", 1) })
+
+ t.putSym(SymToPrimitive, func(r *Runtime) Value {
+ return valueProp(r.newNativeFunc(r.dateproto_toPrimitive, "[Symbol.toPrimitive]", 1), false, false, true)
+ })
+
+ return t
+}
+
+var dateProtoTemplate *objectTemplate
+var dateProtoTemplateOnce sync.Once
+
+func getDateProtoTemplate() *objectTemplate {
+ dateProtoTemplateOnce.Do(func() {
+ dateProtoTemplate = createDateProtoTemplate()
+ })
+ return dateProtoTemplate
+}
+
+func (r *Runtime) getDatePrototype() *Object {
+ ret := r.global.DatePrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.DatePrototype = ret
+ r.newTemplatedObject(getDateProtoTemplate(), ret)
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_error.go b/vendor/github.com/dop251/goja/builtin_error.go
index 5c63ea8a14..b07bf6a777 100644
--- a/vendor/github.com/dop251/goja/builtin_error.go
+++ b/vendor/github.com/dop251/goja/builtin_error.go
@@ -176,61 +176,114 @@ func (r *Runtime) error_toString(call FunctionCall) Value {
return sb.String()
}
-func (r *Runtime) createErrorPrototype(name String) *Object {
- o := r.newBaseObject(r.global.ErrorPrototype, classObject)
+func (r *Runtime) createErrorPrototype(name String, ctor *Object) *Object {
+ o := r.newBaseObject(r.getErrorPrototype(), classObject)
o._putProp("message", stringEmpty, true, false, true)
o._putProp("name", name, true, false, true)
+ o._putProp("constructor", ctor, true, false, true)
return o.val
}
-func (r *Runtime) initErrors() {
- r.global.ErrorPrototype = r.NewObject()
- o := r.global.ErrorPrototype.self
- o._putProp("message", stringEmpty, true, false, true)
- o._putProp("name", stringError, true, false, true)
- o._putProp("toString", r.newNativeFunc(r.error_toString, nil, "toString", nil, 0), true, false, true)
-
- r.global.Error = r.newNativeFuncConstruct(r.builtin_Error, "Error", r.global.ErrorPrototype, 1)
- r.addToGlobal("Error", r.global.Error)
-
- r.global.AggregateErrorPrototype = r.createErrorPrototype(stringAggregateError)
- r.global.AggregateError = r.newNativeFuncConstructProto(r.builtin_AggregateError, "AggregateError", r.global.AggregateErrorPrototype, r.global.Error, 2)
- r.addToGlobal("AggregateError", r.global.AggregateError)
-
- r.global.TypeErrorPrototype = r.createErrorPrototype(stringTypeError)
-
- r.global.TypeError = r.newNativeFuncConstructProto(r.builtin_Error, "TypeError", r.global.TypeErrorPrototype, r.global.Error, 1)
- r.addToGlobal("TypeError", r.global.TypeError)
-
- r.global.ReferenceErrorPrototype = r.createErrorPrototype(stringReferenceError)
-
- r.global.ReferenceError = r.newNativeFuncConstructProto(r.builtin_Error, "ReferenceError", r.global.ReferenceErrorPrototype, r.global.Error, 1)
- r.addToGlobal("ReferenceError", r.global.ReferenceError)
-
- r.global.SyntaxErrorPrototype = r.createErrorPrototype(stringSyntaxError)
+func (r *Runtime) getErrorPrototype() *Object {
+ ret := r.global.ErrorPrototype
+ if ret == nil {
+ ret = r.NewObject()
+ r.global.ErrorPrototype = ret
+ o := ret.self
+ o._putProp("message", stringEmpty, true, false, true)
+ o._putProp("name", stringError, true, false, true)
+ o._putProp("toString", r.newNativeFunc(r.error_toString, "toString", 0), true, false, true)
+ o._putProp("constructor", r.getError(), true, false, true)
+ }
+ return ret
+}
- r.global.SyntaxError = r.newNativeFuncConstructProto(r.builtin_Error, "SyntaxError", r.global.SyntaxErrorPrototype, r.global.Error, 1)
- r.addToGlobal("SyntaxError", r.global.SyntaxError)
+func (r *Runtime) getError() *Object {
+ ret := r.global.Error
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Error = ret
+ r.newNativeFuncConstruct(ret, r.builtin_Error, "Error", r.getErrorPrototype(), 1)
+ }
+ return ret
+}
- r.global.RangeErrorPrototype = r.createErrorPrototype(stringRangeError)
+func (r *Runtime) getAggregateError() *Object {
+ ret := r.global.AggregateError
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.AggregateError = ret
+ r.newNativeFuncConstructProto(ret, r.builtin_AggregateError, "AggregateError", r.createErrorPrototype(stringAggregateError, ret), r.getError(), 2)
+ }
+ return ret
+}
- r.global.RangeError = r.newNativeFuncConstructProto(r.builtin_Error, "RangeError", r.global.RangeErrorPrototype, r.global.Error, 1)
- r.addToGlobal("RangeError", r.global.RangeError)
+func (r *Runtime) getTypeError() *Object {
+ ret := r.global.TypeError
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.TypeError = ret
+ r.newNativeFuncConstructProto(ret, r.builtin_Error, "TypeError", r.createErrorPrototype(stringTypeError, ret), r.getError(), 1)
+ }
+ return ret
+}
- r.global.EvalErrorPrototype = r.createErrorPrototype(stringEvalError)
- o = r.global.EvalErrorPrototype.self
- o._putProp("name", stringEvalError, true, false, true)
+func (r *Runtime) getReferenceError() *Object {
+ ret := r.global.ReferenceError
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.ReferenceError = ret
+ r.newNativeFuncConstructProto(ret, r.builtin_Error, "ReferenceError", r.createErrorPrototype(stringReferenceError, ret), r.getError(), 1)
+ }
+ return ret
+}
- r.global.EvalError = r.newNativeFuncConstructProto(r.builtin_Error, "EvalError", r.global.EvalErrorPrototype, r.global.Error, 1)
- r.addToGlobal("EvalError", r.global.EvalError)
+func (r *Runtime) getSyntaxError() *Object {
+ ret := r.global.SyntaxError
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.SyntaxError = ret
+ r.newNativeFuncConstructProto(ret, r.builtin_Error, "SyntaxError", r.createErrorPrototype(stringSyntaxError, ret), r.getError(), 1)
+ }
+ return ret
+}
- r.global.URIErrorPrototype = r.createErrorPrototype(stringURIError)
+func (r *Runtime) getRangeError() *Object {
+ ret := r.global.RangeError
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.RangeError = ret
+ r.newNativeFuncConstructProto(ret, r.builtin_Error, "RangeError", r.createErrorPrototype(stringRangeError, ret), r.getError(), 1)
+ }
+ return ret
+}
- r.global.URIError = r.newNativeFuncConstructProto(r.builtin_Error, "URIError", r.global.URIErrorPrototype, r.global.Error, 1)
- r.addToGlobal("URIError", r.global.URIError)
+func (r *Runtime) getEvalError() *Object {
+ ret := r.global.EvalError
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.EvalError = ret
+ r.newNativeFuncConstructProto(ret, r.builtin_Error, "EvalError", r.createErrorPrototype(stringEvalError, ret), r.getError(), 1)
+ }
+ return ret
+}
- r.global.GoErrorPrototype = r.createErrorPrototype(stringGoError)
+func (r *Runtime) getURIError() *Object {
+ ret := r.global.URIError
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.URIError = ret
+ r.newNativeFuncConstructProto(ret, r.builtin_Error, "URIError", r.createErrorPrototype(stringURIError, ret), r.getError(), 1)
+ }
+ return ret
+}
- r.global.GoError = r.newNativeFuncConstructProto(r.builtin_Error, "GoError", r.global.GoErrorPrototype, r.global.Error, 1)
- r.addToGlobal("GoError", r.global.GoError)
+func (r *Runtime) getGoError() *Object {
+ ret := r.global.GoError
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.GoError = ret
+ r.newNativeFuncConstructProto(ret, r.builtin_Error, "GoError", r.createErrorPrototype(stringGoError, ret), r.getError(), 1)
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_function.go b/vendor/github.com/dop251/goja/builtin_function.go
index e9f90d15b5..26a1287c83 100644
--- a/vendor/github.com/dop251/goja/builtin_function.go
+++ b/vendor/github.com/dop251/goja/builtin_function.go
@@ -2,6 +2,7 @@ package goja
import (
"math"
+ "sync"
)
func (r *Runtime) functionCtor(args []Value, proto *Object, async, generator bool) *Object {
@@ -53,16 +54,10 @@ func (r *Runtime) builtin_generatorFunction(args []Value, proto *Object) *Object
func (r *Runtime) functionproto_toString(call FunctionCall) Value {
obj := r.toObject(call.This)
- if lazy, ok := obj.self.(*lazyObject); ok {
- obj.self = lazy.create(obj)
- }
switch f := obj.self.(type) {
case funcObjectImpl:
return f.source()
case *proxyObject:
- if lazy, ok := f.target.self.(*lazyObject); ok {
- f.target.self = lazy.create(f.target)
- }
if _, ok := f.target.self.(funcObjectImpl); ok {
return asciiString("function () { [native code] }")
}
@@ -210,18 +205,76 @@ lenNotInt:
return v
}
-func (r *Runtime) initFunction() {
- o := r.global.FunctionPrototype.self.(*nativeFuncObject)
- o.prototype = r.global.ObjectPrototype
- o._putProp("name", stringEmpty, false, false, true)
- o._putProp("apply", r.newNativeFunc(r.functionproto_apply, nil, "apply", nil, 2), true, false, true)
- o._putProp("bind", r.newNativeFunc(r.functionproto_bind, nil, "bind", nil, 1), true, false, true)
- o._putProp("call", r.newNativeFunc(r.functionproto_call, nil, "call", nil, 1), true, false, true)
- o._putProp("toString", r.newNativeFunc(r.functionproto_toString, nil, "toString", nil, 0), true, false, true)
- o._putSym(SymHasInstance, valueProp(r.newNativeFunc(r.functionproto_hasInstance, nil, "[Symbol.hasInstance]", nil, 1), false, false, false))
+func (r *Runtime) getThrower() *Object {
+ ret := r.global.thrower
+ if ret == nil {
+ ret = r.newNativeFunc(r.builtin_thrower, "", 0)
+ r.global.thrower = ret
+ r.object_freeze(FunctionCall{Arguments: []Value{ret}})
+ }
+ return ret
+}
- r.global.Function = r.newNativeFuncConstruct(r.builtin_Function, "Function", r.global.FunctionPrototype, 1)
- r.addToGlobal("Function", r.global.Function)
+func (r *Runtime) newThrowerProperty(configurable bool) Value {
+ thrower := r.getThrower()
+ return &valueProperty{
+ getterFunc: thrower,
+ setterFunc: thrower,
+ accessor: true,
+ configurable: configurable,
+ }
+}
+
+func createFunctionProtoTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.global.ObjectPrototype
+ }
+
+ t.putStr("constructor", func(r *Runtime) Value { return valueProp(r.getFunction(), true, false, true) })
+
+ t.putStr("length", func(r *Runtime) Value { return valueProp(_positiveZero, false, false, true) })
+ t.putStr("name", func(r *Runtime) Value { return valueProp(stringEmpty, false, false, true) })
+
+ t.putStr("apply", func(r *Runtime) Value { return r.methodProp(r.functionproto_apply, "apply", 2) })
+ t.putStr("bind", func(r *Runtime) Value { return r.methodProp(r.functionproto_bind, "bind", 1) })
+ t.putStr("call", func(r *Runtime) Value { return r.methodProp(r.functionproto_call, "call", 1) })
+ t.putStr("toString", func(r *Runtime) Value { return r.methodProp(r.functionproto_toString, "toString", 0) })
+
+ t.putStr("caller", func(r *Runtime) Value { return r.newThrowerProperty(true) })
+ t.putStr("arguments", func(r *Runtime) Value { return r.newThrowerProperty(true) })
+
+ t.putSym(SymHasInstance, func(r *Runtime) Value {
+ return valueProp(r.newNativeFunc(r.functionproto_hasInstance, "[Symbol.hasInstance]", 1), false, false, false)
+ })
+
+ return t
+}
+
+var functionProtoTemplate *objectTemplate
+var functionProtoTemplateOnce sync.Once
+
+func getFunctionProtoTemplate() *objectTemplate {
+ functionProtoTemplateOnce.Do(func() {
+ functionProtoTemplate = createFunctionProtoTemplate()
+ })
+ return functionProtoTemplate
+}
+
+func (r *Runtime) getFunctionPrototype() *Object {
+ ret := r.global.FunctionPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.FunctionPrototype = ret
+ r.newTemplatedFuncObject(getFunctionProtoTemplate(), ret, func(FunctionCall) Value {
+ return _undefined
+ }, nil)
+ }
+ return ret
+}
+
+func (r *Runtime) createFunction(v *Object) objectImpl {
+ return r.newNativeFuncConstructObj(v, r.builtin_Function, "Function", r.getFunctionPrototype(), 1)
}
func (r *Runtime) createAsyncFunctionProto(val *Object) objectImpl {
@@ -229,7 +282,7 @@ func (r *Runtime) createAsyncFunctionProto(val *Object) objectImpl {
class: classObject,
val: val,
extensible: true,
- prototype: r.global.FunctionPrototype,
+ prototype: r.getFunctionPrototype(),
}
o.init()
@@ -243,8 +296,9 @@ func (r *Runtime) createAsyncFunctionProto(val *Object) objectImpl {
func (r *Runtime) getAsyncFunctionPrototype() *Object {
var o *Object
if o = r.global.AsyncFunctionPrototype; o == nil {
- o = r.newLazyObject(r.createAsyncFunctionProto)
+ o = &Object{runtime: r}
r.global.AsyncFunctionPrototype = o
+ o.self = r.createAsyncFunctionProto(o)
}
return o
}
@@ -293,7 +347,7 @@ func (r *Runtime) builtin_genproto_throw(call FunctionCall) Value {
}
func (r *Runtime) createGeneratorFunctionProto(val *Object) objectImpl {
- o := newBaseObjectObj(val, r.global.FunctionPrototype, classObject)
+ o := newBaseObjectObj(val, r.getFunctionPrototype(), classObject)
o._putProp("constructor", r.getGeneratorFunction(), false, false, true)
o._putProp("prototype", r.getGeneratorPrototype(), false, false, true)
@@ -305,8 +359,9 @@ func (r *Runtime) createGeneratorFunctionProto(val *Object) objectImpl {
func (r *Runtime) getGeneratorFunctionPrototype() *Object {
var o *Object
if o = r.global.GeneratorFunctionPrototype; o == nil {
- o = r.newLazyObject(r.createGeneratorFunctionProto)
+ o = &Object{runtime: r}
r.global.GeneratorFunctionPrototype = o
+ o.self = r.createGeneratorFunctionProto(o)
}
return o
}
@@ -330,9 +385,9 @@ func (r *Runtime) createGeneratorProto(val *Object) objectImpl {
o := newBaseObjectObj(val, r.getIteratorPrototype(), classObject)
o._putProp("constructor", r.getGeneratorFunctionPrototype(), false, false, true)
- o._putProp("next", r.newNativeFunc(r.builtin_genproto_next, nil, "next", nil, 1), true, false, true)
- o._putProp("return", r.newNativeFunc(r.builtin_genproto_return, nil, "return", nil, 1), true, false, true)
- o._putProp("throw", r.newNativeFunc(r.builtin_genproto_throw, nil, "throw", nil, 1), true, false, true)
+ o._putProp("next", r.newNativeFunc(r.builtin_genproto_next, "next", 1), true, false, true)
+ o._putProp("return", r.newNativeFunc(r.builtin_genproto_return, "return", 1), true, false, true)
+ o._putProp("throw", r.newNativeFunc(r.builtin_genproto_throw, "throw", 1), true, false, true)
o._putSym(SymToStringTag, valueProp(asciiString(classGenerator), false, false, true))
@@ -348,3 +403,14 @@ func (r *Runtime) getGeneratorPrototype() *Object {
}
return o
}
+
+func (r *Runtime) getFunction() *Object {
+ ret := r.global.Function
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Function = ret
+ ret.self = r.createFunction(ret)
+ }
+
+ return ret
+}
diff --git a/vendor/github.com/dop251/goja/builtin_global.go b/vendor/github.com/dop251/goja/builtin_global.go
index cd6bfd40ce..5ef4176b49 100644
--- a/vendor/github.com/dop251/goja/builtin_global.go
+++ b/vendor/github.com/dop251/goja/builtin_global.go
@@ -8,6 +8,7 @@ import (
"regexp"
"strconv"
"strings"
+ "sync"
"unicode/utf8"
)
@@ -70,7 +71,7 @@ func (r *Runtime) _encode(uriString String, unescaped *[256]bool) String {
rn, _, err := reader.ReadRune()
if err != nil {
if err != io.EOF {
- panic(r.newError(r.global.URIError, "Malformed URI"))
+ panic(r.newError(r.getURIError(), "Malformed URI"))
}
break
}
@@ -127,7 +128,7 @@ func (r *Runtime) _decode(sv String, reservedSet *[256]bool) String {
switch s[i] {
case '%':
if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
- panic(r.newError(r.global.URIError, "Malformed URI"))
+ panic(r.newError(r.getURIError(), "Malformed URI"))
}
c := unhex(s[i+1])<<4 | unhex(s[i+2])
if !reservedSet[c] {
@@ -183,7 +184,7 @@ func (r *Runtime) _decode(sv String, reservedSet *[256]bool) String {
rn, size := utf8.DecodeRune(t)
if rn == utf8.RuneError {
if size != 3 || t[0] != 0xef || t[1] != 0xbf || t[2] != 0xbd {
- panic(r.newError(r.global.URIError, "Malformed URI"))
+ panic(r.newError(r.getURIError(), "Malformed URI"))
}
}
us = append(us, rn)
@@ -327,28 +328,84 @@ func (r *Runtime) builtin_unescape(call FunctionCall) Value {
return asciiString(asciiBuf)
}
-func (r *Runtime) initGlobalObject() {
- o := r.globalObject.self
- o._putProp("globalThis", r.globalObject, true, false, true)
- o._putProp("NaN", _NaN, false, false, false)
- o._putProp("undefined", _undefined, false, false, false)
- o._putProp("Infinity", _positiveInf, false, false, false)
-
- o._putProp("isNaN", r.newNativeFunc(r.builtin_isNaN, nil, "isNaN", nil, 1), true, false, true)
- o._putProp("parseInt", r.newNativeFunc(r.builtin_parseInt, nil, "parseInt", nil, 2), true, false, true)
- o._putProp("parseFloat", r.newNativeFunc(r.builtin_parseFloat, nil, "parseFloat", nil, 1), true, false, true)
- o._putProp("isFinite", r.newNativeFunc(r.builtin_isFinite, nil, "isFinite", nil, 1), true, false, true)
- o._putProp("decodeURI", r.newNativeFunc(r.builtin_decodeURI, nil, "decodeURI", nil, 1), true, false, true)
- o._putProp("decodeURIComponent", r.newNativeFunc(r.builtin_decodeURIComponent, nil, "decodeURIComponent", nil, 1), true, false, true)
- o._putProp("encodeURI", r.newNativeFunc(r.builtin_encodeURI, nil, "encodeURI", nil, 1), true, false, true)
- o._putProp("encodeURIComponent", r.newNativeFunc(r.builtin_encodeURIComponent, nil, "encodeURIComponent", nil, 1), true, false, true)
- o._putProp("escape", r.newNativeFunc(r.builtin_escape, nil, "escape", nil, 1), true, false, true)
- o._putProp("unescape", r.newNativeFunc(r.builtin_unescape, nil, "unescape", nil, 1), true, false, true)
-
- o._putSym(SymToStringTag, valueProp(asciiString(classGlobal), false, false, true))
+func createGlobalObjectTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.global.ObjectPrototype
+ }
+
+ t.putStr("Object", func(r *Runtime) Value { return valueProp(r.getObject(), true, false, true) })
+ t.putStr("Function", func(r *Runtime) Value { return valueProp(r.getFunction(), true, false, true) })
+ t.putStr("Array", func(r *Runtime) Value { return valueProp(r.getArray(), true, false, true) })
+ t.putStr("String", func(r *Runtime) Value { return valueProp(r.getString(), true, false, true) })
+ t.putStr("Number", func(r *Runtime) Value { return valueProp(r.getNumber(), true, false, true) })
+ t.putStr("RegExp", func(r *Runtime) Value { return valueProp(r.getRegExp(), true, false, true) })
+ t.putStr("Date", func(r *Runtime) Value { return valueProp(r.getDate(), true, false, true) })
+ t.putStr("Boolean", func(r *Runtime) Value { return valueProp(r.getBoolean(), true, false, true) })
+ t.putStr("Proxy", func(r *Runtime) Value { return valueProp(r.getProxy(), true, false, true) })
+ t.putStr("Reflect", func(r *Runtime) Value { return valueProp(r.getReflect(), true, false, true) })
+ t.putStr("Error", func(r *Runtime) Value { return valueProp(r.getError(), true, false, true) })
+ t.putStr("AggregateError", func(r *Runtime) Value { return valueProp(r.getAggregateError(), true, false, true) })
+ t.putStr("TypeError", func(r *Runtime) Value { return valueProp(r.getTypeError(), true, false, true) })
+ t.putStr("ReferenceError", func(r *Runtime) Value { return valueProp(r.getReferenceError(), true, false, true) })
+ t.putStr("SyntaxError", func(r *Runtime) Value { return valueProp(r.getSyntaxError(), true, false, true) })
+ t.putStr("RangeError", func(r *Runtime) Value { return valueProp(r.getRangeError(), true, false, true) })
+ t.putStr("EvalError", func(r *Runtime) Value { return valueProp(r.getEvalError(), true, false, true) })
+ t.putStr("URIError", func(r *Runtime) Value { return valueProp(r.getURIError(), true, false, true) })
+ t.putStr("GoError", func(r *Runtime) Value { return valueProp(r.getGoError(), true, false, true) })
+
+ t.putStr("eval", func(r *Runtime) Value { return valueProp(r.getEval(), true, false, true) })
+
+ t.putStr("Math", func(r *Runtime) Value { return valueProp(r.getMath(), true, false, true) })
+ t.putStr("JSON", func(r *Runtime) Value { return valueProp(r.getJSON(), true, false, true) })
+ addTypedArrays(t)
+ t.putStr("Symbol", func(r *Runtime) Value { return valueProp(r.getSymbol(), true, false, true) })
+ t.putStr("WeakSet", func(r *Runtime) Value { return valueProp(r.getWeakSet(), true, false, true) })
+ t.putStr("WeakMap", func(r *Runtime) Value { return valueProp(r.getWeakMap(), true, false, true) })
+ t.putStr("Map", func(r *Runtime) Value { return valueProp(r.getMap(), true, false, true) })
+ t.putStr("Set", func(r *Runtime) Value { return valueProp(r.getSet(), true, false, true) })
+ t.putStr("Promise", func(r *Runtime) Value { return valueProp(r.getPromise(), true, false, true) })
+
+ t.putStr("globalThis", func(r *Runtime) Value { return valueProp(r.globalObject, true, false, true) })
+ t.putStr("NaN", func(r *Runtime) Value { return valueProp(_NaN, false, false, false) })
+ t.putStr("undefined", func(r *Runtime) Value { return valueProp(_undefined, false, false, false) })
+ t.putStr("Infinity", func(r *Runtime) Value { return valueProp(_positiveInf, false, false, false) })
+
+ t.putStr("isNaN", func(r *Runtime) Value { return r.methodProp(r.builtin_isNaN, "isNaN", 1) })
+ t.putStr("parseInt", func(r *Runtime) Value { return valueProp(r.getParseInt(), true, false, true) })
+ t.putStr("parseFloat", func(r *Runtime) Value { return valueProp(r.getParseFloat(), true, false, true) })
+ t.putStr("isFinite", func(r *Runtime) Value { return r.methodProp(r.builtin_isFinite, "isFinite", 1) })
+ t.putStr("decodeURI", func(r *Runtime) Value { return r.methodProp(r.builtin_decodeURI, "decodeURI", 1) })
+ t.putStr("decodeURIComponent", func(r *Runtime) Value { return r.methodProp(r.builtin_decodeURIComponent, "decodeURIComponent", 1) })
+ t.putStr("encodeURI", func(r *Runtime) Value { return r.methodProp(r.builtin_encodeURI, "encodeURI", 1) })
+ t.putStr("encodeURIComponent", func(r *Runtime) Value { return r.methodProp(r.builtin_encodeURIComponent, "encodeURIComponent", 1) })
+ t.putStr("escape", func(r *Runtime) Value { return r.methodProp(r.builtin_escape, "escape", 1) })
+ t.putStr("unescape", func(r *Runtime) Value { return r.methodProp(r.builtin_unescape, "unescape", 1) })
// TODO: Annex B
+ t.putSym(SymToStringTag, func(r *Runtime) Value { return valueProp(asciiString(classGlobal), false, false, true) })
+
+ return t
+}
+
+var globalObjectTemplate *objectTemplate
+var globalObjectTemplateOnce sync.Once
+
+func getGlobalObjectTemplate() *objectTemplate {
+ globalObjectTemplateOnce.Do(func() {
+ globalObjectTemplate = createGlobalObjectTemplate()
+ })
+ return globalObjectTemplate
+}
+
+func (r *Runtime) getEval() *Object {
+ ret := r.global.Eval
+ if ret == nil {
+ ret = r.newNativeFunc(r.builtin_eval, "eval", 1)
+ r.global.Eval = ret
+ }
+ return ret
}
func digitVal(d byte) int {
diff --git a/vendor/github.com/dop251/goja/builtin_json.go b/vendor/github.com/dop251/goja/builtin_json.go
index ff0c9c71ee..9adb121d31 100644
--- a/vendor/github.com/dop251/goja/builtin_json.go
+++ b/vendor/github.com/dop251/goja/builtin_json.go
@@ -22,14 +22,14 @@ func (r *Runtime) builtinJSON_parse(call FunctionCall) Value {
value, err := r.builtinJSON_decodeValue(d)
if errors.Is(err, io.EOF) {
- panic(r.newError(r.global.SyntaxError, "Unexpected end of JSON input (%v)", err.Error()))
+ panic(r.newError(r.getSyntaxError(), "Unexpected end of JSON input (%v)", err.Error()))
}
if err != nil {
- panic(r.newError(r.global.SyntaxError, err.Error()))
+ panic(r.newError(r.getSyntaxError(), err.Error()))
}
if tok, err := d.Token(); err != io.EOF {
- panic(r.newError(r.global.SyntaxError, "Unexpected token at the end: %v", tok))
+ panic(r.newError(r.getSyntaxError(), "Unexpected token at the end: %v", tok))
}
var reviver func(FunctionCall) Value
@@ -522,11 +522,15 @@ func (ctx *_builtinJSON_stringifyContext) quote(str String) {
ctx.buf.WriteByte('"')
}
-func (r *Runtime) initJSON() {
- JSON := r.newBaseObject(r.global.ObjectPrototype, classObject)
- JSON._putProp("parse", r.newNativeFunc(r.builtinJSON_parse, nil, "parse", nil, 2), true, false, true)
- JSON._putProp("stringify", r.newNativeFunc(r.builtinJSON_stringify, nil, "stringify", nil, 3), true, false, true)
- JSON._putSym(SymToStringTag, valueProp(asciiString(classJSON), false, false, true))
-
- r.addToGlobal("JSON", JSON.val)
+func (r *Runtime) getJSON() *Object {
+ ret := r.global.JSON
+ if ret == nil {
+ JSON := r.newBaseObject(r.global.ObjectPrototype, classObject)
+ ret = JSON.val
+ r.global.JSON = ret
+ JSON._putProp("parse", r.newNativeFunc(r.builtinJSON_parse, "parse", 2), true, false, true)
+ JSON._putProp("stringify", r.newNativeFunc(r.builtinJSON_stringify, "stringify", 3), true, false, true)
+ JSON._putSym(SymToStringTag, valueProp(asciiString(classJSON), false, false, true))
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_map.go b/vendor/github.com/dop251/goja/builtin_map.go
index 15ce4226d6..819d025fe1 100644
--- a/vendor/github.com/dop251/goja/builtin_map.go
+++ b/vendor/github.com/dop251/goja/builtin_map.go
@@ -270,24 +270,24 @@ func (r *Runtime) mapIterProto_next(call FunctionCall) Value {
func (r *Runtime) createMapProto(val *Object) objectImpl {
o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject)
- o._putProp("constructor", r.global.Map, true, false, true)
- o._putProp("clear", r.newNativeFunc(r.mapProto_clear, nil, "clear", nil, 0), true, false, true)
- r.global.mapAdder = r.newNativeFunc(r.mapProto_set, nil, "set", nil, 2)
+ o._putProp("constructor", r.getMap(), true, false, true)
+ o._putProp("clear", r.newNativeFunc(r.mapProto_clear, "clear", 0), true, false, true)
+ r.global.mapAdder = r.newNativeFunc(r.mapProto_set, "set", 2)
o._putProp("set", r.global.mapAdder, true, false, true)
- o._putProp("delete", r.newNativeFunc(r.mapProto_delete, nil, "delete", nil, 1), true, false, true)
- o._putProp("forEach", r.newNativeFunc(r.mapProto_forEach, nil, "forEach", nil, 1), true, false, true)
- o._putProp("has", r.newNativeFunc(r.mapProto_has, nil, "has", nil, 1), true, false, true)
- o._putProp("get", r.newNativeFunc(r.mapProto_get, nil, "get", nil, 1), true, false, true)
+ o._putProp("delete", r.newNativeFunc(r.mapProto_delete, "delete", 1), true, false, true)
+ o._putProp("forEach", r.newNativeFunc(r.mapProto_forEach, "forEach", 1), true, false, true)
+ o._putProp("has", r.newNativeFunc(r.mapProto_has, "has", 1), true, false, true)
+ o._putProp("get", r.newNativeFunc(r.mapProto_get, "get", 1), true, false, true)
o.setOwnStr("size", &valueProperty{
- getterFunc: r.newNativeFunc(r.mapProto_getSize, nil, "get size", nil, 0),
+ getterFunc: r.newNativeFunc(r.mapProto_getSize, "get size", 0),
accessor: true,
writable: true,
configurable: true,
}, true)
- o._putProp("keys", r.newNativeFunc(r.mapProto_keys, nil, "keys", nil, 0), true, false, true)
- o._putProp("values", r.newNativeFunc(r.mapProto_values, nil, "values", nil, 0), true, false, true)
+ o._putProp("keys", r.newNativeFunc(r.mapProto_keys, "keys", 0), true, false, true)
+ o._putProp("values", r.newNativeFunc(r.mapProto_values, "values", 0), true, false, true)
- entriesFunc := r.newNativeFunc(r.mapProto_entries, nil, "entries", nil, 0)
+ entriesFunc := r.newNativeFunc(r.mapProto_entries, "entries", 0)
o._putProp("entries", entriesFunc, true, false, true)
o._putSym(SymIterator, valueProp(entriesFunc, true, false, true))
o._putSym(SymToStringTag, valueProp(asciiString(classMap), false, false, true))
@@ -296,7 +296,7 @@ func (r *Runtime) createMapProto(val *Object) objectImpl {
}
func (r *Runtime) createMap(val *Object) objectImpl {
- o := r.newNativeConstructOnly(val, r.builtin_newMap, r.global.MapPrototype, "Map", 0)
+ o := r.newNativeConstructOnly(val, r.builtin_newMap, r.getMapPrototype(), "Map", 0)
r.putSpeciesReturnThis(o)
return o
@@ -305,7 +305,7 @@ func (r *Runtime) createMap(val *Object) objectImpl {
func (r *Runtime) createMapIterProto(val *Object) objectImpl {
o := newBaseObjectObj(val, r.getIteratorPrototype(), classObject)
- o._putProp("next", r.newNativeFunc(r.mapIterProto_next, nil, "next", nil, 0), true, false, true)
+ o._putProp("next", r.newNativeFunc(r.mapIterProto_next, "next", 0), true, false, true)
o._putSym(SymToStringTag, valueProp(asciiString(classMapIterator), false, false, true))
return o
@@ -321,11 +321,22 @@ func (r *Runtime) getMapIteratorPrototype() *Object {
return o
}
-func (r *Runtime) initMap() {
- r.global.MapIteratorPrototype = r.newLazyObject(r.createMapIterProto)
-
- r.global.MapPrototype = r.newLazyObject(r.createMapProto)
- r.global.Map = r.newLazyObject(r.createMap)
+func (r *Runtime) getMapPrototype() *Object {
+ ret := r.global.MapPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.MapPrototype = ret
+ ret.self = r.createMapProto(ret)
+ }
+ return ret
+}
- r.addToGlobal("Map", r.global.Map)
+func (r *Runtime) getMap() *Object {
+ ret := r.global.Map
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Map = ret
+ ret.self = r.createMap(ret)
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_math.go b/vendor/github.com/dop251/goja/builtin_math.go
index 6347201be2..169ea18ca4 100644
--- a/vendor/github.com/dop251/goja/builtin_math.go
+++ b/vendor/github.com/dop251/goja/builtin_math.go
@@ -3,6 +3,7 @@ package goja
import (
"math"
"math/bits"
+ "sync"
)
func (r *Runtime) math_abs(call FunctionCall) Value {
@@ -280,64 +281,78 @@ func (r *Runtime) math_trunc(call FunctionCall) Value {
return floatToValue(math.Trunc(arg.ToFloat()))
}
-func (r *Runtime) createMath(val *Object) objectImpl {
- m := &baseObject{
- class: classObject,
- val: val,
- extensible: true,
- prototype: r.global.ObjectPrototype,
+func createMathTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.global.ObjectPrototype
}
- m.init()
-
- m._putProp("E", valueFloat(math.E), false, false, false)
- m._putProp("LN10", valueFloat(math.Ln10), false, false, false)
- m._putProp("LN2", valueFloat(math.Ln2), false, false, false)
- m._putProp("LOG10E", valueFloat(math.Log10E), false, false, false)
- m._putProp("LOG2E", valueFloat(math.Log2E), false, false, false)
- m._putProp("PI", valueFloat(math.Pi), false, false, false)
- m._putProp("SQRT1_2", valueFloat(sqrt1_2), false, false, false)
- m._putProp("SQRT2", valueFloat(math.Sqrt2), false, false, false)
- m._putSym(SymToStringTag, valueProp(asciiString(classMath), false, false, true))
-
- m._putProp("abs", r.newNativeFunc(r.math_abs, nil, "abs", nil, 1), true, false, true)
- m._putProp("acos", r.newNativeFunc(r.math_acos, nil, "acos", nil, 1), true, false, true)
- m._putProp("acosh", r.newNativeFunc(r.math_acosh, nil, "acosh", nil, 1), true, false, true)
- m._putProp("asin", r.newNativeFunc(r.math_asin, nil, "asin", nil, 1), true, false, true)
- m._putProp("asinh", r.newNativeFunc(r.math_asinh, nil, "asinh", nil, 1), true, false, true)
- m._putProp("atan", r.newNativeFunc(r.math_atan, nil, "atan", nil, 1), true, false, true)
- m._putProp("atanh", r.newNativeFunc(r.math_atanh, nil, "atanh", nil, 1), true, false, true)
- m._putProp("atan2", r.newNativeFunc(r.math_atan2, nil, "atan2", nil, 2), true, false, true)
- m._putProp("cbrt", r.newNativeFunc(r.math_cbrt, nil, "cbrt", nil, 1), true, false, true)
- m._putProp("ceil", r.newNativeFunc(r.math_ceil, nil, "ceil", nil, 1), true, false, true)
- m._putProp("clz32", r.newNativeFunc(r.math_clz32, nil, "clz32", nil, 1), true, false, true)
- m._putProp("cos", r.newNativeFunc(r.math_cos, nil, "cos", nil, 1), true, false, true)
- m._putProp("cosh", r.newNativeFunc(r.math_cosh, nil, "cosh", nil, 1), true, false, true)
- m._putProp("exp", r.newNativeFunc(r.math_exp, nil, "exp", nil, 1), true, false, true)
- m._putProp("expm1", r.newNativeFunc(r.math_expm1, nil, "expm1", nil, 1), true, false, true)
- m._putProp("floor", r.newNativeFunc(r.math_floor, nil, "floor", nil, 1), true, false, true)
- m._putProp("fround", r.newNativeFunc(r.math_fround, nil, "fround", nil, 1), true, false, true)
- m._putProp("hypot", r.newNativeFunc(r.math_hypot, nil, "hypot", nil, 2), true, false, true)
- m._putProp("imul", r.newNativeFunc(r.math_imul, nil, "imul", nil, 2), true, false, true)
- m._putProp("log", r.newNativeFunc(r.math_log, nil, "log", nil, 1), true, false, true)
- m._putProp("log1p", r.newNativeFunc(r.math_log1p, nil, "log1p", nil, 1), true, false, true)
- m._putProp("log10", r.newNativeFunc(r.math_log10, nil, "log10", nil, 1), true, false, true)
- m._putProp("log2", r.newNativeFunc(r.math_log2, nil, "log2", nil, 1), true, false, true)
- m._putProp("max", r.newNativeFunc(r.math_max, nil, "max", nil, 2), true, false, true)
- m._putProp("min", r.newNativeFunc(r.math_min, nil, "min", nil, 2), true, false, true)
- m._putProp("pow", r.newNativeFunc(r.math_pow, nil, "pow", nil, 2), true, false, true)
- m._putProp("random", r.newNativeFunc(r.math_random, nil, "random", nil, 0), true, false, true)
- m._putProp("round", r.newNativeFunc(r.math_round, nil, "round", nil, 1), true, false, true)
- m._putProp("sign", r.newNativeFunc(r.math_sign, nil, "sign", nil, 1), true, false, true)
- m._putProp("sin", r.newNativeFunc(r.math_sin, nil, "sin", nil, 1), true, false, true)
- m._putProp("sinh", r.newNativeFunc(r.math_sinh, nil, "sinh", nil, 1), true, false, true)
- m._putProp("sqrt", r.newNativeFunc(r.math_sqrt, nil, "sqrt", nil, 1), true, false, true)
- m._putProp("tan", r.newNativeFunc(r.math_tan, nil, "tan", nil, 1), true, false, true)
- m._putProp("tanh", r.newNativeFunc(r.math_tanh, nil, "tanh", nil, 1), true, false, true)
- m._putProp("trunc", r.newNativeFunc(r.math_trunc, nil, "trunc", nil, 1), true, false, true)
-
- return m
-}
-
-func (r *Runtime) initMath() {
- r.addToGlobal("Math", r.newLazyObject(r.createMath))
+
+ t.putStr("E", func(r *Runtime) Value { return valueProp(valueFloat(math.E), false, false, false) })
+ t.putStr("LN10", func(r *Runtime) Value { return valueProp(valueFloat(math.Ln10), false, false, false) })
+ t.putStr("LN2", func(r *Runtime) Value { return valueProp(valueFloat(math.Ln2), false, false, false) })
+ t.putStr("LOG10E", func(r *Runtime) Value { return valueProp(valueFloat(math.Log10E), false, false, false) })
+ t.putStr("LOG2E", func(r *Runtime) Value { return valueProp(valueFloat(math.Log2E), false, false, false) })
+ t.putStr("PI", func(r *Runtime) Value { return valueProp(valueFloat(math.Pi), false, false, false) })
+ t.putStr("SQRT1_2", func(r *Runtime) Value { return valueProp(valueFloat(sqrt1_2), false, false, false) })
+ t.putStr("SQRT2", func(r *Runtime) Value { return valueProp(valueFloat(math.Sqrt2), false, false, false) })
+
+ t.putSym(SymToStringTag, func(r *Runtime) Value { return valueProp(asciiString(classMath), false, false, true) })
+
+ t.putStr("abs", func(r *Runtime) Value { return r.methodProp(r.math_abs, "abs", 1) })
+ t.putStr("acos", func(r *Runtime) Value { return r.methodProp(r.math_acos, "acos", 1) })
+ t.putStr("acosh", func(r *Runtime) Value { return r.methodProp(r.math_acosh, "acosh", 1) })
+ t.putStr("asin", func(r *Runtime) Value { return r.methodProp(r.math_asin, "asin", 1) })
+ t.putStr("asinh", func(r *Runtime) Value { return r.methodProp(r.math_asinh, "asinh", 1) })
+ t.putStr("atan", func(r *Runtime) Value { return r.methodProp(r.math_atan, "atan", 1) })
+ t.putStr("atanh", func(r *Runtime) Value { return r.methodProp(r.math_atanh, "atanh", 1) })
+ t.putStr("atan2", func(r *Runtime) Value { return r.methodProp(r.math_atan2, "atan2", 2) })
+ t.putStr("cbrt", func(r *Runtime) Value { return r.methodProp(r.math_cbrt, "cbrt", 1) })
+ t.putStr("ceil", func(r *Runtime) Value { return r.methodProp(r.math_ceil, "ceil", 1) })
+ t.putStr("clz32", func(r *Runtime) Value { return r.methodProp(r.math_clz32, "clz32", 1) })
+ t.putStr("cos", func(r *Runtime) Value { return r.methodProp(r.math_cos, "cos", 1) })
+ t.putStr("cosh", func(r *Runtime) Value { return r.methodProp(r.math_cosh, "cosh", 1) })
+ t.putStr("exp", func(r *Runtime) Value { return r.methodProp(r.math_exp, "exp", 1) })
+ t.putStr("expm1", func(r *Runtime) Value { return r.methodProp(r.math_expm1, "expm1", 1) })
+ t.putStr("floor", func(r *Runtime) Value { return r.methodProp(r.math_floor, "floor", 1) })
+ t.putStr("fround", func(r *Runtime) Value { return r.methodProp(r.math_fround, "fround", 1) })
+ t.putStr("hypot", func(r *Runtime) Value { return r.methodProp(r.math_hypot, "hypot", 2) })
+ t.putStr("imul", func(r *Runtime) Value { return r.methodProp(r.math_imul, "imul", 2) })
+ t.putStr("log", func(r *Runtime) Value { return r.methodProp(r.math_log, "log", 1) })
+ t.putStr("log1p", func(r *Runtime) Value { return r.methodProp(r.math_log1p, "log1p", 1) })
+ t.putStr("log10", func(r *Runtime) Value { return r.methodProp(r.math_log10, "log10", 1) })
+ t.putStr("log2", func(r *Runtime) Value { return r.methodProp(r.math_log2, "log2", 1) })
+ t.putStr("max", func(r *Runtime) Value { return r.methodProp(r.math_max, "max", 2) })
+ t.putStr("min", func(r *Runtime) Value { return r.methodProp(r.math_min, "min", 2) })
+ t.putStr("pow", func(r *Runtime) Value { return r.methodProp(r.math_pow, "pow", 2) })
+ t.putStr("random", func(r *Runtime) Value { return r.methodProp(r.math_random, "random", 0) })
+ t.putStr("round", func(r *Runtime) Value { return r.methodProp(r.math_round, "round", 1) })
+ t.putStr("sign", func(r *Runtime) Value { return r.methodProp(r.math_sign, "sign", 1) })
+ t.putStr("sin", func(r *Runtime) Value { return r.methodProp(r.math_sin, "sin", 1) })
+ t.putStr("sinh", func(r *Runtime) Value { return r.methodProp(r.math_sinh, "sinh", 1) })
+ t.putStr("sqrt", func(r *Runtime) Value { return r.methodProp(r.math_sqrt, "sqrt", 1) })
+ t.putStr("tan", func(r *Runtime) Value { return r.methodProp(r.math_tan, "tan", 1) })
+ t.putStr("tanh", func(r *Runtime) Value { return r.methodProp(r.math_tanh, "tanh", 1) })
+ t.putStr("trunc", func(r *Runtime) Value { return r.methodProp(r.math_trunc, "trunc", 1) })
+
+ return t
+}
+
+var mathTemplate *objectTemplate
+var mathTemplateOnce sync.Once
+
+func getMathTemplate() *objectTemplate {
+ mathTemplateOnce.Do(func() {
+ mathTemplate = createMathTemplate()
+ })
+ return mathTemplate
+}
+
+func (r *Runtime) getMath() *Object {
+ ret := r.global.Math
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Math = ret
+ r.newTemplatedObject(getMathTemplate(), ret)
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_number.go b/vendor/github.com/dop251/goja/builtin_number.go
index 472e01475b..43add4ffb3 100644
--- a/vendor/github.com/dop251/goja/builtin_number.go
+++ b/vendor/github.com/dop251/goja/builtin_number.go
@@ -2,6 +2,7 @@ package goja
import (
"math"
+ "sync"
"github.com/dop251/goja/ftoa"
)
@@ -19,6 +20,9 @@ func (r *Runtime) toNumber(v Value) Value {
return t.valueOf()
}
}
+ if t == r.global.NumberPrototype {
+ return _positiveZero
+ }
}
panic(r.NewTypeError("Value is not a number: %s", v))
}
@@ -46,6 +50,9 @@ func (r *Runtime) numberproto_toString(call FunctionCall) Value {
}
}
}
+ if t == r.global.NumberPrototype {
+ return asciiString("0")
+ }
}
if numVal == nil {
panic(r.NewTypeError("Value is not a number"))
@@ -58,7 +65,7 @@ func (r *Runtime) numberproto_toString(call FunctionCall) Value {
}
if radix < 2 || radix > 36 {
- panic(r.newError(r.global.RangeError, "toString() radix argument must be between 2 and 36"))
+ panic(r.newError(r.getRangeError(), "toString() radix argument must be between 2 and 36"))
}
num := numVal.ToFloat()
@@ -87,7 +94,7 @@ func (r *Runtime) numberproto_toFixed(call FunctionCall) Value {
prec := call.Argument(0).ToInteger()
if prec < 0 || prec > 100 {
- panic(r.newError(r.global.RangeError, "toFixed() precision must be between 0 and 100"))
+ panic(r.newError(r.getRangeError(), "toFixed() precision must be between 0 and 100"))
}
if math.IsNaN(num) {
return stringNaN
@@ -116,7 +123,7 @@ func (r *Runtime) numberproto_toExponential(call FunctionCall) Value {
}
if prec < 0 || prec > 100 {
- panic(r.newError(r.global.RangeError, "toExponential() precision must be between 0 and 100"))
+ panic(r.newError(r.getRangeError(), "toExponential() precision must be between 0 and 100"))
}
return asciiString(fToStr(num, ftoa.ModeExponential, int(prec+1)))
@@ -141,7 +148,7 @@ func (r *Runtime) numberproto_toPrecision(call FunctionCall) Value {
return stringNegInfinity
}
if prec < 1 || prec > 100 {
- panic(r.newError(r.global.RangeError, "toPrecision() precision must be between 1 and 100"))
+ panic(r.newError(r.getRangeError(), "toPrecision() precision must be between 1 and 100"))
}
return asciiString(fToStr(num, ftoa.ModePrecision, int(prec)))
@@ -189,32 +196,108 @@ func (r *Runtime) number_isSafeInteger(call FunctionCall) Value {
return valueFalse
}
-func (r *Runtime) initNumber() {
- r.global.NumberPrototype = r.newPrimitiveObject(valueInt(0), r.global.ObjectPrototype, classNumber)
- o := r.global.NumberPrototype.self
- o._putProp("toExponential", r.newNativeFunc(r.numberproto_toExponential, nil, "toExponential", nil, 1), true, false, true)
- o._putProp("toFixed", r.newNativeFunc(r.numberproto_toFixed, nil, "toFixed", nil, 1), true, false, true)
- o._putProp("toLocaleString", r.newNativeFunc(r.numberproto_toString, nil, "toLocaleString", nil, 0), true, false, true)
- o._putProp("toPrecision", r.newNativeFunc(r.numberproto_toPrecision, nil, "toPrecision", nil, 1), true, false, true)
- o._putProp("toString", r.newNativeFunc(r.numberproto_toString, nil, "toString", nil, 1), true, false, true)
- o._putProp("valueOf", r.newNativeFunc(r.numberproto_valueOf, nil, "valueOf", nil, 0), true, false, true)
-
- r.global.Number = r.newNativeFunc(r.builtin_Number, r.builtin_newNumber, "Number", r.global.NumberPrototype, 1)
- o = r.global.Number.self
- o._putProp("EPSILON", _epsilon, false, false, false)
- o._putProp("isFinite", r.newNativeFunc(r.number_isFinite, nil, "isFinite", nil, 1), true, false, true)
- o._putProp("isInteger", r.newNativeFunc(r.number_isInteger, nil, "isInteger", nil, 1), true, false, true)
- o._putProp("isNaN", r.newNativeFunc(r.number_isNaN, nil, "isNaN", nil, 1), true, false, true)
- o._putProp("isSafeInteger", r.newNativeFunc(r.number_isSafeInteger, nil, "isSafeInteger", nil, 1), true, false, true)
- o._putProp("MAX_SAFE_INTEGER", valueInt(maxInt-1), false, false, false)
- o._putProp("MIN_SAFE_INTEGER", valueInt(-(maxInt - 1)), false, false, false)
- o._putProp("MIN_VALUE", valueFloat(math.SmallestNonzeroFloat64), false, false, false)
- o._putProp("MAX_VALUE", valueFloat(math.MaxFloat64), false, false, false)
- o._putProp("NaN", _NaN, false, false, false)
- o._putProp("NEGATIVE_INFINITY", _negativeInf, false, false, false)
- o._putProp("parseFloat", r.Get("parseFloat"), true, false, true)
- o._putProp("parseInt", r.Get("parseInt"), true, false, true)
- o._putProp("POSITIVE_INFINITY", _positiveInf, false, false, false)
- r.addToGlobal("Number", r.global.Number)
+func createNumberProtoTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.global.ObjectPrototype
+ }
+
+ t.putStr("constructor", func(r *Runtime) Value { return valueProp(r.getNumber(), true, false, true) })
+
+ t.putStr("toExponential", func(r *Runtime) Value { return r.methodProp(r.numberproto_toExponential, "toExponential", 1) })
+ t.putStr("toFixed", func(r *Runtime) Value { return r.methodProp(r.numberproto_toFixed, "toFixed", 1) })
+ t.putStr("toLocaleString", func(r *Runtime) Value { return r.methodProp(r.numberproto_toString, "toLocaleString", 0) })
+ t.putStr("toPrecision", func(r *Runtime) Value { return r.methodProp(r.numberproto_toPrecision, "toPrecision", 1) })
+ t.putStr("toString", func(r *Runtime) Value { return r.methodProp(r.numberproto_toString, "toString", 1) })
+ t.putStr("valueOf", func(r *Runtime) Value { return r.methodProp(r.numberproto_valueOf, "valueOf", 0) })
+
+ return t
+}
+
+var numberProtoTemplate *objectTemplate
+var numberProtoTemplateOnce sync.Once
+func getNumberProtoTemplate() *objectTemplate {
+ numberProtoTemplateOnce.Do(func() {
+ numberProtoTemplate = createNumberProtoTemplate()
+ })
+ return numberProtoTemplate
+}
+
+func (r *Runtime) getNumberPrototype() *Object {
+ ret := r.global.NumberPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.NumberPrototype = ret
+ o := r.newTemplatedObject(getNumberProtoTemplate(), ret)
+ o.class = classNumber
+ }
+ return ret
+}
+
+func (r *Runtime) getParseFloat() *Object {
+ ret := r.global.parseFloat
+ if ret == nil {
+ ret = r.newNativeFunc(r.builtin_parseFloat, "parseFloat", 1)
+ r.global.parseFloat = ret
+ }
+ return ret
+}
+
+func (r *Runtime) getParseInt() *Object {
+ ret := r.global.parseInt
+ if ret == nil {
+ ret = r.newNativeFunc(r.builtin_parseInt, "parseInt", 2)
+ r.global.parseInt = ret
+ }
+ return ret
+}
+
+func createNumberTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.getFunctionPrototype()
+ }
+ t.putStr("length", func(r *Runtime) Value { return valueProp(intToValue(1), false, false, true) })
+ t.putStr("name", func(r *Runtime) Value { return valueProp(asciiString("Number"), false, false, true) })
+
+ t.putStr("prototype", func(r *Runtime) Value { return valueProp(r.getNumberPrototype(), false, false, false) })
+
+ t.putStr("EPSILON", func(r *Runtime) Value { return valueProp(_epsilon, false, false, false) })
+ t.putStr("isFinite", func(r *Runtime) Value { return r.methodProp(r.number_isFinite, "isFinite", 1) })
+ t.putStr("isInteger", func(r *Runtime) Value { return r.methodProp(r.number_isInteger, "isInteger", 1) })
+ t.putStr("isNaN", func(r *Runtime) Value { return r.methodProp(r.number_isNaN, "isNaN", 1) })
+ t.putStr("isSafeInteger", func(r *Runtime) Value { return r.methodProp(r.number_isSafeInteger, "isSafeInteger", 1) })
+ t.putStr("MAX_SAFE_INTEGER", func(r *Runtime) Value { return valueProp(valueInt(maxInt-1), false, false, false) })
+ t.putStr("MIN_SAFE_INTEGER", func(r *Runtime) Value { return valueProp(valueInt(-(maxInt - 1)), false, false, false) })
+ t.putStr("MIN_VALUE", func(r *Runtime) Value { return valueProp(valueFloat(math.SmallestNonzeroFloat64), false, false, false) })
+ t.putStr("MAX_VALUE", func(r *Runtime) Value { return valueProp(valueFloat(math.MaxFloat64), false, false, false) })
+ t.putStr("NaN", func(r *Runtime) Value { return valueProp(_NaN, false, false, false) })
+ t.putStr("NEGATIVE_INFINITY", func(r *Runtime) Value { return valueProp(_negativeInf, false, false, false) })
+ t.putStr("parseFloat", func(r *Runtime) Value { return valueProp(r.getParseFloat(), true, false, true) })
+ t.putStr("parseInt", func(r *Runtime) Value { return valueProp(r.getParseInt(), true, false, true) })
+ t.putStr("POSITIVE_INFINITY", func(r *Runtime) Value { return valueProp(_positiveInf, false, false, false) })
+
+ return t
+}
+
+var numberTemplate *objectTemplate
+var numberTemplateOnce sync.Once
+
+func getNumberTemplate() *objectTemplate {
+ numberTemplateOnce.Do(func() {
+ numberTemplate = createNumberTemplate()
+ })
+ return numberTemplate
+}
+
+func (r *Runtime) getNumber() *Object {
+ ret := r.global.Number
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Number = ret
+ r.newTemplatedFuncObject(getNumberTemplate(), ret, r.builtin_Number,
+ r.wrapNativeConstruct(r.builtin_newNumber, ret, r.getNumberPrototype()))
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_object.go b/vendor/github.com/dop251/goja/builtin_object.go
index 4aabbbdc51..6bf1ff80ae 100644
--- a/vendor/github.com/dop251/goja/builtin_object.go
+++ b/vendor/github.com/dop251/goja/builtin_object.go
@@ -2,10 +2,11 @@ package goja
import (
"fmt"
+ "sync"
)
func (r *Runtime) builtin_Object(args []Value, newTarget *Object) *Object {
- if newTarget != nil && newTarget != r.global.Object {
+ if newTarget != nil && newTarget != r.getObject() {
proto := r.getPrototypeFromCtor(newTarget, nil, r.global.ObjectPrototype)
return r.newBaseObject(proto, classObject).val
}
@@ -595,45 +596,116 @@ func (r *Runtime) object_hasOwn(call FunctionCall) Value {
}
}
-func (r *Runtime) initObject() {
- o := r.global.ObjectPrototype.self
- o._putProp("toString", r.newNativeFunc(r.objectproto_toString, nil, "toString", nil, 0), true, false, true)
- o._putProp("toLocaleString", r.newNativeFunc(r.objectproto_toLocaleString, nil, "toLocaleString", nil, 0), true, false, true)
- o._putProp("valueOf", r.newNativeFunc(r.objectproto_valueOf, nil, "valueOf", nil, 0), true, false, true)
- o._putProp("hasOwnProperty", r.newNativeFunc(r.objectproto_hasOwnProperty, nil, "hasOwnProperty", nil, 1), true, false, true)
- o._putProp("isPrototypeOf", r.newNativeFunc(r.objectproto_isPrototypeOf, nil, "isPrototypeOf", nil, 1), true, false, true)
- o._putProp("propertyIsEnumerable", r.newNativeFunc(r.objectproto_propertyIsEnumerable, nil, "propertyIsEnumerable", nil, 1), true, false, true)
- o.defineOwnPropertyStr(__proto__, PropertyDescriptor{
- Getter: r.newNativeFunc(r.objectproto_getProto, nil, "get __proto__", nil, 0),
- Setter: r.newNativeFunc(r.objectproto_setProto, nil, "set __proto__", nil, 1),
- Configurable: FLAG_TRUE,
- }, true)
-
- r.global.Object = r.newNativeConstructOnly(nil, r.builtin_Object, r.global.ObjectPrototype, "Object", 1).val
- r.global.ObjectPrototype.self._putProp("constructor", r.global.Object, true, false, true)
- o = r.global.Object.self
- o._putProp("assign", r.newNativeFunc(r.object_assign, nil, "assign", nil, 2), true, false, true)
- o._putProp("defineProperty", r.newNativeFunc(r.object_defineProperty, nil, "defineProperty", nil, 3), true, false, true)
- o._putProp("defineProperties", r.newNativeFunc(r.object_defineProperties, nil, "defineProperties", nil, 2), true, false, true)
- o._putProp("entries", r.newNativeFunc(r.object_entries, nil, "entries", nil, 1), true, false, true)
- o._putProp("getOwnPropertyDescriptor", r.newNativeFunc(r.object_getOwnPropertyDescriptor, nil, "getOwnPropertyDescriptor", nil, 2), true, false, true)
- o._putProp("getOwnPropertyDescriptors", r.newNativeFunc(r.object_getOwnPropertyDescriptors, nil, "getOwnPropertyDescriptors", nil, 1), true, false, true)
- o._putProp("getPrototypeOf", r.newNativeFunc(r.object_getPrototypeOf, nil, "getPrototypeOf", nil, 1), true, false, true)
- o._putProp("is", r.newNativeFunc(r.object_is, nil, "is", nil, 2), true, false, true)
- o._putProp("getOwnPropertyNames", r.newNativeFunc(r.object_getOwnPropertyNames, nil, "getOwnPropertyNames", nil, 1), true, false, true)
- o._putProp("getOwnPropertySymbols", r.newNativeFunc(r.object_getOwnPropertySymbols, nil, "getOwnPropertySymbols", nil, 1), true, false, true)
- o._putProp("create", r.newNativeFunc(r.object_create, nil, "create", nil, 2), true, false, true)
- o._putProp("seal", r.newNativeFunc(r.object_seal, nil, "seal", nil, 1), true, false, true)
- o._putProp("freeze", r.newNativeFunc(r.object_freeze, nil, "freeze", nil, 1), true, false, true)
- o._putProp("preventExtensions", r.newNativeFunc(r.object_preventExtensions, nil, "preventExtensions", nil, 1), true, false, true)
- o._putProp("isSealed", r.newNativeFunc(r.object_isSealed, nil, "isSealed", nil, 1), true, false, true)
- o._putProp("isFrozen", r.newNativeFunc(r.object_isFrozen, nil, "isFrozen", nil, 1), true, false, true)
- o._putProp("isExtensible", r.newNativeFunc(r.object_isExtensible, nil, "isExtensible", nil, 1), true, false, true)
- o._putProp("keys", r.newNativeFunc(r.object_keys, nil, "keys", nil, 1), true, false, true)
- o._putProp("setPrototypeOf", r.newNativeFunc(r.object_setPrototypeOf, nil, "setPrototypeOf", nil, 2), true, false, true)
- o._putProp("values", r.newNativeFunc(r.object_values, nil, "values", nil, 1), true, false, true)
- o._putProp("fromEntries", r.newNativeFunc(r.object_fromEntries, nil, "fromEntries", nil, 1), true, false, true)
- o._putProp("hasOwn", r.newNativeFunc(r.object_hasOwn, nil, "hasOwn", nil, 2), true, false, true)
-
- r.addToGlobal("Object", r.global.Object)
+func createObjectTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.getFunctionPrototype()
+ }
+
+ t.putStr("length", func(r *Runtime) Value { return valueProp(intToValue(1), false, false, true) })
+ t.putStr("name", func(r *Runtime) Value { return valueProp(asciiString("Object"), false, false, true) })
+
+ t.putStr("prototype", func(r *Runtime) Value { return valueProp(r.global.ObjectPrototype, false, false, false) })
+
+ t.putStr("assign", func(r *Runtime) Value { return r.methodProp(r.object_assign, "assign", 2) })
+ t.putStr("defineProperty", func(r *Runtime) Value { return r.methodProp(r.object_defineProperty, "defineProperty", 3) })
+ t.putStr("defineProperties", func(r *Runtime) Value { return r.methodProp(r.object_defineProperties, "defineProperties", 2) })
+ t.putStr("entries", func(r *Runtime) Value { return r.methodProp(r.object_entries, "entries", 1) })
+ t.putStr("getOwnPropertyDescriptor", func(r *Runtime) Value {
+ return r.methodProp(r.object_getOwnPropertyDescriptor, "getOwnPropertyDescriptor", 2)
+ })
+ t.putStr("getOwnPropertyDescriptors", func(r *Runtime) Value {
+ return r.methodProp(r.object_getOwnPropertyDescriptors, "getOwnPropertyDescriptors", 1)
+ })
+ t.putStr("getPrototypeOf", func(r *Runtime) Value { return r.methodProp(r.object_getPrototypeOf, "getPrototypeOf", 1) })
+ t.putStr("is", func(r *Runtime) Value { return r.methodProp(r.object_is, "is", 2) })
+ t.putStr("getOwnPropertyNames", func(r *Runtime) Value { return r.methodProp(r.object_getOwnPropertyNames, "getOwnPropertyNames", 1) })
+ t.putStr("getOwnPropertySymbols", func(r *Runtime) Value {
+ return r.methodProp(r.object_getOwnPropertySymbols, "getOwnPropertySymbols", 1)
+ })
+ t.putStr("create", func(r *Runtime) Value { return r.methodProp(r.object_create, "create", 2) })
+ t.putStr("seal", func(r *Runtime) Value { return r.methodProp(r.object_seal, "seal", 1) })
+ t.putStr("freeze", func(r *Runtime) Value { return r.methodProp(r.object_freeze, "freeze", 1) })
+ t.putStr("preventExtensions", func(r *Runtime) Value { return r.methodProp(r.object_preventExtensions, "preventExtensions", 1) })
+ t.putStr("isSealed", func(r *Runtime) Value { return r.methodProp(r.object_isSealed, "isSealed", 1) })
+ t.putStr("isFrozen", func(r *Runtime) Value { return r.methodProp(r.object_isFrozen, "isFrozen", 1) })
+ t.putStr("isExtensible", func(r *Runtime) Value { return r.methodProp(r.object_isExtensible, "isExtensible", 1) })
+ t.putStr("keys", func(r *Runtime) Value { return r.methodProp(r.object_keys, "keys", 1) })
+ t.putStr("setPrototypeOf", func(r *Runtime) Value { return r.methodProp(r.object_setPrototypeOf, "setPrototypeOf", 2) })
+ t.putStr("values", func(r *Runtime) Value { return r.methodProp(r.object_values, "values", 1) })
+ t.putStr("fromEntries", func(r *Runtime) Value { return r.methodProp(r.object_fromEntries, "fromEntries", 1) })
+ t.putStr("hasOwn", func(r *Runtime) Value { return r.methodProp(r.object_hasOwn, "hasOwn", 2) })
+
+ return t
+}
+
+var _objectTemplate *objectTemplate
+var objectTemplateOnce sync.Once
+
+func getObjectTemplate() *objectTemplate {
+ objectTemplateOnce.Do(func() {
+ _objectTemplate = createObjectTemplate()
+ })
+ return _objectTemplate
+}
+
+func (r *Runtime) getObject() *Object {
+ ret := r.global.Object
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Object = ret
+ r.newTemplatedFuncObject(getObjectTemplate(), ret, func(call FunctionCall) Value {
+ return r.builtin_Object(call.Arguments, nil)
+ }, r.builtin_Object)
+ }
+ return ret
+}
+
+/*
+func (r *Runtime) getObjectPrototype() *Object {
+ ret := r.global.ObjectPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.ObjectPrototype = ret
+ r.newTemplatedObject(getObjectProtoTemplate(), ret)
+ }
+ return ret
+}
+*/
+
+var objectProtoTemplate *objectTemplate
+var objectProtoTemplateOnce sync.Once
+
+func getObjectProtoTemplate() *objectTemplate {
+ objectProtoTemplateOnce.Do(func() {
+ objectProtoTemplate = createObjectProtoTemplate()
+ })
+ return objectProtoTemplate
+}
+
+func createObjectProtoTemplate() *objectTemplate {
+ t := newObjectTemplate()
+
+ // null prototype
+
+ t.putStr("constructor", func(r *Runtime) Value { return valueProp(r.getObject(), true, false, true) })
+
+ t.putStr("toString", func(r *Runtime) Value { return r.methodProp(r.objectproto_toString, "toString", 0) })
+ t.putStr("toLocaleString", func(r *Runtime) Value { return r.methodProp(r.objectproto_toLocaleString, "toLocaleString", 0) })
+ t.putStr("valueOf", func(r *Runtime) Value { return r.methodProp(r.objectproto_valueOf, "valueOf", 0) })
+ t.putStr("hasOwnProperty", func(r *Runtime) Value { return r.methodProp(r.objectproto_hasOwnProperty, "hasOwnProperty", 1) })
+ t.putStr("isPrototypeOf", func(r *Runtime) Value { return r.methodProp(r.objectproto_isPrototypeOf, "isPrototypeOf", 1) })
+ t.putStr("propertyIsEnumerable", func(r *Runtime) Value {
+ return r.methodProp(r.objectproto_propertyIsEnumerable, "propertyIsEnumerable", 1)
+ })
+ t.putStr(__proto__, func(r *Runtime) Value {
+ return &valueProperty{
+ accessor: true,
+ getterFunc: r.newNativeFunc(r.objectproto_getProto, "get __proto__", 0),
+ setterFunc: r.newNativeFunc(r.objectproto_setProto, "set __proto__", 1),
+ configurable: true,
+ }
+ })
+
+ return t
}
diff --git a/vendor/github.com/dop251/goja/builtin_promise.go b/vendor/github.com/dop251/goja/builtin_promise.go
index 96dd2f5fd8..d51f27d17a 100644
--- a/vendor/github.com/dop251/goja/builtin_promise.go
+++ b/vendor/github.com/dop251/goja/builtin_promise.go
@@ -108,7 +108,7 @@ func (p *Promise) createResolvingFunctions() (resolve, reject *Object) {
}
}
return p.fulfill(resolution)
- }, nil, "", nil, 1),
+ }, "", 1),
p.val.runtime.newNativeFunc(func(call FunctionCall) Value {
if alreadyResolved {
return _undefined
@@ -116,7 +116,7 @@ func (p *Promise) createResolvingFunctions() (resolve, reject *Object) {
alreadyResolved = true
reason := call.Argument(0)
return p.reject(reason)
- }, nil, "", nil, 1)
+ }, "", 1)
}
func (p *Promise) reject(reason Value) Value {
@@ -253,7 +253,7 @@ func (r *Runtime) builtin_newPromise(args []Value, newTarget *Object) *Object {
}
executor := r.toCallable(arg0)
- proto := r.getPrototypeFromCtor(newTarget, r.global.Promise, r.global.PromisePrototype)
+ proto := r.getPrototypeFromCtor(newTarget, r.global.Promise, r.getPromisePrototype())
po := r.newPromise(proto)
resolve, reject := po.createResolvingFunctions()
@@ -271,7 +271,7 @@ func (r *Runtime) builtin_newPromise(args []Value, newTarget *Object) *Object {
func (r *Runtime) promiseProto_then(call FunctionCall) Value {
thisObj := r.toObject(call.This)
if p, ok := thisObj.self.(*Promise); ok {
- c := r.speciesConstructorObj(thisObj, r.global.Promise)
+ c := r.speciesConstructorObj(thisObj, r.getPromise())
resultCapability := r.newPromiseCapability(c)
return r.performPromiseThen(p, call.Argument(0), call.Argument(1), resultCapability)
}
@@ -280,8 +280,8 @@ func (r *Runtime) promiseProto_then(call FunctionCall) Value {
func (r *Runtime) newPromiseCapability(c *Object) *promiseCapability {
pcap := new(promiseCapability)
- if c == r.global.Promise {
- p := r.newPromise(r.global.PromisePrototype)
+ if c == r.getPromise() {
+ p := r.newPromise(r.getPromisePrototype())
pcap.resolveObj, pcap.rejectObj = p.createResolvingFunctions()
pcap.promise = p.val
} else {
@@ -300,7 +300,7 @@ func (r *Runtime) newPromiseCapability(c *Object) *promiseCapability {
reject = arg
}
return nil
- }, nil, "", nil, 2)
+ }, "", 2)
pcap.promise = r.toConstructor(c)([]Value{executor}, c)
pcap.resolveObj = r.toObject(resolve)
r.toCallable(pcap.resolveObj) // make sure it's callable
@@ -353,7 +353,7 @@ func (r *Runtime) promiseResolve(c *Object, x Value) *Object {
func (r *Runtime) promiseProto_finally(call FunctionCall) Value {
promise := r.toObject(call.This)
- c := r.speciesConstructorObj(promise, r.global.Promise)
+ c := r.speciesConstructorObj(promise, r.getPromise())
onFinally := call.Argument(0)
var thenFinally, catchFinally Value
if onFinallyFn, ok := assertCallable(onFinally); !ok {
@@ -365,9 +365,9 @@ func (r *Runtime) promiseProto_finally(call FunctionCall) Value {
promise := r.promiseResolve(c, result)
valueThunk := r.newNativeFunc(func(call FunctionCall) Value {
return value
- }, nil, "", nil, 0)
+ }, "", 0)
return r.invoke(promise, "then", valueThunk)
- }, nil, "", nil, 1)
+ }, "", 1)
catchFinally = r.newNativeFunc(func(call FunctionCall) Value {
reason := call.Argument(0)
@@ -375,9 +375,9 @@ func (r *Runtime) promiseProto_finally(call FunctionCall) Value {
promise := r.promiseResolve(c, result)
thrower := r.newNativeFunc(func(call FunctionCall) Value {
panic(reason)
- }, nil, "", nil, 0)
+ }, "", 0)
return r.invoke(promise, "then", thrower)
- }, nil, "", nil, 1)
+ }, "", 1)
}
return r.invoke(promise, "then", thenFinally, catchFinally)
}
@@ -424,7 +424,7 @@ func (r *Runtime) promise_all(call FunctionCall) Value {
pcap.resolve(r.newArrayValues(values))
}
return _undefined
- }, nil, "", nil, 1)
+ }, "", 1)
remainingElementsCount++
r.invoke(nextPromise, "then", onFulfilled, pcap.rejectObj)
})
@@ -465,7 +465,7 @@ func (r *Runtime) promise_allSettled(call FunctionCall) Value {
pcap.resolve(r.newArrayValues(values))
}
return _undefined
- }, nil, "", nil, 1)
+ }, "", 1)
}
onFulfilled := reaction(asciiString("fulfilled"), "value")
onRejected := reaction(asciiString("rejected"), "reason")
@@ -502,19 +502,19 @@ func (r *Runtime) promise_any(call FunctionCall) Value {
errors[index] = call.Argument(0)
remainingElementsCount--
if remainingElementsCount == 0 {
- _error := r.builtin_new(r.global.AggregateError, nil)
+ _error := r.builtin_new(r.getAggregateError(), nil)
_error.self._putProp("errors", r.newArrayValues(errors), true, false, true)
pcap.reject(_error)
}
return _undefined
- }, nil, "", nil, 1)
+ }, "", 1)
remainingElementsCount++
r.invoke(nextPromise, "then", pcap.resolveObj, onRejected)
})
remainingElementsCount--
if remainingElementsCount == 0 {
- _error := r.builtin_new(r.global.AggregateError, nil)
+ _error := r.builtin_new(r.getAggregateError(), nil)
_error.self._putProp("errors", r.newArrayValues(errors), true, false, true)
pcap.reject(_error)
}
@@ -549,11 +549,11 @@ func (r *Runtime) promise_resolve(call FunctionCall) Value {
func (r *Runtime) createPromiseProto(val *Object) objectImpl {
o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject)
- o._putProp("constructor", r.global.Promise, true, false, true)
+ o._putProp("constructor", r.getPromise(), true, false, true)
- o._putProp("catch", r.newNativeFunc(r.promiseProto_catch, nil, "catch", nil, 1), true, false, true)
- o._putProp("finally", r.newNativeFunc(r.promiseProto_finally, nil, "finally", nil, 1), true, false, true)
- o._putProp("then", r.newNativeFunc(r.promiseProto_then, nil, "then", nil, 2), true, false, true)
+ o._putProp("catch", r.newNativeFunc(r.promiseProto_catch, "catch", 1), true, false, true)
+ o._putProp("finally", r.newNativeFunc(r.promiseProto_finally, "finally", 1), true, false, true)
+ o._putProp("then", r.newNativeFunc(r.promiseProto_then, "then", 2), true, false, true)
o._putSym(SymToStringTag, valueProp(asciiString(classPromise), false, false, true))
@@ -561,25 +561,38 @@ func (r *Runtime) createPromiseProto(val *Object) objectImpl {
}
func (r *Runtime) createPromise(val *Object) objectImpl {
- o := r.newNativeConstructOnly(val, r.builtin_newPromise, r.global.PromisePrototype, "Promise", 1)
+ o := r.newNativeConstructOnly(val, r.builtin_newPromise, r.getPromisePrototype(), "Promise", 1)
- o._putProp("all", r.newNativeFunc(r.promise_all, nil, "all", nil, 1), true, false, true)
- o._putProp("allSettled", r.newNativeFunc(r.promise_allSettled, nil, "allSettled", nil, 1), true, false, true)
- o._putProp("any", r.newNativeFunc(r.promise_any, nil, "any", nil, 1), true, false, true)
- o._putProp("race", r.newNativeFunc(r.promise_race, nil, "race", nil, 1), true, false, true)
- o._putProp("reject", r.newNativeFunc(r.promise_reject, nil, "reject", nil, 1), true, false, true)
- o._putProp("resolve", r.newNativeFunc(r.promise_resolve, nil, "resolve", nil, 1), true, false, true)
+ o._putProp("all", r.newNativeFunc(r.promise_all, "all", 1), true, false, true)
+ o._putProp("allSettled", r.newNativeFunc(r.promise_allSettled, "allSettled", 1), true, false, true)
+ o._putProp("any", r.newNativeFunc(r.promise_any, "any", 1), true, false, true)
+ o._putProp("race", r.newNativeFunc(r.promise_race, "race", 1), true, false, true)
+ o._putProp("reject", r.newNativeFunc(r.promise_reject, "reject", 1), true, false, true)
+ o._putProp("resolve", r.newNativeFunc(r.promise_resolve, "resolve", 1), true, false, true)
r.putSpeciesReturnThis(o)
return o
}
-func (r *Runtime) initPromise() {
- r.global.PromisePrototype = r.newLazyObject(r.createPromiseProto)
- r.global.Promise = r.newLazyObject(r.createPromise)
+func (r *Runtime) getPromisePrototype() *Object {
+ ret := r.global.PromisePrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.PromisePrototype = ret
+ ret.self = r.createPromiseProto(ret)
+ }
+ return ret
+}
- r.addToGlobal("Promise", r.global.Promise)
+func (r *Runtime) getPromise() *Object {
+ ret := r.global.Promise
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Promise = ret
+ ret.self = r.createPromise(ret)
+ }
+ return ret
}
func (r *Runtime) wrapPromiseReaction(fObj *Object) func(interface{}) {
@@ -609,7 +622,7 @@ func (r *Runtime) wrapPromiseReaction(fObj *Object) func(interface{}) {
// }()
// }
func (r *Runtime) NewPromise() (promise *Promise, resolve func(result interface{}), reject func(reason interface{})) {
- p := r.newPromise(r.global.PromisePrototype)
+ p := r.newPromise(r.getPromisePrototype())
resolveF, rejectF := p.createResolvingFunctions()
return p, r.wrapPromiseReaction(resolveF), r.wrapPromiseReaction(rejectF)
}
diff --git a/vendor/github.com/dop251/goja/builtin_proxy.go b/vendor/github.com/dop251/goja/builtin_proxy.go
index ee62f75e45..f589930677 100644
--- a/vendor/github.com/dop251/goja/builtin_proxy.go
+++ b/vendor/github.com/dop251/goja/builtin_proxy.go
@@ -345,7 +345,7 @@ func (r *Runtime) builtin_newProxy(args []Value, newTarget *Object) *Object {
if newTarget == nil {
panic(r.needNew("Proxy"))
}
- return r.newProxy(args, r.getPrototypeFromCtor(newTarget, r.global.Proxy, r.global.ObjectPrototype))
+ return r.newProxy(args, r.getPrototypeFromCtor(newTarget, r.getProxy(), r.global.ObjectPrototype))
}
func (r *Runtime) NewProxy(target *Object, nativeHandler *ProxyTrapConfig) Proxy {
@@ -367,7 +367,7 @@ func (r *Runtime) builtin_proxy_revocable(call FunctionCall) Value {
revoke := r.newNativeFunc(func(FunctionCall) Value {
proxy.revoke()
return _undefined
- }, nil, "", nil, 0)
+ }, "", 0)
ret := r.NewObject()
ret.self._putProp("proxy", proxy.val, true, true, true)
ret.self._putProp("revoke", revoke, true, true, true)
@@ -381,11 +381,16 @@ func (r *Runtime) builtin_proxy_revocable(call FunctionCall) Value {
func (r *Runtime) createProxy(val *Object) objectImpl {
o := r.newNativeConstructOnly(val, r.builtin_newProxy, nil, "Proxy", 2)
- o._putProp("revocable", r.newNativeFunc(r.builtin_proxy_revocable, nil, "revocable", nil, 2), true, false, true)
+ o._putProp("revocable", r.newNativeFunc(r.builtin_proxy_revocable, "revocable", 2), true, false, true)
return o
}
-func (r *Runtime) initProxy() {
- r.global.Proxy = r.newLazyObject(r.createProxy)
- r.addToGlobal("Proxy", r.global.Proxy)
+func (r *Runtime) getProxy() *Object {
+ ret := r.global.Proxy
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Proxy = ret
+ r.createProxy(ret)
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_reflect.go b/vendor/github.com/dop251/goja/builtin_reflect.go
index 68a2bb0524..17bb11a36d 100644
--- a/vendor/github.com/dop251/goja/builtin_reflect.go
+++ b/vendor/github.com/dop251/goja/builtin_reflect.go
@@ -110,25 +110,31 @@ func (r *Runtime) builtin_reflect_setPrototypeOf(call FunctionCall) Value {
func (r *Runtime) createReflect(val *Object) objectImpl {
o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject)
- o._putProp("apply", r.newNativeFunc(r.builtin_reflect_apply, nil, "apply", nil, 3), true, false, true)
- o._putProp("construct", r.newNativeFunc(r.builtin_reflect_construct, nil, "construct", nil, 2), true, false, true)
- o._putProp("defineProperty", r.newNativeFunc(r.builtin_reflect_defineProperty, nil, "defineProperty", nil, 3), true, false, true)
- o._putProp("deleteProperty", r.newNativeFunc(r.builtin_reflect_deleteProperty, nil, "deleteProperty", nil, 2), true, false, true)
- o._putProp("get", r.newNativeFunc(r.builtin_reflect_get, nil, "get", nil, 2), true, false, true)
- o._putProp("getOwnPropertyDescriptor", r.newNativeFunc(r.builtin_reflect_getOwnPropertyDescriptor, nil, "getOwnPropertyDescriptor", nil, 2), true, false, true)
- o._putProp("getPrototypeOf", r.newNativeFunc(r.builtin_reflect_getPrototypeOf, nil, "getPrototypeOf", nil, 1), true, false, true)
- o._putProp("has", r.newNativeFunc(r.builtin_reflect_has, nil, "has", nil, 2), true, false, true)
- o._putProp("isExtensible", r.newNativeFunc(r.builtin_reflect_isExtensible, nil, "isExtensible", nil, 1), true, false, true)
- o._putProp("ownKeys", r.newNativeFunc(r.builtin_reflect_ownKeys, nil, "ownKeys", nil, 1), true, false, true)
- o._putProp("preventExtensions", r.newNativeFunc(r.builtin_reflect_preventExtensions, nil, "preventExtensions", nil, 1), true, false, true)
- o._putProp("set", r.newNativeFunc(r.builtin_reflect_set, nil, "set", nil, 3), true, false, true)
- o._putProp("setPrototypeOf", r.newNativeFunc(r.builtin_reflect_setPrototypeOf, nil, "setPrototypeOf", nil, 2), true, false, true)
+ o._putProp("apply", r.newNativeFunc(r.builtin_reflect_apply, "apply", 3), true, false, true)
+ o._putProp("construct", r.newNativeFunc(r.builtin_reflect_construct, "construct", 2), true, false, true)
+ o._putProp("defineProperty", r.newNativeFunc(r.builtin_reflect_defineProperty, "defineProperty", 3), true, false, true)
+ o._putProp("deleteProperty", r.newNativeFunc(r.builtin_reflect_deleteProperty, "deleteProperty", 2), true, false, true)
+ o._putProp("get", r.newNativeFunc(r.builtin_reflect_get, "get", 2), true, false, true)
+ o._putProp("getOwnPropertyDescriptor", r.newNativeFunc(r.builtin_reflect_getOwnPropertyDescriptor, "getOwnPropertyDescriptor", 2), true, false, true)
+ o._putProp("getPrototypeOf", r.newNativeFunc(r.builtin_reflect_getPrototypeOf, "getPrototypeOf", 1), true, false, true)
+ o._putProp("has", r.newNativeFunc(r.builtin_reflect_has, "has", 2), true, false, true)
+ o._putProp("isExtensible", r.newNativeFunc(r.builtin_reflect_isExtensible, "isExtensible", 1), true, false, true)
+ o._putProp("ownKeys", r.newNativeFunc(r.builtin_reflect_ownKeys, "ownKeys", 1), true, false, true)
+ o._putProp("preventExtensions", r.newNativeFunc(r.builtin_reflect_preventExtensions, "preventExtensions", 1), true, false, true)
+ o._putProp("set", r.newNativeFunc(r.builtin_reflect_set, "set", 3), true, false, true)
+ o._putProp("setPrototypeOf", r.newNativeFunc(r.builtin_reflect_setPrototypeOf, "setPrototypeOf", 2), true, false, true)
o._putSym(SymToStringTag, valueProp(asciiString("Reflect"), false, false, true))
return o
}
-func (r *Runtime) initReflect() {
- r.addToGlobal("Reflect", r.newLazyObject(r.createReflect))
+func (r *Runtime) getReflect() *Object {
+ ret := r.global.Reflect
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Reflect = ret
+ ret.self = r.createReflect(ret)
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_regexp.go b/vendor/github.com/dop251/goja/builtin_regexp.go
index 7ac8ddc609..89402820e9 100644
--- a/vendor/github.com/dop251/goja/builtin_regexp.go
+++ b/vendor/github.com/dop251/goja/builtin_regexp.go
@@ -347,7 +347,7 @@ func (r *Runtime) builtin_RegExp(call FunctionCall) Value {
}
}
}
- return r.newRegExp(pattern, flags, r.global.RegExpPrototype).val
+ return r.newRegExp(pattern, flags, r.getRegExpPrototype()).val
}
func (r *Runtime) regexpproto_compile(call FunctionCall) Value {
@@ -771,7 +771,7 @@ func (r *Runtime) regexpproto_stdMatcherAll(call FunctionCall) Value {
thisObj := r.toObject(call.This)
s := call.Argument(0).toString()
flags := nilSafe(thisObj.self.getStr("flags", nil)).toString()
- c := r.speciesConstructorObj(call.This.(*Object), r.global.RegExp)
+ c := r.speciesConstructorObj(call.This.(*Object), r.getRegExp())
matcher := r.toConstructor(c)([]Value{call.This, flags}, nil)
matcher.self.setOwnStr("lastIndex", valueInt(toLength(thisObj.self.getStr("lastIndex", nil))), true)
flagsStr := flags.String()
@@ -963,7 +963,7 @@ func (r *Runtime) regexpproto_stdSplitter(call FunctionCall) Value {
limitValue := call.Argument(1)
var splitter *Object
search := r.checkStdRegexp(rxObj)
- c := r.speciesConstructorObj(rxObj, r.global.RegExp)
+ c := r.speciesConstructorObj(rxObj, r.getRegExp())
if search == nil || c != r.global.RegExp {
flags := nilSafe(rxObj.self.getStr("flags", nil)).toString()
flagsStr := flags.String()
@@ -1214,7 +1214,7 @@ func (r *Runtime) regExpStringIteratorProto_next(call FunctionCall) Value {
func (r *Runtime) createRegExpStringIteratorPrototype(val *Object) objectImpl {
o := newBaseObjectObj(val, r.getIteratorPrototype(), classObject)
- o._putProp("next", r.newNativeFunc(r.regExpStringIteratorProto_next, nil, "next", nil, 0), true, false, true)
+ o._putProp("next", r.newNativeFunc(r.regExpStringIteratorProto_next, "next", 0), true, false, true)
o._putSym(SymToStringTag, valueProp(asciiString(classRegExpStringIterator), false, false, true))
return o
@@ -1230,60 +1230,75 @@ func (r *Runtime) getRegExpStringIteratorPrototype() *Object {
return o
}
-func (r *Runtime) initRegExp() {
- o := r.newGuardedObject(r.global.ObjectPrototype, classObject)
- r.global.RegExpPrototype = o.val
- r.global.stdRegexpProto = o
-
- o._putProp("compile", r.newNativeFunc(r.regexpproto_compile, nil, "compile", nil, 2), true, false, true)
- o._putProp("exec", r.newNativeFunc(r.regexpproto_exec, nil, "exec", nil, 1), true, false, true)
- o._putProp("test", r.newNativeFunc(r.regexpproto_test, nil, "test", nil, 1), true, false, true)
- o._putProp("toString", r.newNativeFunc(r.regexpproto_toString, nil, "toString", nil, 0), true, false, true)
- o.setOwnStr("source", &valueProperty{
- configurable: true,
- getterFunc: r.newNativeFunc(r.regexpproto_getSource, nil, "get source", nil, 0),
- accessor: true,
- }, false)
- o.setOwnStr("global", &valueProperty{
- configurable: true,
- getterFunc: r.newNativeFunc(r.regexpproto_getGlobal, nil, "get global", nil, 0),
- accessor: true,
- }, false)
- o.setOwnStr("multiline", &valueProperty{
- configurable: true,
- getterFunc: r.newNativeFunc(r.regexpproto_getMultiline, nil, "get multiline", nil, 0),
- accessor: true,
- }, false)
- o.setOwnStr("ignoreCase", &valueProperty{
- configurable: true,
- getterFunc: r.newNativeFunc(r.regexpproto_getIgnoreCase, nil, "get ignoreCase", nil, 0),
- accessor: true,
- }, false)
- o.setOwnStr("unicode", &valueProperty{
- configurable: true,
- getterFunc: r.newNativeFunc(r.regexpproto_getUnicode, nil, "get unicode", nil, 0),
- accessor: true,
- }, false)
- o.setOwnStr("sticky", &valueProperty{
- configurable: true,
- getterFunc: r.newNativeFunc(r.regexpproto_getSticky, nil, "get sticky", nil, 0),
- accessor: true,
- }, false)
- o.setOwnStr("flags", &valueProperty{
- configurable: true,
- getterFunc: r.newNativeFunc(r.regexpproto_getFlags, nil, "get flags", nil, 0),
- accessor: true,
- }, false)
-
- o._putSym(SymMatch, valueProp(r.newNativeFunc(r.regexpproto_stdMatcher, nil, "[Symbol.match]", nil, 1), true, false, true))
- o._putSym(SymMatchAll, valueProp(r.newNativeFunc(r.regexpproto_stdMatcherAll, nil, "[Symbol.matchAll]", nil, 1), true, false, true))
- o._putSym(SymSearch, valueProp(r.newNativeFunc(r.regexpproto_stdSearch, nil, "[Symbol.search]", nil, 1), true, false, true))
- o._putSym(SymSplit, valueProp(r.newNativeFunc(r.regexpproto_stdSplitter, nil, "[Symbol.split]", nil, 2), true, false, true))
- o._putSym(SymReplace, valueProp(r.newNativeFunc(r.regexpproto_stdReplacer, nil, "[Symbol.replace]", nil, 2), true, false, true))
- o.guard("exec", "global", "multiline", "ignoreCase", "unicode", "sticky")
-
- r.global.RegExp = r.newNativeFunc(r.builtin_RegExp, r.builtin_newRegExp, "RegExp", r.global.RegExpPrototype, 2)
- rx := r.global.RegExp.self
- r.putSpeciesReturnThis(rx)
- r.addToGlobal("RegExp", r.global.RegExp)
+func (r *Runtime) getRegExp() *Object {
+ ret := r.global.RegExp
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.RegExp = ret
+ proto := r.getRegExpPrototype()
+ r.newNativeFuncAndConstruct(ret, r.builtin_RegExp,
+ r.wrapNativeConstruct(r.builtin_newRegExp, ret, proto), proto, "RegExp", intToValue(2))
+ rx := ret.self
+ r.putSpeciesReturnThis(rx)
+ }
+ return ret
+}
+
+func (r *Runtime) getRegExpPrototype() *Object {
+ ret := r.global.RegExpPrototype
+ if ret == nil {
+ o := r.newGuardedObject(r.global.ObjectPrototype, classObject)
+ ret = o.val
+ r.global.RegExpPrototype = ret
+ r.global.stdRegexpProto = o
+
+ o._putProp("constructor", r.getRegExp(), true, false, true)
+ o._putProp("compile", r.newNativeFunc(r.regexpproto_compile, "compile", 2), true, false, true)
+ o._putProp("exec", r.newNativeFunc(r.regexpproto_exec, "exec", 1), true, false, true)
+ o._putProp("test", r.newNativeFunc(r.regexpproto_test, "test", 1), true, false, true)
+ o._putProp("toString", r.newNativeFunc(r.regexpproto_toString, "toString", 0), true, false, true)
+ o.setOwnStr("source", &valueProperty{
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.regexpproto_getSource, "get source", 0),
+ accessor: true,
+ }, false)
+ o.setOwnStr("global", &valueProperty{
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.regexpproto_getGlobal, "get global", 0),
+ accessor: true,
+ }, false)
+ o.setOwnStr("multiline", &valueProperty{
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.regexpproto_getMultiline, "get multiline", 0),
+ accessor: true,
+ }, false)
+ o.setOwnStr("ignoreCase", &valueProperty{
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.regexpproto_getIgnoreCase, "get ignoreCase", 0),
+ accessor: true,
+ }, false)
+ o.setOwnStr("unicode", &valueProperty{
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.regexpproto_getUnicode, "get unicode", 0),
+ accessor: true,
+ }, false)
+ o.setOwnStr("sticky", &valueProperty{
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.regexpproto_getSticky, "get sticky", 0),
+ accessor: true,
+ }, false)
+ o.setOwnStr("flags", &valueProperty{
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.regexpproto_getFlags, "get flags", 0),
+ accessor: true,
+ }, false)
+
+ o._putSym(SymMatch, valueProp(r.newNativeFunc(r.regexpproto_stdMatcher, "[Symbol.match]", 1), true, false, true))
+ o._putSym(SymMatchAll, valueProp(r.newNativeFunc(r.regexpproto_stdMatcherAll, "[Symbol.matchAll]", 1), true, false, true))
+ o._putSym(SymSearch, valueProp(r.newNativeFunc(r.regexpproto_stdSearch, "[Symbol.search]", 1), true, false, true))
+ o._putSym(SymSplit, valueProp(r.newNativeFunc(r.regexpproto_stdSplitter, "[Symbol.split]", 2), true, false, true))
+ o._putSym(SymReplace, valueProp(r.newNativeFunc(r.regexpproto_stdReplacer, "[Symbol.replace]", 2), true, false, true))
+ o.guard("exec", "global", "multiline", "ignoreCase", "unicode", "sticky")
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_set.go b/vendor/github.com/dop251/goja/builtin_set.go
index 54bbf3c122..eeedb887c7 100644
--- a/vendor/github.com/dop251/goja/builtin_set.go
+++ b/vendor/github.com/dop251/goja/builtin_set.go
@@ -274,25 +274,25 @@ func (r *Runtime) setIterProto_next(call FunctionCall) Value {
func (r *Runtime) createSetProto(val *Object) objectImpl {
o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject)
- o._putProp("constructor", r.global.Set, true, false, true)
- r.global.setAdder = r.newNativeFunc(r.setProto_add, nil, "add", nil, 1)
+ o._putProp("constructor", r.getSet(), true, false, true)
+ r.global.setAdder = r.newNativeFunc(r.setProto_add, "add", 1)
o._putProp("add", r.global.setAdder, true, false, true)
- o._putProp("clear", r.newNativeFunc(r.setProto_clear, nil, "clear", nil, 0), true, false, true)
- o._putProp("delete", r.newNativeFunc(r.setProto_delete, nil, "delete", nil, 1), true, false, true)
- o._putProp("forEach", r.newNativeFunc(r.setProto_forEach, nil, "forEach", nil, 1), true, false, true)
- o._putProp("has", r.newNativeFunc(r.setProto_has, nil, "has", nil, 1), true, false, true)
+ o._putProp("clear", r.newNativeFunc(r.setProto_clear, "clear", 0), true, false, true)
+ o._putProp("delete", r.newNativeFunc(r.setProto_delete, "delete", 1), true, false, true)
+ o._putProp("forEach", r.newNativeFunc(r.setProto_forEach, "forEach", 1), true, false, true)
+ o._putProp("has", r.newNativeFunc(r.setProto_has, "has", 1), true, false, true)
o.setOwnStr("size", &valueProperty{
- getterFunc: r.newNativeFunc(r.setProto_getSize, nil, "get size", nil, 0),
+ getterFunc: r.newNativeFunc(r.setProto_getSize, "get size", 0),
accessor: true,
writable: true,
configurable: true,
}, true)
- valuesFunc := r.newNativeFunc(r.setProto_values, nil, "values", nil, 0)
+ valuesFunc := r.newNativeFunc(r.setProto_values, "values", 0)
o._putProp("values", valuesFunc, true, false, true)
o._putProp("keys", valuesFunc, true, false, true)
- o._putProp("entries", r.newNativeFunc(r.setProto_entries, nil, "entries", nil, 0), true, false, true)
+ o._putProp("entries", r.newNativeFunc(r.setProto_entries, "entries", 0), true, false, true)
o._putSym(SymIterator, valueProp(valuesFunc, true, false, true))
o._putSym(SymToStringTag, valueProp(asciiString(classSet), false, false, true))
@@ -300,7 +300,7 @@ func (r *Runtime) createSetProto(val *Object) objectImpl {
}
func (r *Runtime) createSet(val *Object) objectImpl {
- o := r.newNativeConstructOnly(val, r.builtin_newSet, r.global.SetPrototype, "Set", 0)
+ o := r.newNativeConstructOnly(val, r.builtin_newSet, r.getSetPrototype(), "Set", 0)
r.putSpeciesReturnThis(o)
return o
@@ -309,7 +309,7 @@ func (r *Runtime) createSet(val *Object) objectImpl {
func (r *Runtime) createSetIterProto(val *Object) objectImpl {
o := newBaseObjectObj(val, r.getIteratorPrototype(), classObject)
- o._putProp("next", r.newNativeFunc(r.setIterProto_next, nil, "next", nil, 0), true, false, true)
+ o._putProp("next", r.newNativeFunc(r.setIterProto_next, "next", 0), true, false, true)
o._putSym(SymToStringTag, valueProp(asciiString(classSetIterator), false, false, true))
return o
@@ -325,9 +325,22 @@ func (r *Runtime) getSetIteratorPrototype() *Object {
return o
}
-func (r *Runtime) initSet() {
- r.global.SetPrototype = r.newLazyObject(r.createSetProto)
- r.global.Set = r.newLazyObject(r.createSet)
+func (r *Runtime) getSetPrototype() *Object {
+ ret := r.global.SetPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.SetPrototype = ret
+ ret.self = r.createSetProto(ret)
+ }
+ return ret
+}
- r.addToGlobal("Set", r.global.Set)
+func (r *Runtime) getSet() *Object {
+ ret := r.global.Set
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Set = ret
+ ret.self = r.createSet(ret)
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_string.go b/vendor/github.com/dop251/goja/builtin_string.go
index 5e35dc86a7..b2a0e4ea81 100644
--- a/vendor/github.com/dop251/goja/builtin_string.go
+++ b/vendor/github.com/dop251/goja/builtin_string.go
@@ -4,6 +4,7 @@ import (
"github.com/dop251/goja/unistring"
"math"
"strings"
+ "sync"
"unicode/utf16"
"unicode/utf8"
@@ -82,6 +83,9 @@ func (r *Runtime) stringproto_toStringValueOf(this Value, funcName string) Value
return valueOf()
}
}
+ if obj == r.global.StringPrototype {
+ return stringEmpty
+ }
}
r.typeErrorResult(true, "String.prototype.%s is called on incompatible receiver", funcName)
return nil
@@ -131,11 +135,11 @@ func (r *Runtime) string_fromcodepoint(call FunctionCall) Value {
var c rune
if numInt, ok := num.(valueInt); ok {
if numInt < 0 || numInt > utf8.MaxRune {
- panic(r.newError(r.global.RangeError, "Invalid code point %d", numInt))
+ panic(r.newError(r.getRangeError(), "Invalid code point %d", numInt))
}
c = rune(numInt)
} else {
- panic(r.newError(r.global.RangeError, "Invalid code point %s", num))
+ panic(r.newError(r.getRangeError(), "Invalid code point %s", num))
}
sb.WriteRune(c)
}
@@ -391,7 +395,7 @@ func (r *Runtime) stringproto_match(call FunctionCall) Value {
}
if rx == nil {
- rx = r.newRegExp(regexp, nil, r.global.RegExpPrototype)
+ rx = r.newRegExp(regexp, nil, r.getRegExpPrototype())
}
if matcher, ok := r.toObject(rx.getSym(SymMatch, nil)).self.assertCallable(); ok {
@@ -425,7 +429,7 @@ func (r *Runtime) stringproto_matchAll(call FunctionCall) Value {
}
}
- rx := r.newRegExp(regexp, asciiString("g"), r.global.RegExpPrototype)
+ rx := r.newRegExp(regexp, asciiString("g"), r.getRegExpPrototype())
if matcher, ok := r.toObject(rx.getSym(SymMatchAll, nil)).self.assertCallable(); ok {
return matcher(FunctionCall{
@@ -457,7 +461,7 @@ func (r *Runtime) stringproto_normalize(call FunctionCall) Value {
case "NFKD":
f = norm.NFKD
default:
- panic(r.newError(r.global.RangeError, "The normalization form should be one of NFC, NFD, NFKC, NFKD"))
+ panic(r.newError(r.getRangeError(), "The normalization form should be one of NFC, NFD, NFKC, NFKD"))
}
switch s := s.(type) {
@@ -551,11 +555,11 @@ func (r *Runtime) stringproto_repeat(call FunctionCall) Value {
s := call.This.toString()
n := call.Argument(0).ToNumber()
if n == _positiveInf {
- panic(r.newError(r.global.RangeError, "Invalid count value"))
+ panic(r.newError(r.getRangeError(), "Invalid count value"))
}
numInt := n.ToInteger()
if numInt < 0 {
- panic(r.newError(r.global.RangeError, "Invalid count value"))
+ panic(r.newError(r.getRangeError(), "Invalid count value"))
}
if numInt == 0 || s.Length() == 0 {
return stringEmpty
@@ -736,7 +740,7 @@ func (r *Runtime) stringproto_search(call FunctionCall) Value {
}
if rx == nil {
- rx = r.newRegExp(regexp, nil, r.global.RegExpPrototype)
+ rx = r.newRegExp(regexp, nil, r.getRegExpPrototype())
}
if searcher, ok := r.toObject(rx.getSym(SymSearch, nil)).self.assertCallable(); ok {
@@ -977,7 +981,7 @@ func (r *Runtime) stringIterProto_next(call FunctionCall) Value {
func (r *Runtime) createStringIterProto(val *Object) objectImpl {
o := newBaseObjectObj(val, r.getIteratorPrototype(), classObject)
- o._putProp("next", r.newNativeFunc(r.stringIterProto_next, nil, "next", nil, 0), true, false, true)
+ o._putProp("next", r.newNativeFunc(r.stringIterProto_next, "next", 0), true, false, true)
o._putSym(SymToStringTag, valueProp(asciiString(classStringIterator), false, false, true))
return o
@@ -993,59 +997,120 @@ func (r *Runtime) getStringIteratorPrototype() *Object {
return o
}
-func (r *Runtime) initString() {
- r.global.StringPrototype = r.builtin_newString([]Value{stringEmpty}, r.global.ObjectPrototype)
-
- o := r.global.StringPrototype.self
- o._putProp("at", r.newNativeFunc(r.stringproto_at, nil, "at", nil, 1), true, false, true)
- o._putProp("charAt", r.newNativeFunc(r.stringproto_charAt, nil, "charAt", nil, 1), true, false, true)
- o._putProp("charCodeAt", r.newNativeFunc(r.stringproto_charCodeAt, nil, "charCodeAt", nil, 1), true, false, true)
- o._putProp("codePointAt", r.newNativeFunc(r.stringproto_codePointAt, nil, "codePointAt", nil, 1), true, false, true)
- o._putProp("concat", r.newNativeFunc(r.stringproto_concat, nil, "concat", nil, 1), true, false, true)
- o._putProp("endsWith", r.newNativeFunc(r.stringproto_endsWith, nil, "endsWith", nil, 1), true, false, true)
- o._putProp("includes", r.newNativeFunc(r.stringproto_includes, nil, "includes", nil, 1), true, false, true)
- o._putProp("indexOf", r.newNativeFunc(r.stringproto_indexOf, nil, "indexOf", nil, 1), true, false, true)
- o._putProp("lastIndexOf", r.newNativeFunc(r.stringproto_lastIndexOf, nil, "lastIndexOf", nil, 1), true, false, true)
- o._putProp("localeCompare", r.newNativeFunc(r.stringproto_localeCompare, nil, "localeCompare", nil, 1), true, false, true)
- o._putProp("match", r.newNativeFunc(r.stringproto_match, nil, "match", nil, 1), true, false, true)
- o._putProp("matchAll", r.newNativeFunc(r.stringproto_matchAll, nil, "matchAll", nil, 1), true, false, true)
- o._putProp("normalize", r.newNativeFunc(r.stringproto_normalize, nil, "normalize", nil, 0), true, false, true)
- o._putProp("padEnd", r.newNativeFunc(r.stringproto_padEnd, nil, "padEnd", nil, 1), true, false, true)
- o._putProp("padStart", r.newNativeFunc(r.stringproto_padStart, nil, "padStart", nil, 1), true, false, true)
- o._putProp("repeat", r.newNativeFunc(r.stringproto_repeat, nil, "repeat", nil, 1), true, false, true)
- o._putProp("replace", r.newNativeFunc(r.stringproto_replace, nil, "replace", nil, 2), true, false, true)
- o._putProp("replaceAll", r.newNativeFunc(r.stringproto_replaceAll, nil, "replaceAll", nil, 2), true, false, true)
- o._putProp("search", r.newNativeFunc(r.stringproto_search, nil, "search", nil, 1), true, false, true)
- o._putProp("slice", r.newNativeFunc(r.stringproto_slice, nil, "slice", nil, 2), true, false, true)
- o._putProp("split", r.newNativeFunc(r.stringproto_split, nil, "split", nil, 2), true, false, true)
- o._putProp("startsWith", r.newNativeFunc(r.stringproto_startsWith, nil, "startsWith", nil, 1), true, false, true)
- o._putProp("substring", r.newNativeFunc(r.stringproto_substring, nil, "substring", nil, 2), true, false, true)
- o._putProp("toLocaleLowerCase", r.newNativeFunc(r.stringproto_toLowerCase, nil, "toLocaleLowerCase", nil, 0), true, false, true)
- o._putProp("toLocaleUpperCase", r.newNativeFunc(r.stringproto_toUpperCase, nil, "toLocaleUpperCase", nil, 0), true, false, true)
- o._putProp("toLowerCase", r.newNativeFunc(r.stringproto_toLowerCase, nil, "toLowerCase", nil, 0), true, false, true)
- o._putProp("toString", r.newNativeFunc(r.stringproto_toString, nil, "toString", nil, 0), true, false, true)
- o._putProp("toUpperCase", r.newNativeFunc(r.stringproto_toUpperCase, nil, "toUpperCase", nil, 0), true, false, true)
- o._putProp("trim", r.newNativeFunc(r.stringproto_trim, nil, "trim", nil, 0), true, false, true)
- trimEnd := r.newNativeFunc(r.stringproto_trimEnd, nil, "trimEnd", nil, 0)
- trimStart := r.newNativeFunc(r.stringproto_trimStart, nil, "trimStart", nil, 0)
- o._putProp("trimEnd", trimEnd, true, false, true)
- o._putProp("trimStart", trimStart, true, false, true)
- o._putProp("trimRight", trimEnd, true, false, true)
- o._putProp("trimLeft", trimStart, true, false, true)
- o._putProp("valueOf", r.newNativeFunc(r.stringproto_valueOf, nil, "valueOf", nil, 0), true, false, true)
-
- o._putSym(SymIterator, valueProp(r.newNativeFunc(r.stringproto_iterator, nil, "[Symbol.iterator]", nil, 0), true, false, true))
+func createStringProtoTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.global.ObjectPrototype
+ }
+
+ t.putStr("length", func(r *Runtime) Value { return valueProp(intToValue(0), false, false, false) })
+
+ t.putStr("constructor", func(r *Runtime) Value { return valueProp(r.getString(), true, false, true) })
+
+ t.putStr("at", func(r *Runtime) Value { return r.methodProp(r.stringproto_at, "at", 1) })
+ t.putStr("charAt", func(r *Runtime) Value { return r.methodProp(r.stringproto_charAt, "charAt", 1) })
+ t.putStr("charCodeAt", func(r *Runtime) Value { return r.methodProp(r.stringproto_charCodeAt, "charCodeAt", 1) })
+ t.putStr("codePointAt", func(r *Runtime) Value { return r.methodProp(r.stringproto_codePointAt, "codePointAt", 1) })
+ t.putStr("concat", func(r *Runtime) Value { return r.methodProp(r.stringproto_concat, "concat", 1) })
+ t.putStr("endsWith", func(r *Runtime) Value { return r.methodProp(r.stringproto_endsWith, "endsWith", 1) })
+ t.putStr("includes", func(r *Runtime) Value { return r.methodProp(r.stringproto_includes, "includes", 1) })
+ t.putStr("indexOf", func(r *Runtime) Value { return r.methodProp(r.stringproto_indexOf, "indexOf", 1) })
+ t.putStr("lastIndexOf", func(r *Runtime) Value { return r.methodProp(r.stringproto_lastIndexOf, "lastIndexOf", 1) })
+ t.putStr("localeCompare", func(r *Runtime) Value { return r.methodProp(r.stringproto_localeCompare, "localeCompare", 1) })
+ t.putStr("match", func(r *Runtime) Value { return r.methodProp(r.stringproto_match, "match", 1) })
+ t.putStr("matchAll", func(r *Runtime) Value { return r.methodProp(r.stringproto_matchAll, "matchAll", 1) })
+ t.putStr("normalize", func(r *Runtime) Value { return r.methodProp(r.stringproto_normalize, "normalize", 0) })
+ t.putStr("padEnd", func(r *Runtime) Value { return r.methodProp(r.stringproto_padEnd, "padEnd", 1) })
+ t.putStr("padStart", func(r *Runtime) Value { return r.methodProp(r.stringproto_padStart, "padStart", 1) })
+ t.putStr("repeat", func(r *Runtime) Value { return r.methodProp(r.stringproto_repeat, "repeat", 1) })
+ t.putStr("replace", func(r *Runtime) Value { return r.methodProp(r.stringproto_replace, "replace", 2) })
+ t.putStr("replaceAll", func(r *Runtime) Value { return r.methodProp(r.stringproto_replaceAll, "replaceAll", 2) })
+ t.putStr("search", func(r *Runtime) Value { return r.methodProp(r.stringproto_search, "search", 1) })
+ t.putStr("slice", func(r *Runtime) Value { return r.methodProp(r.stringproto_slice, "slice", 2) })
+ t.putStr("split", func(r *Runtime) Value { return r.methodProp(r.stringproto_split, "split", 2) })
+ t.putStr("startsWith", func(r *Runtime) Value { return r.methodProp(r.stringproto_startsWith, "startsWith", 1) })
+ t.putStr("substring", func(r *Runtime) Value { return r.methodProp(r.stringproto_substring, "substring", 2) })
+ t.putStr("toLocaleLowerCase", func(r *Runtime) Value { return r.methodProp(r.stringproto_toLowerCase, "toLocaleLowerCase", 0) })
+ t.putStr("toLocaleUpperCase", func(r *Runtime) Value { return r.methodProp(r.stringproto_toUpperCase, "toLocaleUpperCase", 0) })
+ t.putStr("toLowerCase", func(r *Runtime) Value { return r.methodProp(r.stringproto_toLowerCase, "toLowerCase", 0) })
+ t.putStr("toString", func(r *Runtime) Value { return r.methodProp(r.stringproto_toString, "toString", 0) })
+ t.putStr("toUpperCase", func(r *Runtime) Value { return r.methodProp(r.stringproto_toUpperCase, "toUpperCase", 0) })
+ t.putStr("trim", func(r *Runtime) Value { return r.methodProp(r.stringproto_trim, "trim", 0) })
+ t.putStr("trimEnd", func(r *Runtime) Value { return valueProp(r.getStringproto_trimEnd(), true, false, true) })
+ t.putStr("trimStart", func(r *Runtime) Value { return valueProp(r.getStringproto_trimStart(), true, false, true) })
+ t.putStr("trimRight", func(r *Runtime) Value { return valueProp(r.getStringproto_trimEnd(), true, false, true) })
+ t.putStr("trimLeft", func(r *Runtime) Value { return valueProp(r.getStringproto_trimStart(), true, false, true) })
+ t.putStr("valueOf", func(r *Runtime) Value { return r.methodProp(r.stringproto_valueOf, "valueOf", 0) })
// Annex B
- o._putProp("substr", r.newNativeFunc(r.stringproto_substr, nil, "substr", nil, 2), true, false, true)
+ t.putStr("substr", func(r *Runtime) Value { return r.methodProp(r.stringproto_substr, "substr", 2) })
+
+ t.putSym(SymIterator, func(r *Runtime) Value {
+ return valueProp(r.newNativeFunc(r.stringproto_iterator, "[Symbol.iterator]", 0), true, false, true)
+ })
- r.global.String = r.newNativeFunc(r.builtin_String, r.builtin_newString, "String", r.global.StringPrototype, 1)
- o = r.global.String.self
- o._putProp("fromCharCode", r.newNativeFunc(r.string_fromcharcode, nil, "fromCharCode", nil, 1), true, false, true)
- o._putProp("fromCodePoint", r.newNativeFunc(r.string_fromcodepoint, nil, "fromCodePoint", nil, 1), true, false, true)
- o._putProp("raw", r.newNativeFunc(r.string_raw, nil, "raw", nil, 1), true, false, true)
+ return t
+}
- r.addToGlobal("String", r.global.String)
+func (r *Runtime) getStringproto_trimEnd() *Object {
+ ret := r.global.stringproto_trimEnd
+ if ret == nil {
+ ret = r.newNativeFunc(r.stringproto_trimEnd, "trimEnd", 0)
+ r.global.stringproto_trimEnd = ret
+ }
+ return ret
+}
- r.stringSingleton = r.builtin_new(r.global.String, nil).self.(*stringObject)
+func (r *Runtime) getStringproto_trimStart() *Object {
+ ret := r.global.stringproto_trimStart
+ if ret == nil {
+ ret = r.newNativeFunc(r.stringproto_trimStart, "trimStart", 0)
+ r.global.stringproto_trimStart = ret
+ }
+ return ret
+}
+
+func (r *Runtime) getStringSingleton() *stringObject {
+ ret := r.stringSingleton
+ if ret == nil {
+ ret = r.builtin_new(r.getString(), nil).self.(*stringObject)
+ r.stringSingleton = ret
+ }
+ return ret
+}
+
+func (r *Runtime) getString() *Object {
+ ret := r.global.String
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.String = ret
+ proto := r.getStringPrototype()
+ o := r.newNativeFuncAndConstruct(ret, r.builtin_String, r.wrapNativeConstruct(r.builtin_newString, ret, proto), proto, "String", intToValue(1))
+ ret.self = o
+ o._putProp("fromCharCode", r.newNativeFunc(r.string_fromcharcode, "fromCharCode", 1), true, false, true)
+ o._putProp("fromCodePoint", r.newNativeFunc(r.string_fromcodepoint, "fromCodePoint", 1), true, false, true)
+ o._putProp("raw", r.newNativeFunc(r.string_raw, "raw", 1), true, false, true)
+ }
+ return ret
+}
+
+var stringProtoTemplate *objectTemplate
+var stringProtoTemplateOnce sync.Once
+
+func getStringProtoTemplate() *objectTemplate {
+ stringProtoTemplateOnce.Do(func() {
+ stringProtoTemplate = createStringProtoTemplate()
+ })
+ return stringProtoTemplate
+}
+
+func (r *Runtime) getStringPrototype() *Object {
+ ret := r.global.StringPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.StringPrototype = ret
+ o := r.newTemplatedObject(getStringProtoTemplate(), ret)
+ o.class = classString
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_symbol.go b/vendor/github.com/dop251/goja/builtin_symbol.go
index f072343527..8231b7bfcb 100644
--- a/vendor/github.com/dop251/goja/builtin_symbol.go
+++ b/vendor/github.com/dop251/goja/builtin_symbol.go
@@ -110,29 +110,29 @@ func (r *Runtime) createSymbolProto(val *Object) objectImpl {
}
o.init()
- o._putProp("constructor", r.global.Symbol, true, false, true)
+ o._putProp("constructor", r.getSymbol(), true, false, true)
o.setOwnStr("description", &valueProperty{
configurable: true,
getterFunc: r.newNativeFunc(func(call FunctionCall) Value {
return r.thisSymbolValue(call.This).desc
- }, nil, "get description", nil, 0),
+ }, "get description", 0),
accessor: true,
}, false)
- o._putProp("toString", r.newNativeFunc(r.symbolproto_tostring, nil, "toString", nil, 0), true, false, true)
- o._putProp("valueOf", r.newNativeFunc(r.symbolproto_valueOf, nil, "valueOf", nil, 0), true, false, true)
- o._putSym(SymToPrimitive, valueProp(r.newNativeFunc(r.symbolproto_valueOf, nil, "[Symbol.toPrimitive]", nil, 1), false, false, true))
+ o._putProp("toString", r.newNativeFunc(r.symbolproto_tostring, "toString", 0), true, false, true)
+ o._putProp("valueOf", r.newNativeFunc(r.symbolproto_valueOf, "valueOf", 0), true, false, true)
+ o._putSym(SymToPrimitive, valueProp(r.newNativeFunc(r.symbolproto_valueOf, "[Symbol.toPrimitive]", 1), false, false, true))
o._putSym(SymToStringTag, valueProp(newStringValue("Symbol"), false, false, true))
return o
}
func (r *Runtime) createSymbol(val *Object) objectImpl {
- o := r.newNativeFuncObj(val, r.builtin_symbol, func(args []Value, proto *Object) *Object {
+ o := r.newNativeFuncAndConstruct(val, r.builtin_symbol, func(args []Value, newTarget *Object) *Object {
panic(r.NewTypeError("Symbol is not a constructor"))
- }, "Symbol", r.global.SymbolPrototype, _positiveZero)
+ }, r.getSymbolPrototype(), "Symbol", _positiveZero)
- o._putProp("for", r.newNativeFunc(r.symbol_for, nil, "for", nil, 1), true, false, true)
- o._putProp("keyFor", r.newNativeFunc(r.symbol_keyfor, nil, "keyFor", nil, 1), true, false, true)
+ o._putProp("for", r.newNativeFunc(r.symbol_for, "for", 1), true, false, true)
+ o._putProp("keyFor", r.newNativeFunc(r.symbol_keyfor, "keyFor", 1), true, false, true)
for _, s := range []*Symbol{
SymHasInstance,
@@ -156,10 +156,22 @@ func (r *Runtime) createSymbol(val *Object) objectImpl {
return o
}
-func (r *Runtime) initSymbol() {
- r.global.SymbolPrototype = r.newLazyObject(r.createSymbolProto)
-
- r.global.Symbol = r.newLazyObject(r.createSymbol)
- r.addToGlobal("Symbol", r.global.Symbol)
+func (r *Runtime) getSymbolPrototype() *Object {
+ ret := r.global.SymbolPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.SymbolPrototype = ret
+ ret.self = r.createSymbolProto(ret)
+ }
+ return ret
+}
+func (r *Runtime) getSymbol() *Object {
+ ret := r.global.Symbol
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Symbol = ret
+ ret.self = r.createSymbol(ret)
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_typedarrays.go b/vendor/github.com/dop251/goja/builtin_typedarrays.go
index 02113e8721..1fd672c147 100644
--- a/vendor/github.com/dop251/goja/builtin_typedarrays.go
+++ b/vendor/github.com/dop251/goja/builtin_typedarrays.go
@@ -4,6 +4,7 @@ import (
"fmt"
"math"
"sort"
+ "sync"
"unsafe"
"github.com/dop251/goja/unistring"
@@ -78,7 +79,7 @@ func (r *Runtime) builtin_newArrayBuffer(args []Value, newTarget *Object) *Objec
if newTarget == nil {
panic(r.needNew("ArrayBuffer"))
}
- b := r._newArrayBuffer(r.getPrototypeFromCtor(newTarget, r.global.ArrayBuffer, r.global.ArrayBufferPrototype), nil)
+ b := r._newArrayBuffer(r.getPrototypeFromCtor(newTarget, r.getArrayBuffer(), r.getArrayBufferPrototype()), nil)
if len(args) > 0 {
b.data = allocByteSlice(r.toIndex(args[0]))
}
@@ -109,7 +110,7 @@ func (r *Runtime) arrayBufferProto_slice(call FunctionCall) Value {
}
stop = relToIdx(stop, l)
newLen := max(stop-start, 0)
- ret := r.speciesConstructor(o, r.global.ArrayBuffer)([]Value{intToValue(newLen)}, nil)
+ ret := r.speciesConstructor(o, r.getArrayBuffer())([]Value{intToValue(newLen)}, nil)
if ab, ok := ret.self.(*arrayBufferObject); ok {
if newLen > 0 {
b.ensureNotDetached(true)
@@ -145,7 +146,7 @@ func (r *Runtime) newDataView(args []Value, newTarget *Object) *Object {
if newTarget == nil {
panic(r.needNew("DataView"))
}
- proto := r.getPrototypeFromCtor(newTarget, r.global.DataView, r.global.DataViewPrototype)
+ proto := r.getPrototypeFromCtor(newTarget, r.getDataView(), r.getDataViewPrototype())
var bufArg Value
if len(args) > 0 {
bufArg = args[0]
@@ -165,13 +166,13 @@ func (r *Runtime) newDataView(args []Value, newTarget *Object) *Object {
byteOffset = r.toIndex(offsetArg)
buffer.ensureNotDetached(true)
if byteOffset > len(buffer.data) {
- panic(r.newError(r.global.RangeError, "Start offset %s is outside the bounds of the buffer", offsetArg.String()))
+ panic(r.newError(r.getRangeError(), "Start offset %s is outside the bounds of the buffer", offsetArg.String()))
}
}
if len(args) > 2 && args[2] != nil && args[2] != _undefined {
byteLen = r.toIndex(args[2])
if byteOffset+byteLen > len(buffer.data) {
- panic(r.newError(r.global.RangeError, "Invalid DataView length %d", byteLen))
+ panic(r.newError(r.getRangeError(), "Invalid DataView length %d", byteLen))
}
} else {
byteLen = len(buffer.data) - byteOffset
@@ -509,7 +510,7 @@ func (r *Runtime) typedArrayProto_filter(call FunctionCall) Value {
}
}
c := r.speciesConstructorObj(o, ta.defaultCtor)
- ab := r._newArrayBuffer(r.global.ArrayBufferPrototype, nil)
+ ab := r._newArrayBuffer(r.getArrayBufferPrototype(), nil)
ab.data = buf
kept := r.toConstructor(ta.defaultCtor)([]Value{ab.val}, ta.defaultCtor)
if c == ta.defaultCtor {
@@ -945,7 +946,7 @@ func (r *Runtime) typedArrayProto_set(call FunctionCall) Value {
srcObj := call.Argument(0).ToObject(r)
targetOffset := toIntStrict(call.Argument(1).ToInteger())
if targetOffset < 0 {
- panic(r.newError(r.global.RangeError, "offset should be >= 0"))
+ panic(r.newError(r.getRangeError(), "offset should be >= 0"))
}
ta.viewedArrayBuf.ensureNotDetached(true)
targetLen := ta.length
@@ -953,7 +954,7 @@ func (r *Runtime) typedArrayProto_set(call FunctionCall) Value {
src.viewedArrayBuf.ensureNotDetached(true)
srcLen := src.length
if x := srcLen + targetOffset; x < 0 || x > targetLen {
- panic(r.newError(r.global.RangeError, "Source is too large"))
+ panic(r.newError(r.getRangeError(), "Source is too large"))
}
if src.defaultCtor == ta.defaultCtor {
copy(ta.viewedArrayBuf.data[(ta.offset+targetOffset)*ta.elemSize:],
@@ -1002,7 +1003,7 @@ func (r *Runtime) typedArrayProto_set(call FunctionCall) Value {
targetLen := ta.length
srcLen := toIntStrict(toLength(srcObj.self.getStr("length", nil)))
if x := srcLen + targetOffset; x < 0 || x > targetLen {
- panic(r.newError(r.global.RangeError, "Source is too large"))
+ panic(r.newError(r.getRangeError(), "Source is too large"))
}
for i := 0; i < srcLen; i++ {
val := nilSafe(srcObj.self.getIdx(valueInt(i), nil))
@@ -1214,7 +1215,7 @@ func (r *Runtime) typedArray_of(call FunctionCall) Value {
}
func (r *Runtime) allocateTypedArray(newTarget *Object, length int, taCtor typedArrayObjectCtor, proto *Object) *typedArrayObject {
- buf := r._newArrayBuffer(r.global.ArrayBufferPrototype, nil)
+ buf := r._newArrayBuffer(r.getArrayBufferPrototype(), nil)
ta := taCtor(buf, 0, length, r.getPrototypeFromCtor(newTarget, nil, proto))
if length > 0 {
buf.data = allocByteSlice(length * ta.elemSize)
@@ -1297,7 +1298,7 @@ func (r *Runtime) _newTypedArrayFromArrayBuffer(ab *arrayBufferObject, args []Va
if len(args) > 1 && args[1] != nil && args[1] != _undefined {
byteOffset = r.toIndex(args[1])
if byteOffset%ta.elemSize != 0 {
- panic(r.newError(r.global.RangeError, "Start offset of %s should be a multiple of %d", newTarget.self.getStr("name", nil), ta.elemSize))
+ panic(r.newError(r.getRangeError(), "Start offset of %s should be a multiple of %d", newTarget.self.getStr("name", nil), ta.elemSize))
}
}
var length int
@@ -1305,16 +1306,16 @@ func (r *Runtime) _newTypedArrayFromArrayBuffer(ab *arrayBufferObject, args []Va
length = r.toIndex(args[2])
ab.ensureNotDetached(true)
if byteOffset+length*ta.elemSize > len(ab.data) {
- panic(r.newError(r.global.RangeError, "Invalid typed array length: %d", length))
+ panic(r.newError(r.getRangeError(), "Invalid typed array length: %d", length))
}
} else {
ab.ensureNotDetached(true)
if len(ab.data)%ta.elemSize != 0 {
- panic(r.newError(r.global.RangeError, "Byte length of %s should be a multiple of %d", newTarget.self.getStr("name", nil), ta.elemSize))
+ panic(r.newError(r.getRangeError(), "Byte length of %s should be a multiple of %d", newTarget.self.getStr("name", nil), ta.elemSize))
}
length = (len(ab.data) - byteOffset) / ta.elemSize
if length < 0 {
- panic(r.newError(r.global.RangeError, "Start offset %d is outside the bounds of the buffer", byteOffset))
+ panic(r.newError(r.getRangeError(), "Start offset %d is outside the bounds of the buffer", byteOffset))
}
}
ta.offset = byteOffset / ta.elemSize
@@ -1327,7 +1328,8 @@ func (r *Runtime) _newTypedArrayFromTypedArray(src *typedArrayObject, newTarget
src.viewedArrayBuf.ensureNotDetached(true)
l := src.length
- dst.viewedArrayBuf.prototype = r.getPrototypeFromCtor(r.speciesConstructorObj(src.viewedArrayBuf.val, r.global.ArrayBuffer), r.global.ArrayBuffer, r.global.ArrayBufferPrototype)
+ arrayBuffer := r.getArrayBuffer()
+ dst.viewedArrayBuf.prototype = r.getPrototypeFromCtor(r.speciesConstructorObj(src.viewedArrayBuf.val, arrayBuffer), arrayBuffer, r.getArrayBufferPrototype())
dst.viewedArrayBuf.data = allocByteSlice(toIntStrict(int64(l) * int64(dst.elemSize)))
src.viewedArrayBuf.ensureNotDetached(true)
if src.defaultCtor == dst.defaultCtor {
@@ -1408,192 +1410,371 @@ func (r *Runtime) createArrayBufferProto(val *Object) objectImpl {
byteLengthProp := &valueProperty{
accessor: true,
configurable: true,
- getterFunc: r.newNativeFunc(r.arrayBufferProto_getByteLength, nil, "get byteLength", nil, 0),
+ getterFunc: r.newNativeFunc(r.arrayBufferProto_getByteLength, "get byteLength", 0),
}
b._put("byteLength", byteLengthProp)
- b._putProp("constructor", r.global.ArrayBuffer, true, false, true)
- b._putProp("slice", r.newNativeFunc(r.arrayBufferProto_slice, nil, "slice", nil, 2), true, false, true)
+ b._putProp("constructor", r.getArrayBuffer(), true, false, true)
+ b._putProp("slice", r.newNativeFunc(r.arrayBufferProto_slice, "slice", 2), true, false, true)
b._putSym(SymToStringTag, valueProp(asciiString("ArrayBuffer"), false, false, true))
return b
}
func (r *Runtime) createArrayBuffer(val *Object) objectImpl {
- o := r.newNativeConstructOnly(val, r.builtin_newArrayBuffer, r.global.ArrayBufferPrototype, "ArrayBuffer", 1)
- o._putProp("isView", r.newNativeFunc(r.arrayBuffer_isView, nil, "isView", nil, 1), true, false, true)
+ o := r.newNativeConstructOnly(val, r.builtin_newArrayBuffer, r.getArrayBufferPrototype(), "ArrayBuffer", 1)
+ o._putProp("isView", r.newNativeFunc(r.arrayBuffer_isView, "isView", 1), true, false, true)
r.putSpeciesReturnThis(o)
return o
}
-func (r *Runtime) createDataViewProto(val *Object) objectImpl {
- b := newBaseObjectObj(val, r.global.ObjectPrototype, classObject)
- b._put("buffer", &valueProperty{
- accessor: true,
- configurable: true,
- getterFunc: r.newNativeFunc(r.dataViewProto_getBuffer, nil, "get buffer", nil, 0),
- })
- b._put("byteLength", &valueProperty{
- accessor: true,
- configurable: true,
- getterFunc: r.newNativeFunc(r.dataViewProto_getByteLen, nil, "get byteLength", nil, 0),
- })
- b._put("byteOffset", &valueProperty{
- accessor: true,
- configurable: true,
- getterFunc: r.newNativeFunc(r.dataViewProto_getByteOffset, nil, "get byteOffset", nil, 0),
- })
- b._putProp("constructor", r.global.DataView, true, false, true)
- b._putProp("getFloat32", r.newNativeFunc(r.dataViewProto_getFloat32, nil, "getFloat32", nil, 1), true, false, true)
- b._putProp("getFloat64", r.newNativeFunc(r.dataViewProto_getFloat64, nil, "getFloat64", nil, 1), true, false, true)
- b._putProp("getInt8", r.newNativeFunc(r.dataViewProto_getInt8, nil, "getInt8", nil, 1), true, false, true)
- b._putProp("getInt16", r.newNativeFunc(r.dataViewProto_getInt16, nil, "getInt16", nil, 1), true, false, true)
- b._putProp("getInt32", r.newNativeFunc(r.dataViewProto_getInt32, nil, "getInt32", nil, 1), true, false, true)
- b._putProp("getUint8", r.newNativeFunc(r.dataViewProto_getUint8, nil, "getUint8", nil, 1), true, false, true)
- b._putProp("getUint16", r.newNativeFunc(r.dataViewProto_getUint16, nil, "getUint16", nil, 1), true, false, true)
- b._putProp("getUint32", r.newNativeFunc(r.dataViewProto_getUint32, nil, "getUint32", nil, 1), true, false, true)
- b._putProp("setFloat32", r.newNativeFunc(r.dataViewProto_setFloat32, nil, "setFloat32", nil, 2), true, false, true)
- b._putProp("setFloat64", r.newNativeFunc(r.dataViewProto_setFloat64, nil, "setFloat64", nil, 2), true, false, true)
- b._putProp("setInt8", r.newNativeFunc(r.dataViewProto_setInt8, nil, "setInt8", nil, 2), true, false, true)
- b._putProp("setInt16", r.newNativeFunc(r.dataViewProto_setInt16, nil, "setInt16", nil, 2), true, false, true)
- b._putProp("setInt32", r.newNativeFunc(r.dataViewProto_setInt32, nil, "setInt32", nil, 2), true, false, true)
- b._putProp("setUint8", r.newNativeFunc(r.dataViewProto_setUint8, nil, "setUint8", nil, 2), true, false, true)
- b._putProp("setUint16", r.newNativeFunc(r.dataViewProto_setUint16, nil, "setUint16", nil, 2), true, false, true)
- b._putProp("setUint32", r.newNativeFunc(r.dataViewProto_setUint32, nil, "setUint32", nil, 2), true, false, true)
- b._putSym(SymToStringTag, valueProp(asciiString("DataView"), false, false, true))
-
- return b
+func (r *Runtime) createDataView(val *Object) objectImpl {
+ o := r.newNativeConstructOnly(val, r.newDataView, r.getDataViewPrototype(), "DataView", 1)
+ return o
}
-func (r *Runtime) createDataView(val *Object) objectImpl {
- o := r.newNativeConstructOnly(val, r.newDataView, r.global.DataViewPrototype, "DataView", 1)
+func (r *Runtime) createTypedArray(val *Object) objectImpl {
+ o := r.newNativeConstructOnly(val, r.newTypedArray, r.getTypedArrayPrototype(), "TypedArray", 0)
+ o._putProp("from", r.newNativeFunc(r.typedArray_from, "from", 1), true, false, true)
+ o._putProp("of", r.newNativeFunc(r.typedArray_of, "of", 0), true, false, true)
+ r.putSpeciesReturnThis(o)
+
return o
}
-func (r *Runtime) createTypedArrayProto(val *Object) objectImpl {
- b := newBaseObjectObj(val, r.global.ObjectPrototype, classObject)
- b._put("buffer", &valueProperty{
- accessor: true,
- configurable: true,
- getterFunc: r.newNativeFunc(r.typedArrayProto_getBuffer, nil, "get buffer", nil, 0),
+func (r *Runtime) getTypedArray() *Object {
+ ret := r.global.TypedArray
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.TypedArray = ret
+ r.createTypedArray(ret)
+ }
+ return ret
+}
+
+func (r *Runtime) createTypedArrayCtor(val *Object, ctor func(args []Value, newTarget, proto *Object) *Object, name unistring.String, bytesPerElement int) {
+ p := r.newBaseObject(r.getTypedArrayPrototype(), classObject)
+ o := r.newNativeConstructOnly(val, func(args []Value, newTarget *Object) *Object {
+ return ctor(args, newTarget, p.val)
+ }, p.val, name, 3)
+
+ p._putProp("constructor", o.val, true, false, true)
+
+ o.prototype = r.getTypedArray()
+ bpe := intToValue(int64(bytesPerElement))
+ o._putProp("BYTES_PER_ELEMENT", bpe, false, false, false)
+ p._putProp("BYTES_PER_ELEMENT", bpe, false, false, false)
+}
+
+func addTypedArrays(t *objectTemplate) {
+ t.putStr("ArrayBuffer", func(r *Runtime) Value { return valueProp(r.getArrayBuffer(), true, false, true) })
+ t.putStr("DataView", func(r *Runtime) Value { return valueProp(r.getDataView(), true, false, true) })
+ t.putStr("Uint8Array", func(r *Runtime) Value { return valueProp(r.getUint8Array(), true, false, true) })
+ t.putStr("Uint8ClampedArray", func(r *Runtime) Value { return valueProp(r.getUint8ClampedArray(), true, false, true) })
+ t.putStr("Int8Array", func(r *Runtime) Value { return valueProp(r.getInt8Array(), true, false, true) })
+ t.putStr("Uint16Array", func(r *Runtime) Value { return valueProp(r.getUint16Array(), true, false, true) })
+ t.putStr("Int16Array", func(r *Runtime) Value { return valueProp(r.getInt16Array(), true, false, true) })
+ t.putStr("Uint32Array", func(r *Runtime) Value { return valueProp(r.getUint32Array(), true, false, true) })
+ t.putStr("Int32Array", func(r *Runtime) Value { return valueProp(r.getInt32Array(), true, false, true) })
+ t.putStr("Float32Array", func(r *Runtime) Value { return valueProp(r.getFloat32Array(), true, false, true) })
+ t.putStr("Float64Array", func(r *Runtime) Value { return valueProp(r.getFloat64Array(), true, false, true) })
+}
+
+func createTypedArrayProtoTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.global.ObjectPrototype
+ }
+
+ t.putStr("buffer", func(r *Runtime) Value {
+ return &valueProperty{
+ accessor: true,
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.typedArrayProto_getBuffer, "get buffer", 0),
+ }
})
- b._put("byteLength", &valueProperty{
- accessor: true,
- configurable: true,
- getterFunc: r.newNativeFunc(r.typedArrayProto_getByteLen, nil, "get byteLength", nil, 0),
+
+ t.putStr("byteLength", func(r *Runtime) Value {
+ return &valueProperty{
+ accessor: true,
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.typedArrayProto_getByteLen, "get byteLength", 0),
+ }
})
- b._put("byteOffset", &valueProperty{
- accessor: true,
- configurable: true,
- getterFunc: r.newNativeFunc(r.typedArrayProto_getByteOffset, nil, "get byteOffset", nil, 0),
+
+ t.putStr("byteOffset", func(r *Runtime) Value {
+ return &valueProperty{
+ accessor: true,
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.typedArrayProto_getByteOffset, "get byteOffset", 0),
+ }
})
- b._putProp("at", r.newNativeFunc(r.typedArrayProto_at, nil, "at", nil, 1), true, false, true)
- b._putProp("constructor", r.global.TypedArray, true, false, true)
- b._putProp("copyWithin", r.newNativeFunc(r.typedArrayProto_copyWithin, nil, "copyWithin", nil, 2), true, false, true)
- b._putProp("entries", r.newNativeFunc(r.typedArrayProto_entries, nil, "entries", nil, 0), true, false, true)
- b._putProp("every", r.newNativeFunc(r.typedArrayProto_every, nil, "every", nil, 1), true, false, true)
- b._putProp("fill", r.newNativeFunc(r.typedArrayProto_fill, nil, "fill", nil, 1), true, false, true)
- b._putProp("filter", r.newNativeFunc(r.typedArrayProto_filter, nil, "filter", nil, 1), true, false, true)
- b._putProp("find", r.newNativeFunc(r.typedArrayProto_find, nil, "find", nil, 1), true, false, true)
- b._putProp("findIndex", r.newNativeFunc(r.typedArrayProto_findIndex, nil, "findIndex", nil, 1), true, false, true)
- b._putProp("findLast", r.newNativeFunc(r.typedArrayProto_findLast, nil, "findLast", nil, 1), true, false, true)
- b._putProp("findLastIndex", r.newNativeFunc(r.typedArrayProto_findLastIndex, nil, "findLastIndex", nil, 1), true, false, true)
- b._putProp("forEach", r.newNativeFunc(r.typedArrayProto_forEach, nil, "forEach", nil, 1), true, false, true)
- b._putProp("includes", r.newNativeFunc(r.typedArrayProto_includes, nil, "includes", nil, 1), true, false, true)
- b._putProp("indexOf", r.newNativeFunc(r.typedArrayProto_indexOf, nil, "indexOf", nil, 1), true, false, true)
- b._putProp("join", r.newNativeFunc(r.typedArrayProto_join, nil, "join", nil, 1), true, false, true)
- b._putProp("keys", r.newNativeFunc(r.typedArrayProto_keys, nil, "keys", nil, 0), true, false, true)
- b._putProp("lastIndexOf", r.newNativeFunc(r.typedArrayProto_lastIndexOf, nil, "lastIndexOf", nil, 1), true, false, true)
- b._put("length", &valueProperty{
- accessor: true,
- configurable: true,
- getterFunc: r.newNativeFunc(r.typedArrayProto_getLength, nil, "get length", nil, 0),
+
+ t.putStr("at", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_at, "at", 1) })
+ t.putStr("constructor", func(r *Runtime) Value { return valueProp(r.getTypedArray(), true, false, true) })
+ t.putStr("copyWithin", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_copyWithin, "copyWithin", 2) })
+ t.putStr("entries", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_entries, "entries", 0) })
+ t.putStr("every", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_every, "every", 1) })
+ t.putStr("fill", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_fill, "fill", 1) })
+ t.putStr("filter", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_filter, "filter", 1) })
+ t.putStr("find", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_find, "find", 1) })
+ t.putStr("findIndex", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_findIndex, "findIndex", 1) })
+ t.putStr("findLast", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_findLast, "findLast", 1) })
+ t.putStr("findLastIndex", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_findLastIndex, "findLastIndex", 1) })
+ t.putStr("forEach", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_forEach, "forEach", 1) })
+ t.putStr("includes", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_includes, "includes", 1) })
+ t.putStr("indexOf", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_indexOf, "indexOf", 1) })
+ t.putStr("join", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_join, "join", 1) })
+ t.putStr("keys", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_keys, "keys", 0) })
+ t.putStr("lastIndexOf", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_lastIndexOf, "lastIndexOf", 1) })
+ t.putStr("length", func(r *Runtime) Value {
+ return &valueProperty{
+ accessor: true,
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.typedArrayProto_getLength, "get length", 0),
+ }
})
- b._putProp("map", r.newNativeFunc(r.typedArrayProto_map, nil, "map", nil, 1), true, false, true)
- b._putProp("reduce", r.newNativeFunc(r.typedArrayProto_reduce, nil, "reduce", nil, 1), true, false, true)
- b._putProp("reduceRight", r.newNativeFunc(r.typedArrayProto_reduceRight, nil, "reduceRight", nil, 1), true, false, true)
- b._putProp("reverse", r.newNativeFunc(r.typedArrayProto_reverse, nil, "reverse", nil, 0), true, false, true)
- b._putProp("set", r.newNativeFunc(r.typedArrayProto_set, nil, "set", nil, 1), true, false, true)
- b._putProp("slice", r.newNativeFunc(r.typedArrayProto_slice, nil, "slice", nil, 2), true, false, true)
- b._putProp("some", r.newNativeFunc(r.typedArrayProto_some, nil, "some", nil, 1), true, false, true)
- b._putProp("sort", r.newNativeFunc(r.typedArrayProto_sort, nil, "sort", nil, 1), true, false, true)
- b._putProp("subarray", r.newNativeFunc(r.typedArrayProto_subarray, nil, "subarray", nil, 2), true, false, true)
- b._putProp("toLocaleString", r.newNativeFunc(r.typedArrayProto_toLocaleString, nil, "toLocaleString", nil, 0), true, false, true)
- b._putProp("toString", r.global.arrayToString, true, false, true)
- valuesFunc := r.newNativeFunc(r.typedArrayProto_values, nil, "values", nil, 0)
- b._putProp("values", valuesFunc, true, false, true)
- b._putSym(SymIterator, valueProp(valuesFunc, true, false, true))
- b._putSym(SymToStringTag, &valueProperty{
- getterFunc: r.newNativeFunc(r.typedArrayProto_toStringTag, nil, "get [Symbol.toStringTag]", nil, 0),
- accessor: true,
- configurable: true,
+ t.putStr("map", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_map, "map", 1) })
+ t.putStr("reduce", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_reduce, "reduce", 1) })
+ t.putStr("reduceRight", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_reduceRight, "reduceRight", 1) })
+ t.putStr("reverse", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_reverse, "reverse", 0) })
+ t.putStr("set", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_set, "set", 1) })
+ t.putStr("slice", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_slice, "slice", 2) })
+ t.putStr("some", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_some, "some", 1) })
+ t.putStr("sort", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_sort, "sort", 1) })
+ t.putStr("subarray", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_subarray, "subarray", 2) })
+ t.putStr("toLocaleString", func(r *Runtime) Value { return r.methodProp(r.typedArrayProto_toLocaleString, "toLocaleString", 0) })
+ t.putStr("toString", func(r *Runtime) Value { return valueProp(r.getArrayToString(), true, false, true) })
+ t.putStr("values", func(r *Runtime) Value { return valueProp(r.getTypedArrayValues(), true, false, true) })
+
+ t.putSym(SymIterator, func(r *Runtime) Value { return valueProp(r.getTypedArrayValues(), true, false, true) })
+ t.putSym(SymToStringTag, func(r *Runtime) Value {
+ return &valueProperty{
+ getterFunc: r.newNativeFunc(r.typedArrayProto_toStringTag, "get [Symbol.toStringTag]", 0),
+ accessor: true,
+ configurable: true,
+ }
})
- return b
+ return t
}
-func (r *Runtime) createTypedArray(val *Object) objectImpl {
- o := r.newNativeConstructOnly(val, r.newTypedArray, r.global.TypedArrayPrototype, "TypedArray", 0)
- o._putProp("from", r.newNativeFunc(r.typedArray_from, nil, "from", nil, 1), true, false, true)
- o._putProp("of", r.newNativeFunc(r.typedArray_of, nil, "of", nil, 0), true, false, true)
- r.putSpeciesReturnThis(o)
+func (r *Runtime) getTypedArrayValues() *Object {
+ ret := r.global.typedArrayValues
+ if ret == nil {
+ ret = r.newNativeFunc(r.typedArrayProto_values, "values", 0)
+ r.global.typedArrayValues = ret
+ }
+ return ret
+}
- return o
+var typedArrayProtoTemplate *objectTemplate
+var typedArrayProtoTemplateOnce sync.Once
+
+func getTypedArrayProtoTemplate() *objectTemplate {
+ typedArrayProtoTemplateOnce.Do(func() {
+ typedArrayProtoTemplate = createTypedArrayProtoTemplate()
+ })
+ return typedArrayProtoTemplate
+}
+
+func (r *Runtime) getTypedArrayPrototype() *Object {
+ ret := r.global.TypedArrayPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.TypedArrayPrototype = ret
+ r.newTemplatedObject(getTypedArrayProtoTemplate(), ret)
+ }
+ return ret
+}
+
+func (r *Runtime) getUint8Array() *Object {
+ ret := r.global.Uint8Array
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Uint8Array = ret
+ r.createTypedArrayCtor(ret, r.newUint8Array, "Uint8Array", 1)
+ }
+ return ret
+}
+
+func (r *Runtime) getUint8ClampedArray() *Object {
+ ret := r.global.Uint8ClampedArray
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Uint8ClampedArray = ret
+ r.createTypedArrayCtor(ret, r.newUint8ClampedArray, "Uint8ClampedArray", 1)
+ }
+ return ret
+}
+
+func (r *Runtime) getInt8Array() *Object {
+ ret := r.global.Int8Array
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Int8Array = ret
+ r.createTypedArrayCtor(ret, r.newInt8Array, "Int8Array", 1)
+ }
+ return ret
+}
+
+func (r *Runtime) getUint16Array() *Object {
+ ret := r.global.Uint16Array
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Uint16Array = ret
+ r.createTypedArrayCtor(ret, r.newUint16Array, "Uint16Array", 2)
+ }
+ return ret
+}
+
+func (r *Runtime) getInt16Array() *Object {
+ ret := r.global.Int16Array
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Int16Array = ret
+ r.createTypedArrayCtor(ret, r.newInt16Array, "Int16Array", 2)
+ }
+ return ret
}
-func (r *Runtime) typedArrayCreator(ctor func(args []Value, newTarget, proto *Object) *Object, name unistring.String, bytesPerElement int) func(val *Object) objectImpl {
- return func(val *Object) objectImpl {
- p := r.newBaseObject(r.global.TypedArrayPrototype, classObject)
- o := r.newNativeConstructOnly(val, func(args []Value, newTarget *Object) *Object {
- return ctor(args, newTarget, p.val)
- }, p.val, name, 3)
+func (r *Runtime) getUint32Array() *Object {
+ ret := r.global.Uint32Array
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Uint32Array = ret
+ r.createTypedArrayCtor(ret, r.newUint32Array, "Uint32Array", 4)
+ }
+ return ret
+}
- p._putProp("constructor", o.val, true, false, true)
+func (r *Runtime) getInt32Array() *Object {
+ ret := r.global.Int32Array
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Int32Array = ret
+ r.createTypedArrayCtor(ret, r.newInt32Array, "Int32Array", 4)
+ }
+ return ret
+}
- o.prototype = r.global.TypedArray
- bpe := intToValue(int64(bytesPerElement))
- o._putProp("BYTES_PER_ELEMENT", bpe, false, false, false)
- p._putProp("BYTES_PER_ELEMENT", bpe, false, false, false)
- return o
+func (r *Runtime) getFloat32Array() *Object {
+ ret := r.global.Float32Array
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Float32Array = ret
+ r.createTypedArrayCtor(ret, r.newFloat32Array, "Float32Array", 4)
}
+ return ret
}
-func (r *Runtime) initTypedArrays() {
+func (r *Runtime) getFloat64Array() *Object {
+ ret := r.global.Float64Array
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.Float64Array = ret
+ r.createTypedArrayCtor(ret, r.newFloat64Array, "Float64Array", 8)
+ }
+ return ret
+}
- r.global.ArrayBufferPrototype = r.newLazyObject(r.createArrayBufferProto)
- r.global.ArrayBuffer = r.newLazyObject(r.createArrayBuffer)
- r.addToGlobal("ArrayBuffer", r.global.ArrayBuffer)
+func createDataViewProtoTemplate() *objectTemplate {
+ t := newObjectTemplate()
+ t.protoFactory = func(r *Runtime) *Object {
+ return r.global.ObjectPrototype
+ }
- r.global.DataViewPrototype = r.newLazyObject(r.createDataViewProto)
- r.global.DataView = r.newLazyObject(r.createDataView)
- r.addToGlobal("DataView", r.global.DataView)
+ t.putStr("buffer", func(r *Runtime) Value {
+ return &valueProperty{
+ accessor: true,
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.dataViewProto_getBuffer, "get buffer", 0),
+ }
+ })
+ t.putStr("byteLength", func(r *Runtime) Value {
+ return &valueProperty{
+ accessor: true,
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.dataViewProto_getByteLen, "get byteLength", 0),
+ }
+ })
+ t.putStr("byteOffset", func(r *Runtime) Value {
+ return &valueProperty{
+ accessor: true,
+ configurable: true,
+ getterFunc: r.newNativeFunc(r.dataViewProto_getByteOffset, "get byteOffset", 0),
+ }
+ })
- r.global.TypedArrayPrototype = r.newLazyObject(r.createTypedArrayProto)
- r.global.TypedArray = r.newLazyObject(r.createTypedArray)
+ t.putStr("constructor", func(r *Runtime) Value { return valueProp(r.getDataView(), true, false, true) })
- r.global.Uint8Array = r.newLazyObject(r.typedArrayCreator(r.newUint8Array, "Uint8Array", 1))
- r.addToGlobal("Uint8Array", r.global.Uint8Array)
+ t.putStr("getFloat32", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_getFloat32, "getFloat32", 1) })
+ t.putStr("getFloat64", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_getFloat64, "getFloat64", 1) })
+ t.putStr("getInt8", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_getInt8, "getInt8", 1) })
+ t.putStr("getInt16", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_getInt16, "getInt16", 1) })
+ t.putStr("getInt32", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_getInt32, "getInt32", 1) })
+ t.putStr("getUint8", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_getUint8, "getUint8", 1) })
+ t.putStr("getUint16", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_getUint16, "getUint16", 1) })
+ t.putStr("getUint32", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_getUint32, "getUint32", 1) })
+ t.putStr("setFloat32", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_setFloat32, "setFloat32", 2) })
+ t.putStr("setFloat64", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_setFloat64, "setFloat64", 2) })
+ t.putStr("setInt8", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_setInt8, "setInt8", 2) })
+ t.putStr("setInt16", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_setInt16, "setInt16", 2) })
+ t.putStr("setInt32", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_setInt32, "setInt32", 2) })
+ t.putStr("setUint8", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_setUint8, "setUint8", 2) })
+ t.putStr("setUint16", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_setUint16, "setUint16", 2) })
+ t.putStr("setUint32", func(r *Runtime) Value { return r.methodProp(r.dataViewProto_setUint32, "setUint32", 2) })
- r.global.Uint8ClampedArray = r.newLazyObject(r.typedArrayCreator(r.newUint8ClampedArray, "Uint8ClampedArray", 1))
- r.addToGlobal("Uint8ClampedArray", r.global.Uint8ClampedArray)
+ t.putSym(SymToStringTag, func(r *Runtime) Value { return valueProp(asciiString("DataView"), false, false, true) })
- r.global.Int8Array = r.newLazyObject(r.typedArrayCreator(r.newInt8Array, "Int8Array", 1))
- r.addToGlobal("Int8Array", r.global.Int8Array)
+ return t
+}
- r.global.Uint16Array = r.newLazyObject(r.typedArrayCreator(r.newUint16Array, "Uint16Array", 2))
- r.addToGlobal("Uint16Array", r.global.Uint16Array)
+var dataViewProtoTemplate *objectTemplate
+var dataViewProtoTemplateOnce sync.Once
- r.global.Int16Array = r.newLazyObject(r.typedArrayCreator(r.newInt16Array, "Int16Array", 2))
- r.addToGlobal("Int16Array", r.global.Int16Array)
+func getDataViewProtoTemplate() *objectTemplate {
+ dataViewProtoTemplateOnce.Do(func() {
+ dataViewProtoTemplate = createDataViewProtoTemplate()
+ })
+ return dataViewProtoTemplate
+}
- r.global.Uint32Array = r.newLazyObject(r.typedArrayCreator(r.newUint32Array, "Uint32Array", 4))
- r.addToGlobal("Uint32Array", r.global.Uint32Array)
+func (r *Runtime) getDataViewPrototype() *Object {
+ ret := r.global.DataViewPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.DataViewPrototype = ret
+ r.newTemplatedObject(getDataViewProtoTemplate(), ret)
+ }
+ return ret
+}
- r.global.Int32Array = r.newLazyObject(r.typedArrayCreator(r.newInt32Array, "Int32Array", 4))
- r.addToGlobal("Int32Array", r.global.Int32Array)
+func (r *Runtime) getDataView() *Object {
+ ret := r.global.DataView
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.DataView = ret
+ ret.self = r.createDataView(ret)
+ }
+ return ret
+}
- r.global.Float32Array = r.newLazyObject(r.typedArrayCreator(r.newFloat32Array, "Float32Array", 4))
- r.addToGlobal("Float32Array", r.global.Float32Array)
+func (r *Runtime) getArrayBufferPrototype() *Object {
+ ret := r.global.ArrayBufferPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.ArrayBufferPrototype = ret
+ ret.self = r.createArrayBufferProto(ret)
+ }
+ return ret
+}
- r.global.Float64Array = r.newLazyObject(r.typedArrayCreator(r.newFloat64Array, "Float64Array", 8))
- r.addToGlobal("Float64Array", r.global.Float64Array)
+func (r *Runtime) getArrayBuffer() *Object {
+ ret := r.global.ArrayBuffer
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.ArrayBuffer = ret
+ ret.self = r.createArrayBuffer(ret)
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_weakmap.go b/vendor/github.com/dop251/goja/builtin_weakmap.go
index a291f74b96..40fc717db1 100644
--- a/vendor/github.com/dop251/goja/builtin_weakmap.go
+++ b/vendor/github.com/dop251/goja/builtin_weakmap.go
@@ -90,17 +90,6 @@ func (r *Runtime) needNew(name string) *Object {
return r.NewTypeError("Constructor %s requires 'new'", name)
}
-func (r *Runtime) getPrototypeFromCtor(newTarget, defCtor, defProto *Object) *Object {
- if newTarget == defCtor {
- return defProto
- }
- proto := newTarget.self.getStr("prototype", nil)
- if obj, ok := proto.(*Object); ok {
- return obj
- }
- return defProto
-}
-
func (r *Runtime) builtin_newWeakMap(args []Value, newTarget *Object) *Object {
if newTarget == nil {
panic(r.needNew("WeakMap"))
@@ -148,12 +137,12 @@ func (r *Runtime) builtin_newWeakMap(args []Value, newTarget *Object) *Object {
func (r *Runtime) createWeakMapProto(val *Object) objectImpl {
o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject)
- o._putProp("constructor", r.global.WeakMap, true, false, true)
- r.global.weakMapAdder = r.newNativeFunc(r.weakMapProto_set, nil, "set", nil, 2)
+ o._putProp("constructor", r.getWeakMap(), true, false, true)
+ r.global.weakMapAdder = r.newNativeFunc(r.weakMapProto_set, "set", 2)
o._putProp("set", r.global.weakMapAdder, true, false, true)
- o._putProp("delete", r.newNativeFunc(r.weakMapProto_delete, nil, "delete", nil, 1), true, false, true)
- o._putProp("has", r.newNativeFunc(r.weakMapProto_has, nil, "has", nil, 1), true, false, true)
- o._putProp("get", r.newNativeFunc(r.weakMapProto_get, nil, "get", nil, 1), true, false, true)
+ o._putProp("delete", r.newNativeFunc(r.weakMapProto_delete, "delete", 1), true, false, true)
+ o._putProp("has", r.newNativeFunc(r.weakMapProto_has, "has", 1), true, false, true)
+ o._putProp("get", r.newNativeFunc(r.weakMapProto_get, "get", 1), true, false, true)
o._putSym(SymToStringTag, valueProp(asciiString(classWeakMap), false, false, true))
@@ -161,14 +150,27 @@ func (r *Runtime) createWeakMapProto(val *Object) objectImpl {
}
func (r *Runtime) createWeakMap(val *Object) objectImpl {
- o := r.newNativeConstructOnly(val, r.builtin_newWeakMap, r.global.WeakMapPrototype, "WeakMap", 0)
+ o := r.newNativeConstructOnly(val, r.builtin_newWeakMap, r.getWeakMapPrototype(), "WeakMap", 0)
return o
}
-func (r *Runtime) initWeakMap() {
- r.global.WeakMapPrototype = r.newLazyObject(r.createWeakMapProto)
- r.global.WeakMap = r.newLazyObject(r.createWeakMap)
+func (r *Runtime) getWeakMapPrototype() *Object {
+ ret := r.global.WeakMapPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.WeakMapPrototype = ret
+ ret.self = r.createWeakMapProto(ret)
+ }
+ return ret
+}
- r.addToGlobal("WeakMap", r.global.WeakMap)
+func (r *Runtime) getWeakMap() *Object {
+ ret := r.global.WeakMap
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.WeakMap = ret
+ ret.self = r.createWeakMap(ret)
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/builtin_weakset.go b/vendor/github.com/dop251/goja/builtin_weakset.go
index 027f03a592..cd8183e591 100644
--- a/vendor/github.com/dop251/goja/builtin_weakset.go
+++ b/vendor/github.com/dop251/goja/builtin_weakset.go
@@ -98,10 +98,10 @@ func (r *Runtime) createWeakSetProto(val *Object) objectImpl {
o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject)
o._putProp("constructor", r.global.WeakSet, true, false, true)
- r.global.weakSetAdder = r.newNativeFunc(r.weakSetProto_add, nil, "add", nil, 1)
+ r.global.weakSetAdder = r.newNativeFunc(r.weakSetProto_add, "add", 1)
o._putProp("add", r.global.weakSetAdder, true, false, true)
- o._putProp("delete", r.newNativeFunc(r.weakSetProto_delete, nil, "delete", nil, 1), true, false, true)
- o._putProp("has", r.newNativeFunc(r.weakSetProto_has, nil, "has", nil, 1), true, false, true)
+ o._putProp("delete", r.newNativeFunc(r.weakSetProto_delete, "delete", 1), true, false, true)
+ o._putProp("has", r.newNativeFunc(r.weakSetProto_has, "has", 1), true, false, true)
o._putSym(SymToStringTag, valueProp(asciiString(classWeakSet), false, false, true))
@@ -109,14 +109,27 @@ func (r *Runtime) createWeakSetProto(val *Object) objectImpl {
}
func (r *Runtime) createWeakSet(val *Object) objectImpl {
- o := r.newNativeConstructOnly(val, r.builtin_newWeakSet, r.global.WeakSetPrototype, "WeakSet", 0)
+ o := r.newNativeConstructOnly(val, r.builtin_newWeakSet, r.getWeakSetPrototype(), "WeakSet", 0)
return o
}
-func (r *Runtime) initWeakSet() {
- r.global.WeakSetPrototype = r.newLazyObject(r.createWeakSetProto)
- r.global.WeakSet = r.newLazyObject(r.createWeakSet)
+func (r *Runtime) getWeakSetPrototype() *Object {
+ ret := r.global.WeakSetPrototype
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.WeakSetPrototype = ret
+ ret.self = r.createWeakSetProto(ret)
+ }
+ return ret
+}
- r.addToGlobal("WeakSet", r.global.WeakSet)
+func (r *Runtime) getWeakSet() *Object {
+ ret := r.global.WeakSet
+ if ret == nil {
+ ret = &Object{runtime: r}
+ r.global.WeakSet = ret
+ ret.self = r.createWeakSet(ret)
+ }
+ return ret
}
diff --git a/vendor/github.com/dop251/goja/func.go b/vendor/github.com/dop251/goja/func.go
index 9040a531a7..c046956966 100644
--- a/vendor/github.com/dop251/goja/func.go
+++ b/vendor/github.com/dop251/goja/func.go
@@ -367,7 +367,7 @@ func (f *classFuncObject) construct(args []Value, newTarget *Object) *Object {
if v := r.vm.stack[r.vm.sp+1]; v != nil { // using residual 'this' value (a bit hacky)
instance = r.toObject(v)
} else {
- panic(r.newError(r.global.ReferenceError, "Must call super constructor in derived class before returning from derived constructor"))
+ panic(r.newError(r.getReferenceError(), "Must call super constructor in derived class before returning from derived constructor"))
}
}
return instance
@@ -509,9 +509,9 @@ func (f *baseFuncObject) init(name unistring.String, length Value) {
f._putProp("name", stringValueFromRaw(name), false, false, true)
}
-func (f *baseFuncObject) hasInstance(v Value) bool {
+func hasInstance(val *Object, v Value) bool {
if v, ok := v.(*Object); ok {
- o := f.val.self.getStr("prototype", nil)
+ o := val.self.getStr("prototype", nil)
if o1, ok := o.(*Object); ok {
for {
v = v.self.proto()
@@ -523,13 +523,17 @@ func (f *baseFuncObject) hasInstance(v Value) bool {
}
}
} else {
- f.val.runtime.typeErrorResult(true, "prototype is not an object")
+ panic(val.runtime.NewTypeError("prototype is not an object"))
}
}
return false
}
+func (f *baseFuncObject) hasInstance(v Value) bool {
+ return hasInstance(f.val, v)
+}
+
func (f *nativeFuncObject) defaultConstruct(ccall func(ConstructorCall) *Object, args []Value, newTarget *Object) *Object {
obj := f.createInstance(newTarget)
ret := ccall(ConstructorCall{
@@ -707,7 +711,7 @@ func (ar *asyncRunner) step(res Value, done bool, ex *Exception) {
}
// await
- promise := r.promiseResolve(r.global.Promise, res)
+ promise := r.promiseResolve(r.getPromise(), res)
promise.self.(*Promise).addReactions(&promiseReaction{
typ: promiseReactionFulfill,
handler: &jobCallback{callback: ar.onFulfilled},
@@ -722,7 +726,7 @@ func (ar *asyncRunner) step(res Value, done bool, ex *Exception) {
func (ar *asyncRunner) start(nArgs int) {
r := ar.f.runtime
ar.gen.vm = r.vm
- ar.promiseCap = r.newPromiseCapability(r.global.Promise)
+ ar.promiseCap = r.newPromiseCapability(r.getPromise())
sp := r.vm.sp
ar.gen.enter()
ar.vmCall(r.vm, nArgs)
diff --git a/vendor/github.com/dop251/goja/object.go b/vendor/github.com/dop251/goja/object.go
index 99a06d3799..79bd67df42 100644
--- a/vendor/github.com/dop251/goja/object.go
+++ b/vendor/github.com/dop251/goja/object.go
@@ -871,7 +871,7 @@ func (o *Object) ordinaryToPrimitiveString() Value {
return v
}
- panic(o.runtime.NewTypeError("Could not convert %v to primitive", o.self))
+ panic(o.runtime.NewTypeError("Could not convert %v (%T) to primitive", o.self, o.self))
}
func (o *Object) tryExoticToPrimitive(hint Value) Value {
@@ -916,8 +916,8 @@ func (o *baseObject) assertCallable() (func(FunctionCall) Value, bool) {
return nil, false
}
-func (o *baseObject) vmCall(vm *vm, n int) {
- vm.r.typeErrorResult(true, "Not a function: %s", o.val.toString())
+func (o *baseObject) vmCall(vm *vm, _ int) {
+ panic(vm.r.NewTypeError("Not a function: %s", o.val.toString()))
}
func (o *baseObject) assertConstructor() func(args []Value, newTarget *Object) *Object {
diff --git a/vendor/github.com/dop251/goja/object_dynamic.go b/vendor/github.com/dop251/goja/object_dynamic.go
index f224224d41..b1e3161e94 100644
--- a/vendor/github.com/dop251/goja/object_dynamic.go
+++ b/vendor/github.com/dop251/goja/object_dynamic.go
@@ -141,7 +141,7 @@ func (r *Runtime) NewDynamicArray(a DynamicArray) *Object {
a: a,
baseDynamicObject: baseDynamicObject{
val: v,
- prototype: r.global.ArrayPrototype,
+ prototype: r.getArrayPrototype(),
},
}
v.self = o
diff --git a/vendor/github.com/dop251/goja/object_goarray_reflect.go b/vendor/github.com/dop251/goja/object_goarray_reflect.go
index 3db5a892bb..e40364db9e 100644
--- a/vendor/github.com/dop251/goja/object_goarray_reflect.go
+++ b/vendor/github.com/dop251/goja/object_goarray_reflect.go
@@ -59,7 +59,7 @@ func (c *valueArrayCache) shrink(newlen int) {
func (o *objectGoArrayReflect) _init() {
o.objectGoReflect.init()
o.class = classArray
- o.prototype = o.val.runtime.global.ArrayPrototype
+ o.prototype = o.val.runtime.getArrayPrototype()
o.baseObject._put("length", &o.lengthProp)
}
diff --git a/vendor/github.com/dop251/goja/object_goreflect.go b/vendor/github.com/dop251/goja/object_goreflect.go
index a4584d3953..7ad5970efb 100644
--- a/vendor/github.com/dop251/goja/object_goreflect.go
+++ b/vendor/github.com/dop251/goja/object_goreflect.go
@@ -122,24 +122,24 @@ func (o *objectGoReflect) init() {
switch o.fieldsValue.Kind() {
case reflect.Bool:
o.class = classBoolean
- o.prototype = o.val.runtime.global.BooleanPrototype
+ o.prototype = o.val.runtime.getBooleanPrototype()
o.toString = o._toStringBool
o.valueOf = o._valueOfBool
case reflect.String:
o.class = classString
- o.prototype = o.val.runtime.global.StringPrototype
+ o.prototype = o.val.runtime.getStringPrototype()
o.toString = o._toStringString
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
o.class = classNumber
- o.prototype = o.val.runtime.global.NumberPrototype
+ o.prototype = o.val.runtime.getNumberPrototype()
o.valueOf = o._valueOfInt
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
o.class = classNumber
- o.prototype = o.val.runtime.global.NumberPrototype
+ o.prototype = o.val.runtime.getNumberPrototype()
o.valueOf = o._valueOfUint
case reflect.Float32, reflect.Float64:
o.class = classNumber
- o.prototype = o.val.runtime.global.NumberPrototype
+ o.prototype = o.val.runtime.getNumberPrototype()
o.valueOf = o._valueOfFloat
default:
o.class = classObject
diff --git a/vendor/github.com/dop251/goja/object_goslice.go b/vendor/github.com/dop251/goja/object_goslice.go
index 4f509dd4cf..1a5220746d 100644
--- a/vendor/github.com/dop251/goja/object_goslice.go
+++ b/vendor/github.com/dop251/goja/object_goslice.go
@@ -34,7 +34,7 @@ func (r *Runtime) newObjectGoSlice(data *[]interface{}, isPtr bool) *objectGoSli
func (o *objectGoSlice) init() {
o.baseObject.init()
o.class = classArray
- o.prototype = o.val.runtime.global.ArrayPrototype
+ o.prototype = o.val.runtime.getArrayPrototype()
o.lengthProp.writable = true
o.extensible = true
o.baseObject._put("length", &o.lengthProp)
diff --git a/vendor/github.com/dop251/goja/object_lazy.go b/vendor/github.com/dop251/goja/object_lazy.go
deleted file mode 100644
index 171bdf63c8..0000000000
--- a/vendor/github.com/dop251/goja/object_lazy.go
+++ /dev/null
@@ -1,314 +0,0 @@
-package goja
-
-import (
- "reflect"
-
- "github.com/dop251/goja/unistring"
-)
-
-type lazyObject struct {
- val *Object
- create func(*Object) objectImpl
-}
-
-func (o *lazyObject) className() string {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.className()
-}
-
-func (o *lazyObject) typeOf() String {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.typeOf()
-}
-
-func (o *lazyObject) getIdx(p valueInt, receiver Value) Value {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.getIdx(p, receiver)
-}
-
-func (o *lazyObject) getSym(p *Symbol, receiver Value) Value {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.getSym(p, receiver)
-}
-
-func (o *lazyObject) getOwnPropIdx(idx valueInt) Value {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.getOwnPropIdx(idx)
-}
-
-func (o *lazyObject) getOwnPropSym(s *Symbol) Value {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.getOwnPropSym(s)
-}
-
-func (o *lazyObject) hasPropertyIdx(idx valueInt) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.hasPropertyIdx(idx)
-}
-
-func (o *lazyObject) hasPropertySym(s *Symbol) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.hasPropertySym(s)
-}
-
-func (o *lazyObject) hasOwnPropertyIdx(idx valueInt) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.hasOwnPropertyIdx(idx)
-}
-
-func (o *lazyObject) hasOwnPropertySym(s *Symbol) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.hasOwnPropertySym(s)
-}
-
-func (o *lazyObject) defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.defineOwnPropertyStr(name, desc, throw)
-}
-
-func (o *lazyObject) defineOwnPropertyIdx(name valueInt, desc PropertyDescriptor, throw bool) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.defineOwnPropertyIdx(name, desc, throw)
-}
-
-func (o *lazyObject) defineOwnPropertySym(name *Symbol, desc PropertyDescriptor, throw bool) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.defineOwnPropertySym(name, desc, throw)
-}
-
-func (o *lazyObject) deleteIdx(idx valueInt, throw bool) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.deleteIdx(idx, throw)
-}
-
-func (o *lazyObject) deleteSym(s *Symbol, throw bool) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.deleteSym(s, throw)
-}
-
-func (o *lazyObject) getStr(name unistring.String, receiver Value) Value {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.getStr(name, receiver)
-}
-
-func (o *lazyObject) getOwnPropStr(name unistring.String) Value {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.getOwnPropStr(name)
-}
-
-func (o *lazyObject) setOwnStr(p unistring.String, v Value, throw bool) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.setOwnStr(p, v, throw)
-}
-
-func (o *lazyObject) setOwnIdx(p valueInt, v Value, throw bool) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.setOwnIdx(p, v, throw)
-}
-
-func (o *lazyObject) setOwnSym(p *Symbol, v Value, throw bool) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.setOwnSym(p, v, throw)
-}
-
-func (o *lazyObject) setForeignStr(p unistring.String, v, receiver Value, throw bool) (bool, bool) {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.setForeignStr(p, v, receiver, throw)
-}
-
-func (o *lazyObject) setForeignIdx(p valueInt, v, receiver Value, throw bool) (bool, bool) {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.setForeignIdx(p, v, receiver, throw)
-}
-
-func (o *lazyObject) setForeignSym(p *Symbol, v, receiver Value, throw bool) (bool, bool) {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.setForeignSym(p, v, receiver, throw)
-}
-
-func (o *lazyObject) hasPropertyStr(name unistring.String) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.hasPropertyStr(name)
-}
-
-func (o *lazyObject) hasOwnPropertyStr(name unistring.String) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.hasOwnPropertyStr(name)
-}
-
-func (o *lazyObject) _putProp(unistring.String, Value, bool, bool, bool) Value {
- panic("cannot use _putProp() in lazy object")
-}
-
-func (o *lazyObject) _putSym(*Symbol, Value) {
- panic("cannot use _putSym() in lazy object")
-}
-
-func (o *lazyObject) assertCallable() (call func(FunctionCall) Value, ok bool) {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.assertCallable()
-}
-
-func (o *lazyObject) vmCall(vm *vm, n int) {
- obj := o.create(o.val)
- o.val.self = obj
- obj.vmCall(vm, n)
-}
-
-func (o *lazyObject) assertConstructor() func(args []Value, newTarget *Object) *Object {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.assertConstructor()
-}
-
-func (o *lazyObject) deleteStr(name unistring.String, throw bool) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.deleteStr(name, throw)
-}
-
-func (o *lazyObject) proto() *Object {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.proto()
-}
-
-func (o *lazyObject) hasInstance(v Value) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.hasInstance(v)
-}
-
-func (o *lazyObject) isExtensible() bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.isExtensible()
-}
-
-func (o *lazyObject) preventExtensions(throw bool) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.preventExtensions(throw)
-}
-
-func (o *lazyObject) iterateStringKeys() iterNextFunc {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.iterateStringKeys()
-}
-
-func (o *lazyObject) iterateSymbols() iterNextFunc {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.iterateSymbols()
-}
-
-func (o *lazyObject) iterateKeys() iterNextFunc {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.iterateKeys()
-}
-
-func (o *lazyObject) export(ctx *objectExportCtx) interface{} {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.export(ctx)
-}
-
-func (o *lazyObject) exportType() reflect.Type {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.exportType()
-}
-
-func (o *lazyObject) exportToMap(m reflect.Value, typ reflect.Type, ctx *objectExportCtx) error {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.exportToMap(m, typ, ctx)
-}
-
-func (o *lazyObject) exportToArrayOrSlice(s reflect.Value, typ reflect.Type, ctx *objectExportCtx) error {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.exportToArrayOrSlice(s, typ, ctx)
-}
-
-func (o *lazyObject) equal(other objectImpl) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.equal(other)
-}
-
-func (o *lazyObject) stringKeys(all bool, accum []Value) []Value {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.stringKeys(all, accum)
-}
-
-func (o *lazyObject) symbols(all bool, accum []Value) []Value {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.symbols(all, accum)
-}
-
-func (o *lazyObject) keys(all bool, accum []Value) []Value {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.keys(all, accum)
-}
-
-func (o *lazyObject) setProto(proto *Object, throw bool) bool {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.setProto(proto, throw)
-}
-
-func (o *lazyObject) getPrivateEnv(typ *privateEnvType, create bool) *privateElements {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.getPrivateEnv(typ, create)
-}
-
-func (o *lazyObject) sortLen() int {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.sortLen()
-}
-
-func (o *lazyObject) sortGet(i int) Value {
- obj := o.create(o.val)
- o.val.self = obj
- return obj.sortGet(i)
-}
-
-func (o *lazyObject) swap(i int, j int) {
- obj := o.create(o.val)
- o.val.self = obj
- obj.swap(i, j)
-}
diff --git a/vendor/github.com/dop251/goja/object_template.go b/vendor/github.com/dop251/goja/object_template.go
new file mode 100644
index 0000000000..6d42f9f980
--- /dev/null
+++ b/vendor/github.com/dop251/goja/object_template.go
@@ -0,0 +1,469 @@
+package goja
+
+import (
+ "fmt"
+ "github.com/dop251/goja/unistring"
+ "math"
+ "reflect"
+ "sort"
+)
+
+type templatePropFactory func(*Runtime) Value
+
+type objectTemplate struct {
+ propNames []unistring.String
+ props map[unistring.String]templatePropFactory
+
+ symProps map[*Symbol]templatePropFactory
+ symPropNames []*Symbol
+
+ protoFactory func(*Runtime) *Object
+}
+
+type templatedObject struct {
+ baseObject
+ tmpl *objectTemplate
+
+ protoMaterialised bool
+}
+
+type templatedFuncObject struct {
+ templatedObject
+
+ f func(FunctionCall) Value
+ construct func(args []Value, newTarget *Object) *Object
+}
+
+// This type exists because Array.prototype is supposed to be an array itself and I could not find
+// a different way of implementing it without either introducing another layer of interfaces or hoisting
+// the templates to baseObject both of which would have had a negative effect on the performance.
+// The implementation is as simple as possible and is not optimised in any way, but I very much doubt anybody
+// uses Array.prototype as an actual array.
+type templatedArrayObject struct {
+ templatedObject
+}
+
+func newObjectTemplate() *objectTemplate {
+ return &objectTemplate{
+ props: make(map[unistring.String]templatePropFactory),
+ }
+}
+
+func (t *objectTemplate) putStr(name unistring.String, f templatePropFactory) {
+ t.props[name] = f
+ t.propNames = append(t.propNames, name)
+}
+
+func (t *objectTemplate) putSym(s *Symbol, f templatePropFactory) {
+ if t.symProps == nil {
+ t.symProps = make(map[*Symbol]templatePropFactory)
+ }
+ t.symProps[s] = f
+ t.symPropNames = append(t.symPropNames, s)
+}
+
+func (r *Runtime) newTemplatedObject(tmpl *objectTemplate, obj *Object) *templatedObject {
+ if obj == nil {
+ obj = &Object{runtime: r}
+ }
+ o := &templatedObject{
+ baseObject: baseObject{
+ class: classObject,
+ val: obj,
+ extensible: true,
+ },
+ tmpl: tmpl,
+ }
+ obj.self = o
+ o.init()
+ return o
+}
+
+func (o *templatedObject) materialiseProto() {
+ if !o.protoMaterialised {
+ if o.tmpl.protoFactory != nil {
+ o.prototype = o.tmpl.protoFactory(o.val.runtime)
+ }
+ o.protoMaterialised = true
+ }
+}
+
+func (o *templatedObject) getStr(name unistring.String, receiver Value) Value {
+ ownProp := o.getOwnPropStr(name)
+ if ownProp == nil {
+ o.materialiseProto()
+ }
+ return o.getStrWithOwnProp(ownProp, name, receiver)
+}
+
+func (o *templatedObject) getSym(s *Symbol, receiver Value) Value {
+ ownProp := o.getOwnPropSym(s)
+ if ownProp == nil {
+ o.materialiseProto()
+ }
+ return o.getWithOwnProp(ownProp, s, receiver)
+}
+
+func (o *templatedObject) getOwnPropStr(p unistring.String) Value {
+ if v, exists := o.values[p]; exists {
+ return v
+ }
+ if f := o.tmpl.props[p]; f != nil {
+ v := f(o.val.runtime)
+ o.values[p] = v
+ return v
+ }
+ return nil
+}
+
+func (o *templatedObject) materialiseSymbols() {
+ if o.symValues == nil {
+ o.symValues = newOrderedMap(nil)
+ for _, p := range o.tmpl.symPropNames {
+ o.symValues.set(p, o.tmpl.symProps[p](o.val.runtime))
+ }
+ }
+}
+
+func (o *templatedObject) getOwnPropSym(s *Symbol) Value {
+ if o.symValues == nil && o.tmpl.symProps[s] == nil {
+ return nil
+ }
+ o.materialiseSymbols()
+ return o.baseObject.getOwnPropSym(s)
+}
+
+func (o *templatedObject) materialisePropNames() {
+ if o.propNames == nil {
+ o.propNames = append(([]unistring.String)(nil), o.tmpl.propNames...)
+ }
+}
+
+func (o *templatedObject) setOwnStr(p unistring.String, v Value, throw bool) bool {
+ existing := o.getOwnPropStr(p) // materialise property (in case it's an accessor)
+ if existing == nil {
+ o.materialiseProto()
+ o.materialisePropNames()
+ }
+ return o.baseObject.setOwnStr(p, v, throw)
+}
+
+func (o *templatedObject) setOwnSym(name *Symbol, val Value, throw bool) bool {
+ o.materialiseSymbols()
+ o.materialiseProto()
+ return o.baseObject.setOwnSym(name, val, throw)
+}
+
+func (o *templatedObject) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) {
+ ownProp := o.getOwnPropStr(name)
+ if ownProp == nil {
+ o.materialiseProto()
+ }
+ return o._setForeignStr(name, ownProp, val, receiver, throw)
+}
+
+func (o *templatedObject) proto() *Object {
+ o.materialiseProto()
+ return o.prototype
+}
+
+func (o *templatedObject) setProto(proto *Object, throw bool) bool {
+ o.protoMaterialised = true
+ ret := o.baseObject.setProto(proto, throw)
+ if ret {
+ o.protoMaterialised = true
+ }
+ return ret
+}
+
+func (o *templatedObject) setForeignIdx(name valueInt, val, receiver Value, throw bool) (bool, bool) {
+ return o.setForeignStr(name.string(), val, receiver, throw)
+}
+
+func (o *templatedObject) setForeignSym(name *Symbol, val, receiver Value, throw bool) (bool, bool) {
+ o.materialiseProto()
+ o.materialiseSymbols()
+ return o.baseObject.setForeignSym(name, val, receiver, throw)
+}
+
+func (o *templatedObject) hasPropertyStr(name unistring.String) bool {
+ if o.val.self.hasOwnPropertyStr(name) {
+ return true
+ }
+ o.materialiseProto()
+ if o.prototype != nil {
+ return o.prototype.self.hasPropertyStr(name)
+ }
+ return false
+}
+
+func (o *templatedObject) hasPropertySym(s *Symbol) bool {
+ if o.hasOwnPropertySym(s) {
+ return true
+ }
+ o.materialiseProto()
+ if o.prototype != nil {
+ return o.prototype.self.hasPropertySym(s)
+ }
+ return false
+}
+
+func (o *templatedObject) hasOwnPropertyStr(name unistring.String) bool {
+ if v, exists := o.values[name]; exists {
+ return v != nil
+ }
+
+ _, exists := o.tmpl.props[name]
+ return exists
+}
+
+func (o *templatedObject) hasOwnPropertySym(s *Symbol) bool {
+ if o.symValues != nil {
+ return o.symValues.has(s)
+ }
+ _, exists := o.tmpl.symProps[s]
+ return exists
+}
+
+func (o *templatedObject) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool {
+ existingVal := o.getOwnPropStr(name)
+ if v, ok := o._defineOwnProperty(name, existingVal, descr, throw); ok {
+ o.values[name] = v
+ if existingVal == nil {
+ o.materialisePropNames()
+ names := copyNamesIfNeeded(o.propNames, 1)
+ o.propNames = append(names, name)
+ }
+ return true
+ }
+ return false
+}
+
+func (o *templatedObject) defineOwnPropertySym(s *Symbol, descr PropertyDescriptor, throw bool) bool {
+ o.materialiseSymbols()
+ return o.baseObject.defineOwnPropertySym(s, descr, throw)
+}
+
+func (o *templatedObject) deleteStr(name unistring.String, throw bool) bool {
+ if val := o.getOwnPropStr(name); val != nil {
+ if !o.checkDelete(name, val, throw) {
+ return false
+ }
+ o.materialisePropNames()
+ o._delete(name)
+ if _, exists := o.tmpl.props[name]; exists {
+ o.values[name] = nil // white hole
+ }
+ }
+ return true
+}
+
+func (o *templatedObject) deleteSym(s *Symbol, throw bool) bool {
+ o.materialiseSymbols()
+ return o.baseObject.deleteSym(s, throw)
+}
+
+func (o *templatedObject) materialiseProps() {
+ for name, f := range o.tmpl.props {
+ if _, exists := o.values[name]; !exists {
+ o.values[name] = f(o.val.runtime)
+ }
+ }
+ o.materialisePropNames()
+}
+
+func (o *templatedObject) iterateStringKeys() iterNextFunc {
+ o.materialiseProps()
+ return o.baseObject.iterateStringKeys()
+}
+
+func (o *templatedObject) iterateSymbols() iterNextFunc {
+ o.materialiseSymbols()
+ return o.baseObject.iterateSymbols()
+}
+
+func (o *templatedObject) stringKeys(all bool, keys []Value) []Value {
+ if all {
+ o.materialisePropNames()
+ } else {
+ o.materialiseProps()
+ }
+ return o.baseObject.stringKeys(all, keys)
+}
+
+func (o *templatedObject) symbols(all bool, accum []Value) []Value {
+ o.materialiseSymbols()
+ return o.baseObject.symbols(all, accum)
+}
+
+func (o *templatedObject) keys(all bool, accum []Value) []Value {
+ return o.symbols(all, o.stringKeys(all, accum))
+}
+
+func (r *Runtime) newTemplatedFuncObject(tmpl *objectTemplate, obj *Object, f func(FunctionCall) Value, ctor func([]Value, *Object) *Object) *templatedFuncObject {
+ if obj == nil {
+ obj = &Object{runtime: r}
+ }
+ o := &templatedFuncObject{
+ templatedObject: templatedObject{
+ baseObject: baseObject{
+ class: classFunction,
+ val: obj,
+ extensible: true,
+ },
+ tmpl: tmpl,
+ },
+ f: f,
+ construct: ctor,
+ }
+ obj.self = o
+ o.init()
+ return o
+}
+
+func (f *templatedFuncObject) source() String {
+ return newStringValue(fmt.Sprintf("function %s() { [native code] }", nilSafe(f.getStr("name", nil)).toString()))
+}
+
+func (f *templatedFuncObject) export(*objectExportCtx) interface{} {
+ return f.f
+}
+
+func (f *templatedFuncObject) assertCallable() (func(FunctionCall) Value, bool) {
+ if f.f != nil {
+ return f.f, true
+ }
+ return nil, false
+}
+
+func (f *templatedFuncObject) vmCall(vm *vm, n int) {
+ var nf nativeFuncObject
+ nf.f = f.f
+ nf.vmCall(vm, n)
+}
+
+func (f *templatedFuncObject) assertConstructor() func(args []Value, newTarget *Object) *Object {
+ return f.construct
+}
+
+func (f *templatedFuncObject) exportType() reflect.Type {
+ return reflectTypeFunc
+}
+
+func (f *templatedFuncObject) typeOf() String {
+ return stringFunction
+}
+
+func (f *templatedFuncObject) hasInstance(v Value) bool {
+ return hasInstance(f.val, v)
+}
+
+func (r *Runtime) newTemplatedArrayObject(tmpl *objectTemplate, obj *Object) *templatedArrayObject {
+ if obj == nil {
+ obj = &Object{runtime: r}
+ }
+ o := &templatedArrayObject{
+ templatedObject: templatedObject{
+ baseObject: baseObject{
+ class: classArray,
+ val: obj,
+ extensible: true,
+ },
+ tmpl: tmpl,
+ },
+ }
+ obj.self = o
+ o.init()
+ return o
+}
+
+func (a *templatedArrayObject) getLenProp() *valueProperty {
+ lenProp, _ := a.getOwnPropStr("length").(*valueProperty)
+ if lenProp == nil {
+ panic(a.val.runtime.NewTypeError("missing length property"))
+ }
+ return lenProp
+}
+
+func (a *templatedArrayObject) _setOwnIdx(idx uint32) {
+ lenProp := a.getLenProp()
+ l := uint32(lenProp.value.ToInteger())
+ if idx >= l {
+ lenProp.value = intToValue(int64(idx) + 1)
+ }
+}
+
+func (a *templatedArrayObject) setLength(l uint32, throw bool) bool {
+ lenProp := a.getLenProp()
+ oldLen := uint32(lenProp.value.ToInteger())
+ if l == oldLen {
+ return true
+ }
+ if !lenProp.writable {
+ a.val.runtime.typeErrorResult(throw, "length is not writable")
+ return false
+ }
+ ret := true
+ if l < oldLen {
+ a.materialisePropNames()
+ a.fixPropOrder()
+ i := sort.Search(a.idxPropCount, func(idx int) bool {
+ return strToArrayIdx(a.propNames[idx]) >= l
+ })
+ for j := a.idxPropCount - 1; j >= i; j-- {
+ if !a.deleteStr(a.propNames[j], false) {
+ l = strToArrayIdx(a.propNames[j]) + 1
+ ret = false
+ break
+ }
+ }
+ }
+ lenProp.value = intToValue(int64(l))
+ return ret
+}
+
+func (a *templatedArrayObject) setOwnStr(name unistring.String, value Value, throw bool) bool {
+ if name == "length" {
+ return a.setLength(a.val.runtime.toLengthUint32(value), throw)
+ }
+ if !a.templatedObject.setOwnStr(name, value, throw) {
+ return false
+ }
+ if idx := strToArrayIdx(name); idx != math.MaxUint32 {
+ a._setOwnIdx(idx)
+ }
+ return true
+}
+
+func (a *templatedArrayObject) setOwnIdx(p valueInt, v Value, throw bool) bool {
+ if !a.templatedObject.setOwnStr(p.string(), v, throw) {
+ return false
+ }
+ if idx := toIdx(p); idx != math.MaxUint32 {
+ a._setOwnIdx(idx)
+ }
+ return true
+}
+
+func (a *templatedArrayObject) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool {
+ if name == "length" {
+ return a.val.runtime.defineArrayLength(a.getLenProp(), descr, a.setLength, throw)
+ }
+ if !a.templatedObject.defineOwnPropertyStr(name, descr, throw) {
+ return false
+ }
+ if idx := strToArrayIdx(name); idx != math.MaxUint32 {
+ a._setOwnIdx(idx)
+ }
+ return true
+}
+
+func (a *templatedArrayObject) defineOwnPropertyIdx(p valueInt, desc PropertyDescriptor, throw bool) bool {
+ if !a.templatedObject.defineOwnPropertyStr(p.string(), desc, throw) {
+ return false
+ }
+ if idx := toIdx(p); idx != math.MaxUint32 {
+ a._setOwnIdx(idx)
+ }
+ return true
+}
diff --git a/vendor/github.com/dop251/goja/parser/statement.go b/vendor/github.com/dop251/goja/parser/statement.go
index adc220a388..8ec5cdeb77 100644
--- a/vendor/github.com/dop251/goja/parser/statement.go
+++ b/vendor/github.com/dop251/goja/parser/statement.go
@@ -511,9 +511,10 @@ func (self *_parser) parseThrowStatement() ast.Statement {
}
func (self *_parser) parseSwitchStatement() ast.Statement {
- self.expect(token.SWITCH)
+ idx := self.expect(token.SWITCH)
self.expect(token.LEFT_PARENTHESIS)
node := &ast.SwitchStatement{
+ Switch: idx,
Discriminant: self.parseExpression(),
Default: -1,
}
@@ -529,6 +530,7 @@ func (self *_parser) parseSwitchStatement() ast.Statement {
for index := 0; self.token != token.EOF; index++ {
if self.token == token.RIGHT_BRACE {
+ node.RightBrace = self.idx
self.next()
break
}
@@ -547,11 +549,10 @@ func (self *_parser) parseSwitchStatement() ast.Statement {
}
func (self *_parser) parseWithStatement() ast.Statement {
- self.expect(token.WITH)
+ node := &ast.WithStatement{}
+ node.With = self.expect(token.WITH)
self.expect(token.LEFT_PARENTHESIS)
- node := &ast.WithStatement{
- Object: self.parseExpression(),
- }
+ node.Object = self.parseExpression()
self.expect(token.RIGHT_PARENTHESIS)
self.scope.allowLet = false
node.Body = self.parseStatement()
@@ -816,8 +817,8 @@ func (self *_parser) parseDoWhileStatement() ast.Statement {
self.scope.inIteration = inIteration
}()
- self.expect(token.DO)
node := &ast.DoWhileStatement{}
+ node.Do = self.expect(token.DO)
if self.token == token.LEFT_BRACE {
node.Body = self.parseBlockStatement()
} else {
@@ -828,7 +829,7 @@ func (self *_parser) parseDoWhileStatement() ast.Statement {
self.expect(token.WHILE)
self.expect(token.LEFT_PARENTHESIS)
node.Test = self.parseExpression()
- self.expect(token.RIGHT_PARENTHESIS)
+ node.RightParenthesis = self.expect(token.RIGHT_PARENTHESIS)
if self.token == token.SEMICOLON {
self.next()
}
@@ -837,10 +838,11 @@ func (self *_parser) parseDoWhileStatement() ast.Statement {
}
func (self *_parser) parseWhileStatement() ast.Statement {
- self.expect(token.WHILE)
+ idx := self.expect(token.WHILE)
self.expect(token.LEFT_PARENTHESIS)
node := &ast.WhileStatement{
- Test: self.parseExpression(),
+ While: idx,
+ Test: self.parseExpression(),
}
self.expect(token.RIGHT_PARENTHESIS)
node.Body = self.parseIterationStatement()
diff --git a/vendor/github.com/dop251/goja/runtime.go b/vendor/github.com/dop251/goja/runtime.go
index 816037dbff..578b46792e 100644
--- a/vendor/github.com/dop251/goja/runtime.go
+++ b/vendor/github.com/dop251/goja/runtime.go
@@ -58,7 +58,10 @@ type global struct {
Date *Object
Symbol *Object
Proxy *Object
+ Reflect *Object
Promise *Object
+ Math *Object
+ JSON *Object
AsyncFunction *Object
@@ -123,21 +126,11 @@ type global struct {
StringIteratorPrototype *Object
RegExpStringIteratorPrototype *Object
- ErrorPrototype *Object
- AggregateErrorPrototype *Object
- TypeErrorPrototype *Object
- SyntaxErrorPrototype *Object
- RangeErrorPrototype *Object
- ReferenceErrorPrototype *Object
- EvalErrorPrototype *Object
- URIErrorPrototype *Object
-
- GoErrorPrototype *Object
+ ErrorPrototype *Object
Eval *Object
- thrower *Object
- throwerProperty Value
+ thrower *Object
stdRegexpProto *guardedObject
@@ -147,6 +140,13 @@ type global struct {
setAdder *Object
arrayValues *Object
arrayToString *Object
+
+ stringproto_trimEnd *Object
+ stringproto_trimStart *Object
+
+ parseFloat, parseInt *Object
+
+ typedArrayValues *Object
}
type Flag int
@@ -399,14 +399,10 @@ func (e *Exception) Value() Value {
return e.val
}
-func (r *Runtime) addToGlobal(name string, value Value) {
- r.globalObject.self._putProp(unistring.String(name), value, true, false, true)
-}
-
func (r *Runtime) createIterProto(val *Object) objectImpl {
o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject)
- o._putSym(SymIterator, valueProp(r.newNativeFunc(r.returnThis, nil, "[Symbol.iterator]", nil, 0), true, false, true))
+ o._putSym(SymIterator, valueProp(r.newNativeFunc(r.returnThis, "[Symbol.iterator]", 0), true, false, true))
return o
}
@@ -423,68 +419,17 @@ func (r *Runtime) getIteratorPrototype() *Object {
func (r *Runtime) init() {
r.rand = rand.Float64
r.now = time.Now
- r.global.ObjectPrototype = r.newBaseObject(nil, classObject).val
- r.globalObject = r.NewObject()
+
+ r.global.ObjectPrototype = &Object{runtime: r}
+ r.newTemplatedObject(getObjectProtoTemplate(), r.global.ObjectPrototype)
+
+ r.globalObject = &Object{runtime: r}
+ r.newTemplatedObject(getGlobalObjectTemplate(), r.globalObject)
r.vm = &vm{
r: r,
}
r.vm.init()
-
- funcProto := r.newNativeFunc(func(FunctionCall) Value {
- return _undefined
- }, nil, " ", nil, 0)
- r.global.FunctionPrototype = funcProto
- funcProtoObj := funcProto.self.(*nativeFuncObject)
-
- r.initObject()
- r.initFunction()
- r.initArray()
- r.initString()
- r.initGlobalObject()
- r.initNumber()
- r.initRegExp()
- r.initDate()
- r.initBoolean()
- r.initProxy()
- r.initReflect()
-
- r.initErrors()
-
- r.global.Eval = r.newNativeFunc(r.builtin_eval, nil, "eval", nil, 1)
- r.addToGlobal("eval", r.global.Eval)
-
- r.initMath()
- r.initJSON()
-
- r.initTypedArrays()
- r.initSymbol()
- r.initWeakSet()
- r.initWeakMap()
- r.initMap()
- r.initSet()
- r.initPromise()
-
- r.global.thrower = r.newNativeFunc(r.builtin_thrower, nil, "", nil, 0)
- r.global.throwerProperty = &valueProperty{
- getterFunc: r.global.thrower,
- setterFunc: r.global.thrower,
- accessor: true,
- }
- r.object_freeze(FunctionCall{Arguments: []Value{r.global.thrower}})
-
- funcProtoObj._put("caller", &valueProperty{
- getterFunc: r.global.thrower,
- setterFunc: r.global.thrower,
- accessor: true,
- configurable: true,
- })
- funcProtoObj._put("arguments", &valueProperty{
- getterFunc: r.global.thrower,
- setterFunc: r.global.thrower,
- accessor: true,
- configurable: true,
- })
}
func (r *Runtime) typeErrorResult(throw bool, args ...interface{}) {
@@ -508,11 +453,11 @@ func (r *Runtime) throwReferenceError(name unistring.String) {
}
func (r *Runtime) newReferenceError(name unistring.String) Value {
- return r.newError(r.global.ReferenceError, "%s is not defined", name)
+ return r.newError(r.getReferenceError(), "%s is not defined", name)
}
func (r *Runtime) newSyntaxError(msg string, offset int) Value {
- return r.builtin_new(r.global.SyntaxError, []Value{newStringValue(msg)})
+ return r.builtin_new(r.getSyntaxError(), []Value{newStringValue(msg)})
}
func newBaseObjectObj(obj, proto *Object, class string) *baseObject {
@@ -574,11 +519,11 @@ func (r *Runtime) NewTypeError(args ...interface{}) *Object {
f, _ := args[0].(string)
msg = fmt.Sprintf(f, args[1:]...)
}
- return r.builtin_new(r.global.TypeError, []Value{newStringValue(msg)})
+ return r.builtin_new(r.getTypeError(), []Value{newStringValue(msg)})
}
func (r *Runtime) NewGoError(err error) *Object {
- e := r.newError(r.global.GoError, err.Error()).(*Object)
+ e := r.newError(r.getGoError(), err.Error()).(*Object)
e.Set("value", err)
return e
}
@@ -634,7 +579,7 @@ func (r *Runtime) initBaseJsFunction(f *baseJsFuncObject, strict bool) {
f.val = v
f.extensible = true
f.strict = strict
- f.prototype = r.global.FunctionPrototype
+ f.prototype = r.getFunctionPrototype()
}
func (r *Runtime) newMethod(name unistring.String, length int, strict bool) (f *methodFuncObject) {
@@ -686,27 +631,6 @@ func (r *Runtime) newAsyncArrowFunc(name unistring.String, length int, strict bo
return
}
-func (r *Runtime) newNativeFuncObj(v *Object, call func(FunctionCall) Value, construct func(args []Value, proto *Object) *Object, name unistring.String, proto *Object, length Value) *nativeFuncObject {
- f := &nativeFuncObject{
- baseFuncObject: baseFuncObject{
- baseObject: baseObject{
- class: classFunction,
- val: v,
- extensible: true,
- prototype: r.global.FunctionPrototype,
- },
- },
- f: call,
- construct: r.wrapNativeConstruct(construct, proto),
- }
- v.self = f
- f.init(name, length)
- if proto != nil {
- f._putProp("prototype", proto, false, false, false)
- }
- return f
-}
-
func (r *Runtime) newNativeConstructor(call func(ConstructorCall) *Object, name unistring.String, length int64) *Object {
v := &Object{runtime: r}
@@ -716,7 +640,7 @@ func (r *Runtime) newNativeConstructor(call func(ConstructorCall) *Object, name
class: classFunction,
val: v,
extensible: true,
- prototype: r.global.FunctionPrototype,
+ prototype: r.getFunctionPrototype(),
},
},
}
@@ -773,7 +697,7 @@ func (r *Runtime) newNativeFuncAndConstruct(v *Object, call func(call FunctionCa
class: classFunction,
val: v,
extensible: true,
- prototype: r.global.FunctionPrototype,
+ prototype: r.getFunctionPrototype(),
},
},
f: call,
@@ -788,7 +712,7 @@ func (r *Runtime) newNativeFuncAndConstruct(v *Object, call func(call FunctionCa
return f
}
-func (r *Runtime) newNativeFunc(call func(FunctionCall) Value, construct func(args []Value, proto *Object) *Object, name unistring.String, proto *Object, length int) *Object {
+func (r *Runtime) newNativeFunc(call func(FunctionCall) Value, name unistring.String, length int) *Object {
v := &Object{runtime: r}
f := &nativeFuncObject{
@@ -797,18 +721,13 @@ func (r *Runtime) newNativeFunc(call func(FunctionCall) Value, construct func(ar
class: classFunction,
val: v,
extensible: true,
- prototype: r.global.FunctionPrototype,
+ prototype: r.getFunctionPrototype(),
},
},
- f: call,
- construct: r.wrapNativeConstruct(construct, proto),
+ f: call,
}
v.self = f
f.init(name, intToValue(int64(length)))
- if proto != nil {
- f._putProp("prototype", proto, false, false, false)
- proto.self._putProp("constructor", v, true, false, true)
- }
return v
}
@@ -823,7 +742,7 @@ func (r *Runtime) newWrappedFunc(value reflect.Value) *Object {
class: classFunction,
val: v,
extensible: true,
- prototype: r.global.FunctionPrototype,
+ prototype: r.getFunctionPrototype(),
},
},
f: r.wrapReflectFunc(value),
@@ -843,11 +762,11 @@ func (r *Runtime) newNativeFuncConstructObj(v *Object, construct func(args []Val
class: classFunction,
val: v,
extensible: true,
- prototype: r.global.FunctionPrototype,
+ prototype: r.getFunctionPrototype(),
},
},
f: r.constructToCall(construct, proto),
- construct: r.wrapNativeConstruct(construct, proto),
+ construct: r.wrapNativeConstruct(construct, v, proto),
}
f.init(name, intToValue(int64(length)))
@@ -857,13 +776,11 @@ func (r *Runtime) newNativeFuncConstructObj(v *Object, construct func(args []Val
return f
}
-func (r *Runtime) newNativeFuncConstruct(construct func(args []Value, proto *Object) *Object, name unistring.String, prototype *Object, length int64) *Object {
- return r.newNativeFuncConstructProto(construct, name, prototype, r.global.FunctionPrototype, length)
+func (r *Runtime) newNativeFuncConstruct(v *Object, construct func(args []Value, proto *Object) *Object, name unistring.String, prototype *Object, length int64) *Object {
+ return r.newNativeFuncConstructProto(v, construct, name, prototype, r.getFunctionPrototype(), length)
}
-func (r *Runtime) newNativeFuncConstructProto(construct func(args []Value, proto *Object) *Object, name unistring.String, prototype, proto *Object, length int64) *Object {
- v := &Object{runtime: r}
-
+func (r *Runtime) newNativeFuncConstructProto(v *Object, construct func(args []Value, proto *Object) *Object, name unistring.String, prototype, proto *Object, length int64) *Object {
f := &nativeFuncObject{}
f.class = classFunction
f.val = v
@@ -871,11 +788,10 @@ func (r *Runtime) newNativeFuncConstructProto(construct func(args []Value, proto
v.self = f
f.prototype = proto
f.f = r.constructToCall(construct, prototype)
- f.construct = r.wrapNativeConstruct(construct, prototype)
+ f.construct = r.wrapNativeConstruct(construct, v, prototype)
f.init(name, intToValue(length))
if prototype != nil {
f._putProp("prototype", prototype, false, false, false)
- prototype.self._putProp("constructor", v, true, false, true)
}
return v
}
@@ -939,7 +855,7 @@ func (r *Runtime) builtin_newBoolean(args []Value, proto *Object) *Object {
}
func (r *Runtime) builtin_new(construct *Object, args []Value) *Object {
- return r.toConstructor(construct)(args, nil)
+ return r.toConstructor(construct)(args, construct)
}
func (r *Runtime) builtin_thrower(call FunctionCall) Value {
@@ -1013,21 +929,18 @@ func (r *Runtime) constructToCall(construct func(args []Value, proto *Object) *O
}
}
-func (r *Runtime) wrapNativeConstruct(c func(args []Value, proto *Object) *Object, proto *Object) func(args []Value, newTarget *Object) *Object {
+func (r *Runtime) wrapNativeConstruct(c func(args []Value, proto *Object) *Object, ctorObj, defProto *Object) func(args []Value, newTarget *Object) *Object {
if c == nil {
return nil
}
return func(args []Value, newTarget *Object) *Object {
- var p *Object
+ var proto *Object
if newTarget != nil {
- if pp, ok := newTarget.self.getStr("prototype", nil).(*Object); ok {
- p = pp
- }
- }
- if p == nil {
- p = proto
+ proto = r.getPrototypeFromCtor(newTarget, ctorObj, defProto)
+ } else {
+ proto = defProto
}
- return c(args, p)
+ return c(args, proto)
}
}
@@ -1289,7 +1202,7 @@ repeat:
return uint32(intVal)
}
fail:
- panic(r.newError(r.global.RangeError, "Invalid array length"))
+ panic(r.newError(r.getRangeError(), "Invalid array length"))
}
func toIntStrict(i int64) int {
@@ -1317,11 +1230,11 @@ func (r *Runtime) toIndex(v Value) int {
num := v.ToInteger()
if num >= 0 && num < maxInt {
if bits.UintSize == 32 && num >= math.MaxInt32 {
- panic(r.newError(r.global.RangeError, "Index %s overflows int", v.String()))
+ panic(r.newError(r.getRangeError(), "Index %s overflows int", v.String()))
}
return int(num)
}
- panic(r.newError(r.global.RangeError, "Invalid index %s", v.String()))
+ panic(r.newError(r.getRangeError(), "Invalid index %s", v.String()))
}
func (r *Runtime) toBoolean(b bool) Value {
@@ -1422,11 +1335,11 @@ func (r *Runtime) compile(name, src string, strict, inGlobal bool, evalVm *vm) (
switch x1 := err.(type) {
case *CompilerSyntaxError:
err = &Exception{
- val: r.builtin_new(r.global.SyntaxError, []Value{newStringValue(x1.Error())}),
+ val: r.builtin_new(r.getSyntaxError(), []Value{newStringValue(x1.Error())}),
}
case *CompilerReferenceError:
err = &Exception{
- val: r.newError(r.global.ReferenceError, x1.Message),
+ val: r.newError(r.getReferenceError(), x1.Message),
} // TODO proper message
}
}
@@ -1854,12 +1767,12 @@ func (r *Runtime) toValue(i interface{}, origValue reflect.Value) Value {
}
case func(FunctionCall) Value:
name := unistring.NewFromString(runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name())
- return r.newNativeFunc(i, nil, name, nil, 0)
+ return r.newNativeFunc(i, name, 0)
case func(FunctionCall, *Runtime) Value:
name := unistring.NewFromString(runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name())
return r.newNativeFunc(func(call FunctionCall) Value {
return i(call, r)
- }, nil, name, nil, 0)
+ }, name, 0)
case func(ConstructorCall) *Object:
name := unistring.NewFromString(runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name())
return r.newNativeConstructor(i, name, 0)
@@ -2810,16 +2723,6 @@ func (r *Runtime) createIterResultObject(value Value, done bool) Value {
return o
}
-func (r *Runtime) newLazyObject(create func(*Object) objectImpl) *Object {
- val := &Object{runtime: r}
- o := &lazyObject{
- val: val,
- create: create,
- }
- val.self = o
- return val
-}
-
func (r *Runtime) getHash() *maphash.Hash {
if r.hash == nil {
r.hash = &maphash.Hash{}
@@ -2979,7 +2882,7 @@ func (r *Runtime) iterableToList(iterable Value, method func(FunctionCall) Value
func (r *Runtime) putSpeciesReturnThis(o objectImpl) {
o._putSym(SymSpecies, &valueProperty{
- getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0),
+ getterFunc: r.newNativeFunc(r.returnThis, "get [Symbol.species]", 0),
accessor: true,
configurable: true,
})
@@ -3209,3 +3112,18 @@ func assertCallable(v Value) (func(FunctionCall) Value, bool) {
func (r *Runtime) InstanceOf(left Value, right *Object) (res bool) {
return instanceOfOperator(left, right)
}
+
+func (r *Runtime) methodProp(f func(FunctionCall) Value, name unistring.String, nArgs int) Value {
+ return valueProp(r.newNativeFunc(f, name, nArgs), true, false, true)
+}
+
+func (r *Runtime) getPrototypeFromCtor(newTarget, defCtor, defProto *Object) *Object {
+ if newTarget == defCtor {
+ return defProto
+ }
+ proto := newTarget.self.getStr("prototype", nil)
+ if obj, ok := proto.(*Object); ok {
+ return obj
+ }
+ return defProto
+}
diff --git a/vendor/github.com/dop251/goja/string_ascii.go b/vendor/github.com/dop251/goja/string_ascii.go
index 0728f1e1a1..5ff21bf7b7 100644
--- a/vendor/github.com/dop251/goja/string_ascii.go
+++ b/vendor/github.com/dop251/goja/string_ascii.go
@@ -209,7 +209,7 @@ func (s asciiString) ToNumber() Value {
}
func (s asciiString) ToObject(r *Runtime) *Object {
- return r._newString(s, r.global.StringPrototype)
+ return r._newString(s, r.getStringPrototype())
}
func (s asciiString) SameAs(other Value) bool {
@@ -258,7 +258,7 @@ func (s asciiString) StrictEquals(other Value) bool {
}
func (s asciiString) baseObject(r *Runtime) *Object {
- ss := r.stringSingleton
+ ss := r.getStringSingleton()
ss.value = s
ss.setLength()
return ss.val
diff --git a/vendor/github.com/dop251/goja/string_imported.go b/vendor/github.com/dop251/goja/string_imported.go
index e78ee162a2..1c6cae88d0 100644
--- a/vendor/github.com/dop251/goja/string_imported.go
+++ b/vendor/github.com/dop251/goja/string_imported.go
@@ -88,7 +88,7 @@ func (i *importedString) ToBoolean() bool {
}
func (i *importedString) ToObject(r *Runtime) *Object {
- return r._newString(i, r.global.StringPrototype)
+ return r._newString(i, r.getStringPrototype())
}
func (i *importedString) SameAs(other Value) bool {
diff --git a/vendor/github.com/dop251/goja/string_unicode.go b/vendor/github.com/dop251/goja/string_unicode.go
index e3d9d96ef6..49e363fe8f 100644
--- a/vendor/github.com/dop251/goja/string_unicode.go
+++ b/vendor/github.com/dop251/goja/string_unicode.go
@@ -402,7 +402,7 @@ func (s unicodeString) ToNumber() Value {
}
func (s unicodeString) ToObject(r *Runtime) *Object {
- return r._newString(s, r.global.StringPrototype)
+ return r._newString(s, r.getStringPrototype())
}
func (s unicodeString) equals(other unicodeString) bool {
@@ -447,7 +447,7 @@ func (s unicodeString) StrictEquals(other Value) bool {
}
func (s unicodeString) baseObject(r *Runtime) *Object {
- ss := r.stringSingleton
+ ss := r.getStringSingleton()
ss.value = s
ss.setLength()
return ss.val
diff --git a/vendor/github.com/dop251/goja/typedarrays.go b/vendor/github.com/dop251/goja/typedarrays.go
index bb2b34355b..9af03503db 100644
--- a/vendor/github.com/dop251/goja/typedarrays.go
+++ b/vendor/github.com/dop251/goja/typedarrays.go
@@ -117,7 +117,7 @@ func (a ArrayBuffer) Detached() bool {
// using this typed array will result in unaligned access which may cause performance degradation or runtime panics
// on some architectures or configurations.
func (r *Runtime) NewArrayBuffer(data []byte) ArrayBuffer {
- buf := r._newArrayBuffer(r.global.ArrayBufferPrototype, nil)
+ buf := r._newArrayBuffer(r.getArrayBufferPrototype(), nil)
buf.data = data
return ArrayBuffer{
buf: buf,
@@ -901,6 +901,8 @@ func (r *Runtime) _newTypedArrayObject(buf *arrayBufferObject, offset, length, e
}
func (r *Runtime) newUint8ArrayObject(buf *arrayBufferObject, offset, length int, proto *Object) *typedArrayObject {
+ // Note, no need to use r.getUint8Array() here or in the similar methods below, because the value is already set
+ // by the time they are called.
return r._newTypedArrayObject(buf, offset, length, 1, r.global.Uint8Array, (*uint8Array)(&buf.data), proto)
}
@@ -939,7 +941,7 @@ func (r *Runtime) newFloat64ArrayObject(buf *arrayBufferObject, offset, length i
func (o *dataViewObject) getIdxAndByteOrder(getIdx int, littleEndianVal Value, size int) (int, byteOrder) {
o.viewedArrayBuf.ensureNotDetached(true)
if getIdx+size > o.byteLen {
- panic(o.val.runtime.newError(o.val.runtime.global.RangeError, "Index %d is out of bounds", getIdx))
+ panic(o.val.runtime.newError(o.val.runtime.getRangeError(), "Index %d is out of bounds", getIdx))
}
getIdx += o.byteOffset
var bo byteOrder
diff --git a/vendor/github.com/dop251/goja/value.go b/vendor/github.com/dop251/goja/value.go
index 987b9c8364..aeb96762c8 100644
--- a/vendor/github.com/dop251/goja/value.go
+++ b/vendor/github.com/dop251/goja/value.go
@@ -203,7 +203,7 @@ func (i valueInt) ToBoolean() bool {
}
func (i valueInt) ToObject(r *Runtime) *Object {
- return r.newPrimitiveObject(i, r.global.NumberPrototype, classNumber)
+ return r.newPrimitiveObject(i, r.getNumberPrototype(), classNumber)
}
func (i valueInt) ToNumber() Value {
@@ -243,7 +243,7 @@ func (i valueInt) StrictEquals(other Value) bool {
}
func (i valueInt) baseObject(r *Runtime) *Object {
- return r.global.NumberPrototype
+ return r.getNumberPrototype()
}
func (i valueInt) Export() interface{} {
@@ -299,7 +299,7 @@ func (b valueBool) ToBoolean() bool {
}
func (b valueBool) ToObject(r *Runtime) *Object {
- return r.newPrimitiveObject(b, r.global.BooleanPrototype, "Boolean")
+ return r.newPrimitiveObject(b, r.getBooleanPrototype(), "Boolean")
}
func (b valueBool) ToNumber() Value {
@@ -337,7 +337,7 @@ func (b valueBool) StrictEquals(other Value) bool {
}
func (b valueBool) baseObject(r *Runtime) *Object {
- return r.global.BooleanPrototype
+ return r.getBooleanPrototype()
}
func (b valueBool) Export() interface{} {
@@ -604,7 +604,7 @@ func (f valueFloat) ToBoolean() bool {
}
func (f valueFloat) ToObject(r *Runtime) *Object {
- return r.newPrimitiveObject(f, r.global.NumberPrototype, "Number")
+ return r.newPrimitiveObject(f, r.getNumberPrototype(), "Number")
}
func (f valueFloat) ToNumber() Value {
@@ -664,7 +664,7 @@ func (f valueFloat) StrictEquals(other Value) bool {
}
func (f valueFloat) baseObject(r *Runtime) *Object {
- return r.global.NumberPrototype
+ return r.getNumberPrototype()
}
func (f valueFloat) Export() interface{} {
@@ -1097,7 +1097,7 @@ func (s *Symbol) ExportType() reflect.Type {
}
func (s *Symbol) baseObject(r *Runtime) *Object {
- return r.newPrimitiveObject(s, r.global.SymbolPrototype, classObject)
+ return r.newPrimitiveObject(s, r.getSymbolPrototype(), classObject)
}
func (s *Symbol) hash(*maphash.Hash) uint64 {
diff --git a/vendor/github.com/dop251/goja/vm.go b/vendor/github.com/dop251/goja/vm.go
index 2dbd705f1e..dd328444d1 100644
--- a/vendor/github.com/dop251/goja/vm.go
+++ b/vendor/github.com/dop251/goja/vm.go
@@ -2475,7 +2475,7 @@ func (_pushArrayItem) exec(vm *vm) {
if arr.length < math.MaxUint32 {
arr.length++
} else {
- vm.throw(vm.r.newError(vm.r.global.RangeError, "Invalid array length"))
+ vm.throw(vm.r.newError(vm.r.getRangeError(), "Invalid array length"))
return
}
val := vm.stack[vm.sp-1]
@@ -2497,7 +2497,7 @@ func (_pushArraySpread) exec(vm *vm) {
if arr.length < math.MaxUint32 {
arr.length++
} else {
- vm.throw(vm.r.newError(vm.r.global.RangeError, "Invalid array length"))
+ vm.throw(vm.r.newError(vm.r.getRangeError(), "Invalid array length"))
return
}
arr.values = append(arr.values, val)
@@ -2545,7 +2545,7 @@ type newRegexp struct {
}
func (n *newRegexp) exec(vm *vm) {
- vm.push(vm.r.newRegExpp(n.pattern.clone(), n.src, vm.r.global.RegExpPrototype).val)
+ vm.push(vm.r.newRegExpp(n.pattern.clone(), n.src, vm.r.getRegExpPrototype()).val)
vm.pc++
}
@@ -3828,7 +3828,7 @@ func (n *newAsyncArrowFunc) exec(vm *vm) {
}
func (vm *vm) alreadyDeclared(name unistring.String) Value {
- return vm.r.newError(vm.r.global.SyntaxError, "Identifier '%s' has already been declared", name)
+ return vm.r.newError(vm.r.getSyntaxError(), "Identifier '%s' has already been declared", name)
}
func (vm *vm) checkBindVarsGlobal(names []unistring.String) {
@@ -4598,7 +4598,7 @@ func (formalArgs createArgsMapped) exec(vm *vm) {
}
args._putProp("callee", vm.stack[vm.sb-1], true, false, true)
- args._putSym(SymIterator, valueProp(vm.r.global.arrayValues, true, false, true))
+ args._putSym(SymIterator, valueProp(vm.r.getArrayValues(), true, false, true))
vm.push(v)
vm.pc++
}
@@ -4623,8 +4623,8 @@ func (formalArgs createArgsUnmapped) exec(vm *vm) {
}
args._putProp("length", intToValue(int64(vm.args)), true, false, true)
- args._put("callee", vm.r.global.throwerProperty)
- args._putSym(SymIterator, valueProp(vm.r.global.arrayValues, true, false, true))
+ args._put("callee", vm.r.newThrowerProperty(false))
+ args._putSym(SymIterator, valueProp(vm.r.getArrayValues(), true, false, true))
vm.push(args.val)
vm.pc++
}
@@ -5057,7 +5057,7 @@ func (c *newClass) create(protoParent, ctorParent *Object, vm *vm, derived bool)
}
func (c *newClass) exec(vm *vm) {
- proto, cls := c.create(vm.r.global.ObjectPrototype, vm.r.global.FunctionPrototype, vm, false)
+ proto, cls := c.create(vm.r.global.ObjectPrototype, vm.r.getFunctionPrototype(), vm, false)
sp := vm.sp
vm.stack.expand(sp + 1)
vm.stack[sp] = proto
@@ -5086,7 +5086,7 @@ func (c *newDerivedClass) exec(vm *vm) {
superClass = sc
}
} else {
- superClass = vm.r.global.FunctionPrototype
+ superClass = vm.r.getFunctionPrototype()
}
proto, cls := c.create(protoParent, superClass, vm, true)
@@ -5103,7 +5103,7 @@ type newStaticFieldInit struct {
}
func (c *newStaticFieldInit) exec(vm *vm) {
- f := vm.r.newClassFunc("", 0, vm.r.global.FunctionPrototype, false)
+ f := vm.r.newClassFunc("", 0, vm.r.getFunctionPrototype(), false)
if c.numPrivateFields > 0 || c.numPrivateMethods > 0 {
vm.createPrivateType(f, c.numPrivateFields, c.numPrivateMethods)
}
@@ -5117,7 +5117,7 @@ func (vm *vm) loadThis(v Value) {
if v != nil {
vm.push(v)
} else {
- vm.throw(vm.r.newError(vm.r.global.ReferenceError, "Must call super constructor in derived class before accessing 'this'"))
+ vm.throw(vm.r.newError(vm.r.getReferenceError(), "Must call super constructor in derived class before accessing 'this'"))
return
}
vm.pc++
@@ -5202,7 +5202,7 @@ func (resolveThisDynamic) exec(vm *vm) {
}
}
}
- panic(vm.r.newError(vm.r.global.ReferenceError, "Compiler bug: 'this' reference is not found in resolveThisDynamic"))
+ panic(vm.r.newError(vm.r.getReferenceError(), "Compiler bug: 'this' reference is not found in resolveThisDynamic"))
}
type defineComputedKey int
@@ -5464,15 +5464,15 @@ func (vm *vm) exceptionFromValue(x interface{}) *Exception {
}
case referenceError:
ex = &Exception{
- val: vm.r.newError(vm.r.global.ReferenceError, string(x1)),
+ val: vm.r.newError(vm.r.getReferenceError(), string(x1)),
}
case rangeError:
ex = &Exception{
- val: vm.r.newError(vm.r.global.RangeError, string(x1)),
+ val: vm.r.newError(vm.r.getRangeError(), string(x1)),
}
case syntaxError:
ex = &Exception{
- val: vm.r.newError(vm.r.global.SyntaxError, string(x1)),
+ val: vm.r.newError(vm.r.getSyntaxError(), string(x1)),
}
default:
/*
diff --git a/vendor/github.com/klauspost/compress/s2/.gitignore b/vendor/github.com/klauspost/compress/s2/.gitignore
new file mode 100644
index 0000000000..3a89c6e3e2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/.gitignore
@@ -0,0 +1,15 @@
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/klauspost/compress/s2/LICENSE b/vendor/github.com/klauspost/compress/s2/LICENSE
new file mode 100644
index 0000000000..1d2d645bd9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md
new file mode 100644
index 0000000000..8284bb0810
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/README.md
@@ -0,0 +1,1120 @@
+# S2 Compression
+
+S2 is an extension of [Snappy](https://github.com/google/snappy).
+
+S2 is aimed for high throughput, which is why it features concurrent compression for bigger payloads.
+
+Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy.
+This means that S2 can seamlessly replace Snappy without converting compressed content.
+
+S2 can produce Snappy compatible output, faster and better than Snappy.
+If you want full benefit of the changes you should use s2 without Snappy compatibility.
+
+S2 is designed to have high throughput on content that cannot be compressed.
+This is important, so you don't have to worry about spending CPU cycles on already compressed data.
+
+## Benefits over Snappy
+
+* Better compression
+* Adjustable compression (3 levels)
+* Concurrent stream compression
+* Faster decompression, even for Snappy compatible content
+* Concurrent Snappy/S2 stream decompression
+* Skip forward in compressed stream
+* Random seeking with indexes
+* Compatible with reading Snappy compressed content
+* Smaller block size overhead on incompressible blocks
+* Block concatenation
+* Block Dictionary support
+* Uncompressed stream mode
+* Automatic stream size padding
+* Snappy compatible block compression
+
+## Drawbacks over Snappy
+
+* Not optimized for 32 bit systems
+* Streams use slightly more memory due to larger blocks and concurrency (configurable)
+
+# Usage
+
+Installation: `go get -u github.com/klauspost/compress/s2`
+
+Full package documentation:
+
+[![godoc][1]][2]
+
+[1]: https://godoc.org/github.com/klauspost/compress?status.svg
+[2]: https://godoc.org/github.com/klauspost/compress/s2
+
+## Compression
+
+```Go
+func EncodeStream(src io.Reader, dst io.Writer) error {
+ enc := s2.NewWriter(dst)
+ _, err := io.Copy(enc, src)
+ if err != nil {
+ enc.Close()
+ return err
+ }
+ // Blocks until compression is done.
+ return enc.Close()
+}
+```
+
+You should always call `enc.Close()`, otherwise you will leak resources and your encode will be incomplete.
+
+For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method.
+
+The Writer in S2 is always buffered, therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2.
+It is possible to flush any buffered data using the `Flush()` method.
+This will block until all data sent to the encoder has been written to the output.
+
+S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader.
+
+As a final method to compress data, if you have a single block of data you would like to have encoded as a stream,
+a slightly more efficient method is to use the `EncodeBuffer` method.
+This will take ownership of the buffer until the stream is closed.
+
+```Go
+func EncodeStream(src []byte, dst io.Writer) error {
+ enc := s2.NewWriter(dst)
+ // The encoder owns the buffer until Flush or Close is called.
+ err := enc.EncodeBuffer(src)
+ if err != nil {
+ enc.Close()
+ return err
+ }
+ // Blocks until compression is done.
+ return enc.Close()
+}
+```
+
+Each call to `EncodeBuffer` will result in discrete blocks being created without buffering,
+so it should only be used a single time per stream.
+If you need to write several blocks, you should use the regular io.Writer interface.
+
+
+## Decompression
+
+```Go
+func DecodeStream(src io.Reader, dst io.Writer) error {
+ dec := s2.NewReader(src)
+ _, err := io.Copy(dst, dec)
+ return err
+}
+```
+
+Similar to the Writer, a Reader can be reused using the `Reset` method.
+
+For the best possible throughput, there is an `EncodeBuffer(buf []byte)` function available.
+However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed.
+
+For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`.
+Do however note that these functions (similar to Snappy) do not provide validation of data,
+so data corruption may be undetected. Stream encoding provides CRC checks of data.
+
+It is possible to efficiently skip forward in a compressed stream using the `Skip()` method.
+For big skips the decompressor is able to skip blocks without decompressing them.
+
+## Single Blocks
+
+Similar to Snappy, S2 offers single block compression.
+Blocks do not offer the same flexibility and safety as streams,
+but may be preferable for very small payloads, less than 100K.
+
+Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result.
+It is possible to provide a destination buffer.
+If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used.
+If not, a new buffer will be allocated.
+
+Alternatively `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression.
+
+Similarly to decompress a block you can use `dst, err := s2.Decode(nil, src)`.
+Again an optional destination buffer can be supplied.
+The `s2.DecodedLen(src)` can be used to get the minimum capacity needed.
+If that is not satisfied a new buffer will be allocated.
+
+Block functions always operate on a single goroutine since it should only be used for small payloads.
+
+# Commandline tools
+
+Some very simple commandline tools are provided; `s2c` for compression and `s2d` for decompression.
+
+Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases).
+
+Installing them requires Go to be installed. To install them, use:
+
+`go install github.com/klauspost/compress/s2/cmd/s2c@latest && go install github.com/klauspost/compress/s2/cmd/s2d@latest`
+
+To build binaries to the current folder use:
+
+`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d`
+
+
+## s2c
+
+```
+Usage: s2c [options] file1 file2
+
+Compresses all files supplied as input separately.
+Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'.
+By default output files will be overwritten.
+Use - as the only file name to read from stdin and write to stdout.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+File names beginning with 'http://' and 'https://' will be downloaded and compressed.
+Only http response code 200 is accepted.
+
+Options:
+ -bench int
+ Run benchmark n times. No output will be written
+ -blocksize string
+ Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M")
+ -c Write all output to stdout. Multiple input files will be concatenated
+ -cpu int
+ Compress using this amount of threads (default 32)
+ -faster
+ Compress faster, but with a minor compression loss
+ -help
+ Display help
+ -index
+ Add seek index (default true)
+ -o string
+ Write output to another file. Single input file only
+ -pad string
+ Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1")
+ -q Don't write any output to terminal, except errors
+ -rm
+ Delete source file(s) after successful compression
+ -safe
+ Do not overwrite output files
+ -slower
+ Compress more, but a lot slower
+ -snappy
+ Generate Snappy compatible output stream
+ -verify
+ Verify written files
+
+```
+
+## s2d
+
+```
+Usage: s2d [options] file1 file2
+
+Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'.
+Output file names have the extension removed. By default output files will be overwritten.
+Use - as the only file name to read from stdin and write to stdout.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+File names beginning with 'http://' and 'https://' will be downloaded and decompressed.
+Extensions on downloaded files are ignored. Only http response code 200 is accepted.
+
+Options:
+ -bench int
+ Run benchmark n times. No output will be written
+ -c Write all output to stdout. Multiple input files will be concatenated
+ -help
+ Display help
+ -o string
+ Write output to another file. Single input file only
+ -offset string
+ Start at offset. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+ -q Don't write any output to terminal, except errors
+ -rm
+ Delete source file(s) after successful decompression
+ -safe
+ Do not overwrite output files
+ -tail string
+ Return last of compressed file. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+ -verify
+ Verify files, but do not write output
+```
+
+## s2sx: self-extracting archives
+
+s2sx allows creating self-extracting archives with no dependencies.
+
+By default, executables are created for the same platforms as the host os,
+but this can be overridden with `-os` and `-arch` parameters.
+
+Extracted files have 0666 permissions, except when untar option used.
+
+```
+Usage: s2sx [options] file1 file2
+
+Compresses all files supplied as input separately.
+If files have '.s2' extension they are assumed to be compressed already.
+Output files are written as 'filename.s2sx' and with '.exe' for windows targets.
+If output is big, an additional file with ".more" is written. This must be included as well.
+By default output files will be overwritten.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+Options:
+ -arch string
+ Destination architecture (default "amd64")
+ -c Write all output to stdout. Multiple input files will be concatenated
+ -cpu int
+ Compress using this amount of threads (default 32)
+ -help
+ Display help
+ -max string
+ Maximum executable size. Rest will be written to another file. (default "1G")
+ -os string
+ Destination operating system (default "windows")
+ -q Don't write any output to terminal, except errors
+ -rm
+ Delete source file(s) after successful compression
+ -safe
+ Do not overwrite output files
+ -untar
+ Untar on destination
+```
+
+Available platforms are:
+
+ * darwin-amd64
+ * darwin-arm64
+ * linux-amd64
+ * linux-arm
+ * linux-arm64
+ * linux-mips64
+ * linux-ppc64le
+ * windows-386
+ * windows-amd64
+
+By default, there is a size limit of 1GB for the output executable.
+
+When this is exceeded the remaining file content is written to a file called
+output+`.more`. This file must be included and placed alongside the
+executable for a successful extraction.
+
+This file *must* have the same name as the executable, so if the executable is renamed,
+so must the `.more` file.
+
+This functionality is disabled with stdin/stdout.
+
+### Self-extracting TAR files
+
+If you wrap a TAR file you can specify `-untar` to make it untar on the destination host.
+
+Files are extracted to the current folder with the path specified in the tar file.
+
+Note that tar files are not validated before they are wrapped.
+
+For security reasons files that move below the root folder are not allowed.
+
+# Performance
+
+This section will focus on comparisons to Snappy.
+This package is solely aimed at replacing Snappy as a high speed compression package.
+If you are mainly looking for better compression [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd)
+gives better compression, but typically at speeds slightly below "better" mode in this package.
+
+Compression is increased compared to Snappy, mostly around 5-20% and the throughput is typically 25-40% increased (single threaded) compared to the Snappy Go implementation.
+
+Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput.
+
+A "better" compression mode is also available. This allows trading a bit of speed for a minor compression gain.
+The content compressed in this mode is fully compatible with the standard decoder.
+
+Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
+
+| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
+|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% |
+| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% |
+| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% |
+| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% |
+| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - |
+| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% |
+| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% |
+| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% |
+| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - |
+| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% |
+| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - |
+| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% |
+| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% |
+| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - |
+| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% |
+| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - |
+
+### Legend
+
+* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
+* `S2 Throughput`: Throughput of S2 in MB/s.
+* `S2 % smaller`: How many percent of the Snappy output size is S2 better.
+* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy.
+* `"better" throughput`: Speed when enabling "better" compression mode in S2 compared to Snappy.
+* `"better" % smaller`: How many percent of the Snappy output size is S2 better when using "better" compression.
+
+There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
+
+Machine generated data gets by far the biggest compression boost, with size being reduced by up to 35% of Snappy size.
+
+The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
+
+Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup.
+This is likely dominated by synchronization overhead, which is confirmed by the fact that single threaded performance is higher (see above).
+
+## Decompression
+
+S2 attempts to create content that is also fast to decompress, except in "better" mode where the smallest representation is used.
+
+S2 vs Snappy **decompression** speed. Both operating on single core:
+
+| File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy |
+|-----------------------------------------------------------------------------------------------------|---------------|------------|-------------------|------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x |
+| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x |
+| sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x |
+| [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x |
+| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x |
+
+### Legend
+
+* `S2 Throughput`: Decompression speed of S2 encoded content.
+* `Better Throughput`: Decompression speed of S2 "better" encoded content.
+* `vs Snappy`: Decompression speed of S2 "better" mode compared to Snappy and absolute speed.
+
+
+While the decompression code hasn't changed, there is a significant speedup in decompression speed.
+S2 prefers longer matches and will typically only find matches that are 6 bytes or longer.
+While this reduces compression a bit, it improves decompression speed.
+
+The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy.
+
+Without assembly decompression is also very fast; single goroutine decompression speed. No assembly:
+
+| File | S2 Throughput | S2 throughput |
+|--------------------------------|---------------|---------------|
+| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
+| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
+| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
+| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s |
+| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s |
+| enwik9.s2 | 1.67x | 681.53 MB/s |
+| adresser.json.s2 | 3.41x | 4230.53 MB/s |
+| silesia.tar.s2                 | 1.52x         | 811.58 MB/s   |
+
+Even though S2 typically compresses better than Snappy, decompression speed is always better.
+
+### Concurrent Stream Decompression
+
+For full stream decompression S2 offers a [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent)
+that will decode a full stream using multiple goroutines.
+
+Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3 `, best of 3:
+
+| Input | `-cpu=1` | `-cpu=2` | `-cpu=4` | `-cpu=8` | `-cpu=16` |
+|-------------------------------------------|------------|------------|------------|------------|-------------|
+| enwik10.snappy | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s |
+| enwik10.s2 | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s |
+| sofia-air-quality-dataset.tar.snappy | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s |
+| sofia-air-quality-dataset.tar.s2 | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s |
+| sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s |
+
+Scaling can be expected to be pretty linear until memory bandwidth is saturated.
+
+For now the DecodeConcurrent can only be used for full streams without seeking or combining with regular reads.
+
+## Block compression
+
+
+When compressing blocks no concurrent compression is performed just as Snappy.
+This is because blocks are for smaller payloads and generally will not benefit from concurrent compression.
+
+An important change is that incompressible blocks will not be more than at most 10 bytes bigger than the input.
+In rare, worst case scenario Snappy blocks could be significantly bigger than the input.
+
+### Mixed content blocks
+
+The most reliable is a wide dataset.
+For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
+
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|------------|------------|
+| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** |
+| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 |
+| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 |
+| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 |
+| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 |
+| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 |
+
+S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best".
+"Better" mode provides the same compression speed as LZ4 with better compression ratio.
+
+When outputting Snappy compatible output it still delivers better throughput (150MB/s more) and better compression.
+
+As can be seen from the other benchmarks decompression should also be easier on the S2 generated output.
+
+Though they cannot be compared due to different decompression speeds here are the speed/size comparisons for
+other Go compressors:
+
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|-----------|--------|
+| Zstd Fastest (Go) | 4014735833 | 794608518 | 80.21% | 236.04 |
+| Zstd Best (Go) | 4014735833 | 704603356 | 82.45% | 35.63 |
+| Deflate (Go) l1 | 4014735833 | 871294239 | 78.30% | 214.04 |
+| Deflate (Go) l9 | 4014735833 | 730389060 | 81.81% | 41.17 |
+
+### Standard block compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline and the overall picture is more important.
+
+These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark see the mixed content above.
+
+Block compression. Parallel benchmark running on 16 cores, 16 goroutines.
+
+AMD64 assembly is used for both S2 and Snappy.
+
+| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
+|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
+| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s |
+| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s |
+| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s |
+| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s |
+| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s |
+| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s |
+| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s |
+| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s |
+| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s |
+| plrabn12.txt | 319267 | 318196 | 5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s |
+| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s |
+| kppkn.gtb | 69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s |
+| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s |
+| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s |
+| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s |
+| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s |
+
+
+Speed is generally at or above Snappy. Small blocks gets a significant speedup, although at the expense of size.
+
+Decompression speed is better than Snappy, except in one case.
+
+Since payloads are very small the variance in terms of size is rather big, so they should only be seen as a general guideline.
+
+Size is on average around Snappy, but varies on content type.
+In cases where compression is worse, it usually is compensated by a speed boost.
+
+
+### Better compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline and the overall picture is more important.
+
+| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec |
+|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
+| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s |
+| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s |
+| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s |
+| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 33691 MB/s | 30101 MB/s |
+| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s |
+| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s |
+| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s |
+| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s |
+| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s |
+| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s |
+| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s |
+| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s |
+| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s |
+| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s |
+| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s |
+| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s |
+
+
+Except for the mostly incompressible JPEG image compression is better and usually in the
+double digits in terms of percentage reduction over Snappy.
+
+The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder
+to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down.
+
+This mode aims to provide better compression at the expense of performance and achieves that
+without a huge performance penalty, except on very small blocks.
+
+Decompression speed suffers a little compared to the regular S2 mode,
+but still manages to be close to Snappy in spite of increased compression.
+
+# Best compression mode
+
+S2 offers a "best" compression mode.
+
+This will compress as much as possible with little regard to CPU usage.
+
+Mainly for offline compression, but where decompression speed should still
+be high and compatible with other S2 compressed data.
+
+Some examples compared on 16 core CPU, amd64 assembly used:
+
+```
+* enwik10
+Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s
+Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s
+Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s
+
+* github-june-2days-2019.json
+Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s
+Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s
+Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s
+
+* nyc-taxi-data-10M.csv
+Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s
+Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s
+Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s
+
+* 10gb.tar
+Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s
+Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s
+Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/s
+
+* consensus.db.10gb
+Default... 10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s
+Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s
+Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
+```
+
+Decompression speed should be around the same as using the 'better' compression mode.
+
+## Dictionaries
+
+*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for
+neither encoding nor decoding. Performance improvements can be expected in the future.*
+
+Adding dictionaries allow providing a custom dictionary that will serve as lookup in the beginning of blocks.
+
+The same dictionary *must* be used for both encoding and decoding.
+S2 does not keep track of whether the same dictionary is used,
+and using the wrong dictionary will most often not result in an error when decompressing.
+
+Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary.
+This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries
+and treat the blocks similarly.
+
+Similar to [zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression),
+the same usage scenario applies to S2 dictionaries.
+
+> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
+
+S2 further limits the dictionary to only be enabled on the first 64KB of a block.
+This will remove any negative (speed) impacts of the dictionaries on bigger blocks.
+
+### Compression
+
+Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst)
+and a 64KB dictionary trained with zStandard the following sizes can be achieved.
+
+| | Default | Better | Best |
+|--------------------|------------------|------------------|-----------------------|
+| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) |
+| With Dictionary | 921524 (12.31%) | 873154 (11.67%) | 785503 bytes (10.49%) |
+
+So for highly repetitive content, this case provides an almost 3x reduction in size.
+
+For less uniform data we will use the Go source code tree.
+Compressing First 64KB of all `.go` files in `go/src`, Go 1.19.5, 8912 files, 51253563 bytes input:
+
+| | Default | Better | Best |
+|--------------------|-------------------|-------------------|-------------------|
+| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39%) | 19482828 (38.01%) |
+| With Dictionary | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) |
+| Saving/file | 362 bytes | 428 bytes | 472 bytes |
+
+
+### Creating Dictionaries
+
+There are no tools to create dictionaries in S2.
+However, there are multiple ways to create a useful dictionary:
+
+#### Using a Sample File
+
+If your input is very uniform, you can just use a sample file as the dictionary.
+
+For example in the `github_users_sample_set` above, the average compression only goes up from
+10.49% to 11.48% by using the first file as dictionary compared to using a dedicated dictionary.
+
+```Go
+ // Read a sample
+ sample, err := os.ReadFile("sample.json")
+
+ // Create a dictionary.
+ dict := s2.MakeDict(sample, nil)
+
+ // b := dict.Bytes() will provide a dictionary that can be saved
+ // and reloaded with s2.NewDict(b).
+
+ // To encode:
+ encoded := dict.Encode(nil, file)
+
+ // To decode:
+ decoded, err := dict.Decode(nil, file)
+```
+
+#### Using Zstandard
+
+Zstandard dictionaries can easily be converted to S2 dictionaries.
+
+This can be helpful to generate dictionaries for files that don't have a fixed structure.
+
+
+Example, with training set files placed in `./training-set`:
+
+`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict`
+
+This will create a dictionary of 64KB, that can be converted to a dictionary like this:
+
+```Go
+ // Decode the Zstandard dictionary.
+ insp, err := zstd.InspectDictionary(zdict)
+ if err != nil {
+ panic(err)
+ }
+
+ // We are only interested in the contents.
+ // Assume that files start with "// Copyright (c) 2023".
+ // Search for the longest match for that.
+ // This may save a few bytes.
+ dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023"))
+
+ // b := dict.Bytes() will provide a dictionary that can be saved
+ // and reloaded with s2.NewDict(b).
+
+ // We can now encode using this dictionary
+ encodedWithDict := dict.Encode(nil, payload)
+
+ // To decode content:
+ decoded, err := dict.Decode(nil, encodedWithDict)
+```
+
+It is recommended to save the dictionary returned by `b := dict.Bytes()`, since that will contain only the S2 dictionary.
+
+This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires `zstd` to be initialized.
+
+Also note how `s2.MakeDict` allows you to search for a common starting sequence of your files.
+This can be omitted, at the expense of a few bytes.
+
+# Snappy Compatibility
+
+S2 now offers full compatibility with Snappy.
+
+This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output.
+
+There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by
+simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`.
+This uses "better" mode for all operations.
+If you would like more control, you can use the s2 package as described below:
+
+## Blocks
+
+Snappy compatible blocks can be generated with the S2 encoder.
+Compression and speed are typically a bit better, and `MaxEncodedLen` is also smaller for smaller memory usage. Replace
+
+| Snappy | S2 replacement |
+|---------------------------|-----------------------|
+| snappy.Encode(...) | s2.EncodeSnappy(...) |
+| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
+
+`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output.
+
+`s2.ConcatBlocks` is compatible with snappy blocks.
+
+Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
+
+| Encoder | Size | MB/s | Reduction |
+|-----------------------|------------|------------|------------|
+| snappy.Encode | 1128706759 | 725.59 | 71.89% |
+| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% |
+| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
+| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%** |
+
+## Streams
+
+For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`.
+All other options are available, but note that block size limit is different for snappy.
+
+Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput:
+
+| File | snappy.NewWriter | S2 Snappy | S2 Snappy, Better | S2 Snappy, Best |
+|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------|
+| nyc-taxi-data-10M.csv | 1316042016 - 539.47MB/s | 1307003093 - 10132.73MB/s | 1174534014 - 5002.44MB/s | 1115904679 - 177.97MB/s |
+| enwik10 (xml) | 5088294643 - 451.13MB/s | 5175840939 - 9440.69MB/s | 4560784526 - 4487.21MB/s | 4340299103 - 158.92MB/s |
+| 10gb.tar (mixed) | 6056946612 - 729.73MB/s | 6208571995 - 9978.05MB/s | 5741646126 - 4919.98MB/s | 5548973895 - 180.44MB/s |
+| github-june-2days-2019.json | 1525176492 - 933.00MB/s | 1476519054 - 13150.12MB/s | 1400547532 - 5803.40MB/s | 1321887137 - 204.29MB/s |
+| consensus.db.10gb (db) | 5412897703 - 1102.14MB/s | 5354073487 - 13562.91MB/s | 5335069899 - 5294.73MB/s | 5201000954 - 175.72MB/s |
+
+# Decompression
+
+All decompression functions map directly to equivalent s2 functions.
+
+| Snappy | S2 replacement |
+|------------------------|--------------------|
+| snappy.Decode(...) | s2.Decode(...) |
+| snappy.DecodedLen(...) | s2.DecodedLen(...) |
+| snappy.NewReader(...) | s2.NewReader(...) |
+
+Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip)
+are also available for Snappy streams.
+
+If you know you are only decompressing snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize)
+on your Reader will reduce memory consumption.
+
+# Concatenating blocks and streams.
+
+Concatenating streams will concatenate the output of both without recompressing them.
+While this is inefficient in terms of compression it might be usable in certain scenarios.
+The 10 byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement.
+
+Blocks can be concatenated using the `ConcatBlocks` function.
+
+Snappy blocks/streams can safely be concatenated with S2 blocks and streams.
+Streams with indexes (see below) will currently not work on concatenated streams.
+
+# Stream Seek Index
+
+S2 and Snappy streams can have indexes. These indexes will allow random seeking within the compressed data.
+
+The index can either be appended to the stream as a skippable block or returned for separate storage.
+
+When the index is appended to a stream it will be skipped by regular decoders,
+so the output remains compatible with other decoders.
+
+## Creating an Index
+
+To automatically add an index to a stream, add `WriterAddIndex()` option to your writer.
+Then the index will be added to the stream when `Close()` is called.
+
+```
+ // Add Index to stream...
+ enc := s2.NewWriter(w, s2.WriterAddIndex())
+ io.Copy(enc, r)
+ enc.Close()
+```
+
+If you want to store the index separately, you can use `CloseIndex()` instead of the regular `Close()`.
+This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`.
+
+```
+ // Get index for separate storage...
+ enc := s2.NewWriter(w)
+ io.Copy(enc, r)
+ index, err := enc.CloseIndex()
+```
+
+The `index` can then be used without needing to read it from the stream.
+This means the index can be used without needing to seek to the end of the stream
+or for manually forwarding streams. See below.
+
+Finally, an existing S2/Snappy stream can be indexed using the `s2.IndexStream(r io.Reader)` function.
+
+## Using Indexes
+
+To use indexes there is a `ReadSeeker(random bool, index []byte) (*ReadSeeker, error)` function available.
+
+Calling ReadSeeker will return an [io.ReadSeeker](https://pkg.go.dev/io#ReadSeeker) compatible version of the reader.
+
+If 'random' is specified the returned io.Seeker can be used for random seeking, otherwise only forward seeking is supported.
+Enabling random seeking requires the original input to support the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+```
+ dec := s2.NewReader(r)
+ rs, err := dec.ReadSeeker(false, nil)
+ rs.Seek(wantOffset, io.SeekStart)
+```
+
+Get a seeker to seek forward. Since no index is provided, the index is read from the stream.
+This requires that an index was added and that `r` supports the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+A custom index can be specified which will be used if supplied.
+When using a custom index, it will not be read from the input stream.
+
+```
+ dec := s2.NewReader(r)
+ rs, err := dec.ReadSeeker(false, index)
+ rs.Seek(wantOffset, io.SeekStart)
+```
+
+This will read the index from `index`. Since we specify non-random (forward only) seeking `r` does not have to be an io.Seeker.
+
+```
+ dec := s2.NewReader(r)
+ rs, err := dec.ReadSeeker(true, index)
+ rs.Seek(wantOffset, io.SeekStart)
+```
+
+Finally, since we specify that we want to do random seeking `r` must be an io.Seeker.
+
+The returned [ReadSeeker](https://pkg.go.dev/github.com/klauspost/compress/s2#ReadSeeker) contains a shallow reference to the existing Reader,
+meaning changes performed to one is reflected in the other.
+
+To check if a stream contains an index at the end, the `(*Index).LoadStream(rs io.ReadSeeker) error` can be used.
+
+## Manually Forwarding Streams
+
+Indexes can also be read outside the decoder using the [Index](https://pkg.go.dev/github.com/klauspost/compress/s2#Index) type.
+This can be used for parsing indexes, either separate or in streams.
+
+In some cases it may not be possible to serve a seekable stream.
+This can for instance be an HTTP stream, where the Range request
+is sent at the start of the stream.
+
+With a little bit of extra code it is still possible to use indexes
+to forward to specific offset with a single forward skip.
+
+It is possible to load the index manually like this:
+```
+ var index s2.Index
+ _, err = index.Load(idxBytes)
+```
+
+This can be used to figure out how much to offset the compressed stream:
+
+```
+ compressedOffset, uncompressedOffset, err := index.Find(wantOffset)
+```
+
+The `compressedOffset` is the number of bytes that should be skipped
+from the beginning of the compressed file.
+
+The `uncompressedOffset` will then be offset of the uncompressed bytes returned
+when decoding from that position. This will always be <= wantOffset.
+
+When creating a decoder it must be specified that it should *not* expect a stream identifier
+at the beginning of the stream. Assuming the io.Reader `r` has been forwarded to `compressedOffset`
+we create the decoder like this:
+
+```
+ dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier())
+```
+
+We are not completely done. We still need to forward the stream the uncompressed bytes we didn't want.
+This is done using the regular "Skip" function:
+
+```
+ err = dec.Skip(wantOffset - uncompressedOffset)
+```
+
+This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
+
+# Compact storage
+
+For compact storage [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from
+a serialized index. If you remove the header it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load).
+
+This is expected to save 20 bytes. These can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of security, but is the most compact representation. Returns nil if headers contains errors.
+
+## Index Format:
+
+Each block is structured as a snappy skippable block, with the chunk ID 0x99.
+
+The block can be read from the front, but contains information so it can be read from the back as well.
+
+Numbers are stored as fixed size little endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding),
+with un-encoded value length of 64 bits, unless other limits are specified.
+
+| Content | Format |
+|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| ID, `[1]byte` | Always 0x99. |
+| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. |
+| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". |
+| UncompressedSize, Varint | Total Uncompressed size. |
+| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. |
+| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. |
+| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. |
+| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. |
+| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. |
+| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. |
+| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. |
+| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. |
+
+For regular streams the uncompressed offsets are fully predictable,
+so `HasUncompressedOffsets` allows specifying that compressed blocks all have
+exactly `EstBlockSize` bytes of uncompressed content.
+
+Entries *must* be in order, starting with the lowest offset,
+and there *must* be no uncompressed offset duplicates.
+Entries *may* point to the start of a skippable block,
+but it is then not allowed to also have an entry for the next block since
+that would give an uncompressed offset duplicate.
+
+There is no requirement for all blocks to be represented in the index.
+In fact there is a maximum of 65536 block entries in an index.
+
+The writer can use any method to reduce the number of entries.
+An implicit block start at 0,0 can be assumed.
+
+### Decoding entries:
+
+```
+// Read Uncompressed entries.
+// Each assumes EstBlockSize delta from previous.
+for each entry {
+ uOff = 0
+ if HasUncompressedOffsets == 1 {
+ uOff = ReadVarInt // Read value from stream
+ }
+
+ // Except for the first entry, use previous values.
+ if entryNum == 0 {
+ entry[entryNum].UncompressedOffset = uOff
+ continue
+ }
+
+ // Uncompressed uses previous offset and adds EstBlockSize
+ entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff
+}
+
+
+// Guess that the first block will be 50% of uncompressed size.
+// Integer truncating division must be used.
+CompressGuess := EstBlockSize / 2
+
+// Read Compressed entries.
+// Each assumes CompressGuess delta from previous.
+// CompressGuess is adjusted for each value.
+for each entry {
+ cOff = ReadVarInt // Read value from stream
+
+ // Except for the first entry, use previous values.
+ if entryNum == 0 {
+ entry[entryNum].CompressedOffset = cOff
+ continue
+ }
+
+ // Compressed uses previous and our estimate.
+ entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff
+
+ // Adjust compressed offset for next loop, integer truncating division must be used.
+ CompressGuess += cOff/2
+}
+```
+
+To decode from any given uncompressed offset `(wantOffset)`:
+
+* Iterate entries until `entry[n].UncompressedOffset > wantOffset`.
+* Start decoding from `entry[n-1].CompressedOffset`.
+* Discard `entry[n-1].UncompressedOffset - wantOffset` bytes from the decoded stream.
+
+See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.
+
+
+# Format Extensions
+
+* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
+* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB).
+* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset.
+
+Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0.
+
+The length is specified by reading the 3-bit length specified in the tag and decode using this table:
+
+| Length | Actual Length |
+|--------|----------------------|
+| 0 | 4 |
+| 1 | 5 |
+| 2 | 6 |
+| 3 | 7 |
+| 4 | 8 |
+| 5 | 8 + read 1 byte |
+| 6 | 260 + read 2 bytes |
+| 7 | 65540 + read 3 bytes |
+
+This allows any repeat offset + length to be represented by 2 to 5 bytes.
+It also allows to emit matches longer than 64 bytes with one copy + one repeat instead of several 64 byte copies.
+
+Lengths are stored as little endian values.
+
+The first copy of a block cannot be a repeat offset and the offset is reset on every block in streams.
+
+Default streaming block size is 1MB.
+
+# Dictionary Encoding
+
+Adding dictionaries allows providing a custom dictionary that will serve as lookup in the beginning of blocks.
+
+A dictionary provides an initial repeat value that can be used to point to a common header.
+
+Other than that the dictionary contains values that can be used as back-references.
+
+Often used data should be placed at the *end* of the dictionary since offsets < 2048 bytes will be smaller.
+
+## Format
+
+Dictionary *content* must be at least 16 bytes and less than or equal to 64KiB (65536 bytes).
+
+Encoding: `[repeat value (uvarint)][dictionary content...]`
+
+Before the dictionary content, an unsigned base-128 (uvarint) encoded value specifying the initial repeat offset.
+This value is an offset into the dictionary content and not a back-reference offset,
+so setting this to 0 will make the repeat value point to the first value of the dictionary.
+
+The value must be less than the dictionary length minus 8.
+
+## Encoding
+
+From the decoder point of view the dictionary content is seen as preceding the encoded content.
+
+`[dictionary content][decoded output]`
+
+Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block.
+
+Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data.
+However, if a copy ends at the end of the dictionary the next repeat will point to the start of the decoded buffer, which is allowed.
+
+The first match can be a repeat value, which will use the repeat offset stored in the dictionary.
+
+When 64KB (65536 bytes) has been en/decoded it is no longer allowed to reference the dictionary,
+neither by a copy nor repeat operations.
+If the boundary is crossed while copying from the dictionary, the operation should complete,
+but the next instruction is not allowed to reference the dictionary.
+
+Valid blocks encoded *without* a dictionary can be decoded with any dictionary.
+There are no checks whether the supplied dictionary is the correct one for a block.
+Because of this there is no overhead by using a dictionary.
+
+## Example
+
+This is the dictionary content. Elements are separated by `[]`.
+
+Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`.
+
+Initial repeat offset is set at 10, which is the letter `2`.
+
+Encoded `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]`
+
+Decoded: `[10][ bananas w][hich][ were ][brown ][were added]`
+
+Output: `10 bananas which were brown were added`
+
+
+## Streams
+
+For streams each block can use the dictionary.
+
+The dictionary cannot currently be provided on the stream.
+
+
+# LICENSE
+
+This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
+
+Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go
new file mode 100644
index 0000000000..6c7feafcc6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode.go
@@ -0,0 +1,437 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strconv"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("s2: corrupt input")
+ // ErrCRC reports that the input failed CRC validation (streams only)
+ ErrCRC = errors.New("s2: corrupt input, crc mismatch")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("s2: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("s2: unsupported input")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+const (
+ decodeErrCodeCorrupt = 1
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= cap(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ if s2Decode(dst, src[s:]) != 0 {
+ return nil, ErrCorrupt
+ }
+ return dst, nil
+}
+
+// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func s2DecodeDict(dst, src []byte, dict *Dict) int {
+ if dict == nil {
+ return s2Decode(dst, src)
+ }
+ const debug = false
+ const debugErrs = debug
+
+ if debug {
+ fmt.Println("Starting decode, dst len:", len(dst))
+ }
+ var d, s, length int
+ offset := len(dict.dict) - dict.repeat
+
+ // As long as we can read at least 5 bytes...
+ for s < len(src)-5 {
+ // Removing bounds checks is SLOWER, when if doing
+ // in := src[s:s+5]
+ // Checked on Go 1.18
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ x = uint32(src[s-1])
+ case x == 61:
+ in := src[s : s+3]
+ x = uint32(in[1]) | uint32(in[2])<<8
+ s += 3
+ case x == 62:
+ in := src[s : s+4]
+ // Load as 32 bit and shift down.
+ x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ x >>= 8
+ s += 4
+ case x == 63:
+ in := src[s : s+5]
+ x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
+ s += 5
+ }
+ length = int(x) + 1
+ if debug {
+ fmt.Println("literals, length:", length, "d-after:", d+length)
+ }
+ if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debugErrs {
+ fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
+ }
+ return decodeErrCodeCorrupt
+ }
+
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ length = int(src[s-2]) >> 2 & 0x7
+ if toffset == 0 {
+ if debug {
+ fmt.Print("(repeat) ")
+ }
+ // keep last offset
+ switch length {
+ case 5:
+ length = int(src[s]) + 4
+ s += 1
+ case 6:
+ in := src[s : s+2]
+ length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
+ s += 2
+ case 7:
+ in := src[s : s+3]
+ length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
+ s += 3
+ default: // 0-> 4
+ }
+ } else {
+ offset = toffset
+ }
+ length += 4
+ case tagCopy2:
+ in := src[s : s+3]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8)
+ length = 1 + int(in[0])>>2
+ s += 3
+
+ case tagCopy4:
+ in := src[s : s+5]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
+ length = 1 + int(in[0])>>2
+ s += 5
+ }
+
+ if offset <= 0 || length > len(dst)-d {
+ if debugErrs {
+ fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
+ }
+ return decodeErrCodeCorrupt
+ }
+
+ // copy from dict
+ if d < offset {
+ if d > MaxDictSrcOffset {
+ if debugErrs {
+ fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ startOff := len(dict.dict) - offset + d
+ if startOff < 0 || startOff+length > len(dict.dict) {
+ if debugErrs {
+ fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict))
+ }
+ return decodeErrCodeCorrupt
+ }
+ if debug {
+ fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff)
+ }
+ copy(dst[d:d+length], dict.dict[startOff:])
+ d += length
+ continue
+ }
+
+ if debug {
+ fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
+ }
+
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset > length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+
+ // Remaining with extra checks...
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debugErrs {
+ fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
+ }
+ return decodeErrCodeCorrupt
+ }
+ if debug {
+ fmt.Println("literals, length:", length, "d-after:", d+length)
+ }
+
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(src[s-2]) >> 2 & 0x7
+ toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ if toffset == 0 {
+ if debug {
+ fmt.Print("(repeat) ")
+ }
+ // keep last offset
+ switch length {
+ case 5:
+ s += 1
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-1])) + 4
+ case 6:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
+ case 7:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
+ default: // 0-> 4
+ }
+ } else {
+ offset = toffset
+ }
+ length += 4
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || length > len(dst)-d {
+ if debugErrs {
+ fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
+ }
+ return decodeErrCodeCorrupt
+ }
+
+ // copy from dict
+ if d < offset {
+ if d > MaxDictSrcOffset {
+ if debugErrs {
+ fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ rOff := len(dict.dict) - (offset - d)
+ if debug {
+ fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff)
+ }
+ if rOff+length > len(dict.dict) {
+ if debugErrs {
+ fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ if rOff < 0 {
+ if debugErrs {
+ fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:d+length], dict.dict[rOff:])
+ d += length
+ continue
+ }
+
+ if debug {
+ fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
+ }
+
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset > length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+
+ if d != len(dst) {
+ if debugErrs {
+ fmt.Println("wanted length", len(dst), "got", d)
+ }
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
diff --git a/vendor/github.com/klauspost/compress/s2/decode_amd64.s b/vendor/github.com/klauspost/compress/s2/decode_amd64.s
new file mode 100644
index 0000000000..9b105e03c5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_amd64.s
@@ -0,0 +1,568 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+#define R_TMP0 AX
+#define R_TMP1 BX
+#define R_LEN CX
+#define R_OFF DX
+#define R_SRC SI
+#define R_DST DI
+#define R_DBASE R8
+#define R_DLEN R9
+#define R_DEND R10
+#define R_SBASE R11
+#define R_SLEN R12
+#define R_SEND R13
+#define R_TMP2 R14
+#define R_TMP3 R15
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - R_TMP0 scratch
+// - R_TMP1 scratch
+// - R_LEN length or x (shared)
+// - R_OFF offset
+// - R_SRC &src[s]
+// - R_DST &dst[d]
+// + R_DBASE dst_base
+// + R_DLEN dst_len
+// + R_DEND dst_base + dst_len
+// + R_SBASE src_base
+// + R_SLEN src_len
+// + R_SEND src_base + src_len
+// - R_TMP2 used by doCopy
+// - R_TMP3 used by doCopy
+//
+// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST.
+// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC.
+TEXT ·s2Decode(SB), NOSPLIT, $48-56
+ // Initialize R_SRC, R_DST and R_DBASE-R_SEND.
+ MOVQ dst_base+0(FP), R_DBASE
+ MOVQ dst_len+8(FP), R_DLEN
+ MOVQ R_DBASE, R_DST
+ MOVQ R_DBASE, R_DEND
+ ADDQ R_DLEN, R_DEND
+ MOVQ src_base+24(FP), R_SBASE
+ MOVQ src_len+32(FP), R_SLEN
+ MOVQ R_SBASE, R_SRC
+ MOVQ R_SBASE, R_SEND
+ ADDQ R_SLEN, R_SEND
+ XORQ R_OFF, R_OFF
+
+loop:
+ // for s < len(src)
+ CMPQ R_SRC, R_SEND
+ JEQ end
+
+ // R_LEN = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (R_SRC), R_LEN
+ MOVL R_LEN, R_TMP1
+ ANDL $3, R_TMP1
+ CMPL R_TMP1, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, R_LEN
+ CMPL R_LEN, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ R_SRC
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that R_LEN == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // R_LEN can hold 64 bits, so the increment cannot overflow.
+ INCQ R_LEN
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // R_TMP0 = len(dst) - d
+ // R_TMP1 = len(src) - s
+ MOVQ R_DEND, R_TMP0
+ SUBQ R_DST, R_TMP0
+ MOVQ R_SEND, R_TMP1
+ SUBQ R_SRC, R_TMP1
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ R_LEN, $16
+ JGT callMemmove
+ CMPQ R_TMP0, $16
+ JLT callMemmove
+ CMPQ R_TMP1, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R_SRC), X0
+ MOVOU X0, 0(R_DST)
+
+ // d += length
+ // s += length
+ ADDQ R_LEN, R_DST
+ ADDQ R_LEN, R_SRC
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ R_LEN, R_TMP0
+ JGT errCorrupt
+ CMPQ R_LEN, R_TMP1
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ R_DST, 0(SP)
+ MOVQ R_SRC, 8(SP)
+ MOVQ R_LEN, 16(SP)
+ MOVQ R_DST, 24(SP)
+ MOVQ R_SRC, 32(SP)
+ MOVQ R_LEN, 40(SP)
+ MOVQ R_OFF, 48(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R_DBASE-R_SEND.
+ MOVQ 24(SP), R_DST
+ MOVQ 32(SP), R_SRC
+ MOVQ 40(SP), R_LEN
+ MOVQ 48(SP), R_OFF
+ MOVQ dst_base+0(FP), R_DBASE
+ MOVQ dst_len+8(FP), R_DLEN
+ MOVQ R_DBASE, R_DEND
+ ADDQ R_DLEN, R_DEND
+ MOVQ src_base+24(FP), R_SBASE
+ MOVQ src_len+32(FP), R_SLEN
+ MOVQ R_SBASE, R_SEND
+ ADDQ R_SLEN, R_SEND
+
+ // d += length
+ // s += length
+ ADDQ R_LEN, R_DST
+ ADDQ R_LEN, R_SRC
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ R_LEN, R_SRC
+ SUBQ $58, R_SRC
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL R_LEN, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(R_SRC), R_LEN
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(R_SRC), R_LEN
+ JMP doLit
+
+tagLit62Plus:
+ CMPL R_LEN, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ // We read one byte, safe to read one back, since we are just reading tag.
+ // x = binary.LittleEndian.Uint32(src[s-1:]) >> 8
+ MOVL -4(R_SRC), R_LEN
+ SHRL $8, R_LEN
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(R_SRC), R_LEN
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, R_LEN
+ INCQ R_LEN
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(R_SRC), R_OFF
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, R_LEN
+ INCQ R_LEN
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(R_SRC), R_OFF
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - R_TMP1 == src[s] & 0x03
+ // - R_LEN == src[s]
+ CMPQ R_TMP1, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ // length = 4 + int(src[s-2])>>2&0x7
+ MOVBQZX -1(R_SRC), R_TMP1
+ MOVQ R_LEN, R_TMP0
+ SHRQ $2, R_LEN
+ ANDQ $0xe0, R_TMP0
+ ANDQ $7, R_LEN
+ SHLQ $3, R_TMP0
+ ADDQ $4, R_LEN
+ ORQ R_TMP1, R_TMP0
+
+ // check if repeat code, ZF set by ORQ.
+ JZ repeatCode
+
+ // This is a regular copy, transfer our temporary value to R_OFF (length)
+ MOVQ R_TMP0, R_OFF
+ JMP doCopy
+
+// This is a repeat code.
+repeatCode:
+ // If length < 9, reuse last offset, with the length already calculated.
+ CMPQ R_LEN, $9
+ JL doCopyRepeat
+
+ // Read additional bytes for length.
+ JE repeatLen1
+
+ // Rare, so the extra branch shouldn't hurt too much.
+ CMPQ R_LEN, $10
+ JE repeatLen2
+ JMP repeatLen3
+
+// Read repeat lengths.
+repeatLen1:
+ // s ++
+ ADDQ $1, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // length = src[s-1] + 8
+ MOVBQZX -1(R_SRC), R_LEN
+ ADDL $8, R_LEN
+ JMP doCopyRepeat
+
+repeatLen2:
+ // s +=2
+ ADDQ $2, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8)
+ MOVWQZX -2(R_SRC), R_LEN
+ ADDL $260, R_LEN
+ JMP doCopyRepeat
+
+repeatLen3:
+ // s +=3
+ ADDQ $3, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ R_SRC, R_SEND
+ JA errCorrupt
+
+ // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16)
+ // Read one byte further back (just part of the tag, shifted out)
+ MOVL -4(R_SRC), R_LEN
+ SHRL $8, R_LEN
+ ADDL $65540, R_LEN
+ JMP doCopyRepeat
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - R_LEN == length && R_LEN > 0
+ // - R_OFF == offset
+
+ // if d < offset { etc }
+ MOVQ R_DST, R_TMP1
+ SUBQ R_DBASE, R_TMP1
+ CMPQ R_TMP1, R_OFF
+ JLT errCorrupt
+
+ // Repeat values can skip the test above, since any offset > 0 will be in dst.
+doCopyRepeat:
+ // if offset <= 0 { etc }
+ CMPQ R_OFF, $0
+ JLE errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R_DEND, R_TMP1
+ SUBQ R_DST, R_TMP1
+ CMPQ R_LEN, R_TMP1
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R_TMP2 = len(dst)-d
+ // - R_TMP3 = &dst[d-offset]
+ MOVQ R_DEND, R_TMP2
+ SUBQ R_DST, R_TMP2
+ MOVQ R_DST, R_TMP3
+ SUBQ R_OFF, R_TMP3
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ R_LEN, $16
+ JGT slowForwardCopy
+ CMPQ R_OFF, $8
+ JLT slowForwardCopy
+ CMPQ R_TMP2, $16
+ JLT slowForwardCopy
+ MOVQ 0(R_TMP3), R_TMP0
+ MOVQ R_TMP0, 0(R_DST)
+ MOVQ 8(R_TMP3), R_TMP1
+ MOVQ R_TMP1, 8(R_DST)
+ ADDQ R_LEN, R_DST
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R_TMP2
+ CMPQ R_LEN, R_TMP2
+ JGT verySlowForwardCopy
+
+ // We want to keep the offset, so we use R_TMP2 from here.
+ MOVQ R_OFF, R_TMP2
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R_TMP3, is unchanged.
+ // }
+ CMPQ R_TMP2, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R_TMP3), R_TMP1
+ MOVQ R_TMP1, (R_DST)
+ SUBQ R_TMP2, R_LEN
+ ADDQ R_TMP2, R_DST
+ ADDQ R_TMP2, R_TMP2
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by R_DST being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ R_DST, R_TMP0
+ ADDQ R_LEN, R_DST
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ R_LEN, $0
+ JLE loop
+ MOVQ (R_TMP3), R_TMP1
+ MOVQ R_TMP1, (R_TMP0)
+ ADDQ $8, R_TMP3
+ ADDQ $8, R_TMP0
+ SUBQ $8, R_LEN
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R_TMP3), R_TMP1
+ MOVB R_TMP1, (R_DST)
+ INCQ R_TMP3
+ INCQ R_DST
+ DECQ R_LEN
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ R_DST, R_DEND
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/s2/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s
new file mode 100644
index 0000000000..4b63d5086a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_arm64.s
@@ -0,0 +1,574 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+#define R_TMP0 R2
+#define R_TMP1 R3
+#define R_LEN R4
+#define R_OFF R5
+#define R_SRC R6
+#define R_DST R7
+#define R_DBASE R8
+#define R_DLEN R9
+#define R_DEND R10
+#define R_SBASE R11
+#define R_SLEN R12
+#define R_SEND R13
+#define R_TMP2 R14
+#define R_TMP3 R15
+
+// TEST_SRC will check if R_SRC is <= SRC_END
+#define TEST_SRC() \
+ CMP R_SEND, R_SRC \
+ BGT errCorrupt
+
+// MOVD R_SRC, R_TMP1
+// SUB R_SBASE, R_TMP1, R_TMP1
+// CMP R_SLEN, R_TMP1
+// BGT errCorrupt
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - R_TMP0 scratch
+// - R_TMP1 scratch
+// - R_LEN length or x
+// - R_OFF offset
+// - R_SRC &src[s]
+// - R_DST &dst[d]
+// + R_DBASE dst_base
+// + R_DLEN dst_len
+// + R_DEND dst_base + dst_len
+// + R_SBASE src_base
+// + R_SLEN src_len
+// + R_SEND src_base + src_len
+// - R_TMP2 used by doCopy
+// - R_TMP3 used by doCopy
+//
+// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST.
+// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC.
+TEXT ·s2Decode(SB), NOSPLIT, $56-64
+ // Initialize R_SRC, R_DST and R_DBASE-R_SEND.
+ MOVD dst_base+0(FP), R_DBASE
+ MOVD dst_len+8(FP), R_DLEN
+ MOVD R_DBASE, R_DST
+ MOVD R_DBASE, R_DEND
+ ADD R_DLEN, R_DEND, R_DEND
+ MOVD src_base+24(FP), R_SBASE
+ MOVD src_len+32(FP), R_SLEN
+ MOVD R_SBASE, R_SRC
+ MOVD R_SBASE, R_SEND
+ ADD R_SLEN, R_SEND, R_SEND
+ MOVD $0, R_OFF
+
+loop:
+ // for s < len(src)
+ CMP R_SEND, R_SRC
+ BEQ end
+
+ // R_LEN = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBU (R_SRC), R_LEN
+ MOVW R_LEN, R_TMP1
+ ANDW $3, R_TMP1
+ MOVW $1, R1
+ CMPW R1, R_TMP1
+ BGE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ MOVW $60, R1
+ LSRW $2, R_LEN, R_LEN
+ CMPW R_LEN, R1
+ BLS tagLit60Plus
+
+ // case x < 60:
+ // s++
+ ADD $1, R_SRC, R_SRC
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that R_LEN == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // R_LEN can hold 64 bits, so the increment cannot overflow.
+ ADD $1, R_LEN, R_LEN
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // R_TMP0 = len(dst) - d
+ // R_TMP1 = len(src) - s
+ MOVD R_DEND, R_TMP0
+ SUB R_DST, R_TMP0, R_TMP0
+ MOVD R_SEND, R_TMP1
+ SUB R_SRC, R_TMP1, R_TMP1
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMP $16, R_LEN
+ BGT callMemmove
+ CMP $16, R_TMP0
+ BLT callMemmove
+ CMP $16, R_TMP1
+ BLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ LDP 0(R_SRC), (R_TMP2, R_TMP3)
+ STP (R_TMP2, R_TMP3), 0(R_DST)
+
+ // d += length
+ // s += length
+ ADD R_LEN, R_DST, R_DST
+ ADD R_LEN, R_SRC, R_SRC
+ B loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMP R_TMP0, R_LEN
+ BGT errCorrupt
+ CMP R_TMP1, R_LEN
+ BGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVD R_DST, 8(RSP)
+ MOVD R_SRC, 16(RSP)
+ MOVD R_LEN, 24(RSP)
+ MOVD R_DST, 32(RSP)
+ MOVD R_SRC, 40(RSP)
+ MOVD R_LEN, 48(RSP)
+ MOVD R_OFF, 56(RSP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R_DBASE-R_SEND.
+ MOVD 32(RSP), R_DST
+ MOVD 40(RSP), R_SRC
+ MOVD 48(RSP), R_LEN
+ MOVD 56(RSP), R_OFF
+ MOVD dst_base+0(FP), R_DBASE
+ MOVD dst_len+8(FP), R_DLEN
+ MOVD R_DBASE, R_DEND
+ ADD R_DLEN, R_DEND, R_DEND
+ MOVD src_base+24(FP), R_SBASE
+ MOVD src_len+32(FP), R_SLEN
+ MOVD R_SBASE, R_SEND
+ ADD R_SLEN, R_SEND, R_SEND
+
+ // d += length
+ // s += length
+ ADD R_LEN, R_DST, R_DST
+ ADD R_LEN, R_SRC, R_SRC
+ B loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADD R_LEN, R_SRC, R_SRC
+ SUB $58, R_SRC, R_SRC
+ TEST_SRC()
+
+ // case x == 60:
+ MOVW $61, R1
+ CMPW R1, R_LEN
+ BEQ tagLit61
+ BGT tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBU -1(R_SRC), R_LEN
+ B doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVHU -2(R_SRC), R_LEN
+ B doLit
+
+tagLit62Plus:
+ CMPW $62, R_LEN
+ BHI tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVHU -3(R_SRC), R_LEN
+ MOVBU -1(R_SRC), R_TMP1
+ ORR R_TMP1<<16, R_LEN
+ B doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVWU -4(R_SRC), R_LEN
+ B doLit
+
+ // The code above handles literal tags.
+ // ----------------------------------------
+ // The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADD $5, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVD R_SRC, R_TMP1
+ SUB R_SBASE, R_TMP1, R_TMP1
+ CMP R_SLEN, R_TMP1
+ BGT errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ MOVD $1, R1
+ ADD R_LEN>>2, R1, R_LEN
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVWU -4(R_SRC), R_OFF
+ B doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADD $3, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ TEST_SRC()
+
+ // length = 1 + int(src[s-3])>>2
+ MOVD $1, R1
+ ADD R_LEN>>2, R1, R_LEN
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVHU -2(R_SRC), R_OFF
+ B doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - R_TMP1 == src[s] & 0x03
+ // - R_LEN == src[s]
+ CMP $2, R_TMP1
+ BEQ tagCopy2
+ BGT tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADD $2, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ TEST_SRC()
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ // Calculate offset in R_TMP0 in case it is a repeat.
+ MOVD R_LEN, R_TMP0
+ AND $0xe0, R_TMP0
+ MOVBU -1(R_SRC), R_TMP1
+ ORR R_TMP0<<3, R_TMP1, R_TMP0
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ MOVD $7, R1
+ AND R_LEN>>2, R1, R_LEN
+ ADD $4, R_LEN, R_LEN
+
+ // check if repeat code with offset 0.
+ CMP $0, R_TMP0
+ BEQ repeatCode
+
+ // This is a regular copy, transfer our temporary value to R_OFF (offset)
+ MOVD R_TMP0, R_OFF
+ B doCopy
+
+ // This is a repeat code.
+repeatCode:
+ // If length < 9, reuse last offset, with the length already calculated.
+ CMP $9, R_LEN
+ BLT doCopyRepeat
+ BEQ repeatLen1
+ CMP $10, R_LEN
+ BEQ repeatLen2
+
+repeatLen3:
+ // s +=3
+ ADD $3, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ TEST_SRC()
+
+ // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540
+ MOVBU -1(R_SRC), R_TMP0
+ MOVHU -3(R_SRC), R_LEN
+ ORR R_TMP0<<16, R_LEN, R_LEN
+ ADD $65540, R_LEN, R_LEN
+ B doCopyRepeat
+
+repeatLen2:
+ // s +=2
+ ADD $2, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ TEST_SRC()
+
+ // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260
+ MOVHU -2(R_SRC), R_LEN
+ ADD $260, R_LEN, R_LEN
+ B doCopyRepeat
+
+repeatLen1:
+ // s +=1
+ ADD $1, R_SRC, R_SRC
+
+ // if uint(s) > uint(len(src)) { etc }
+ TEST_SRC()
+
+ // length = src[s-1] + 8
+ MOVBU -1(R_SRC), R_LEN
+ ADD $8, R_LEN, R_LEN
+ B doCopyRepeat
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - R_LEN == length && R_LEN > 0
+ // - R_OFF == offset
+
+ // if d < offset { etc }
+ MOVD R_DST, R_TMP1
+ SUB R_DBASE, R_TMP1, R_TMP1
+ CMP R_OFF, R_TMP1
+ BLT errCorrupt
+
+ // Repeat values can skip the test above, since any offset > 0 will be in dst.
+doCopyRepeat:
+
+ // if offset <= 0 { etc }
+ CMP $0, R_OFF
+ BLE errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVD R_DEND, R_TMP1
+ SUB R_DST, R_TMP1, R_TMP1
+ CMP R_TMP1, R_LEN
+ BGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R_TMP2 = len(dst)-d
+ // - R_TMP3 = &dst[d-offset]
+ MOVD R_DEND, R_TMP2
+ SUB R_DST, R_TMP2, R_TMP2
+ MOVD R_DST, R_TMP3
+ SUB R_OFF, R_TMP3, R_TMP3
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMP $16, R_LEN
+ BGT slowForwardCopy
+ CMP $8, R_OFF
+ BLT slowForwardCopy
+ CMP $16, R_TMP2
+ BLT slowForwardCopy
+ MOVD 0(R_TMP3), R_TMP0
+ MOVD R_TMP0, 0(R_DST)
+ MOVD 8(R_TMP3), R_TMP1
+ MOVD R_TMP1, 8(R_DST)
+ ADD R_LEN, R_DST, R_DST
+ B loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUB $10, R_TMP2, R_TMP2
+ CMP R_TMP2, R_LEN
+ BGT verySlowForwardCopy
+
+ // We want to keep the offset, so we use R_TMP2 from here.
+ MOVD R_OFF, R_TMP2
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R_TMP3, is unchanged.
+ // }
+ CMP $8, R_TMP2
+ BGE fixUpSlowForwardCopy
+ MOVD (R_TMP3), R_TMP1
+ MOVD R_TMP1, (R_DST)
+ SUB R_TMP2, R_LEN, R_LEN
+ ADD R_TMP2, R_DST, R_DST
+ ADD R_TMP2, R_TMP2, R_TMP2
+ B makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by R_DST being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVD R_DST, R_TMP0
+ ADD R_LEN, R_DST, R_DST
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ MOVD $0, R1
+ CMP R1, R_LEN
+ BLE loop
+ MOVD (R_TMP3), R_TMP1
+ MOVD R_TMP1, (R_TMP0)
+ ADD $8, R_TMP3, R_TMP3
+ ADD $8, R_TMP0, R_TMP0
+ SUB $8, R_LEN, R_LEN
+ B finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R_TMP3), R_TMP1
+ MOVB R_TMP1, (R_DST)
+ ADD $1, R_TMP3, R_TMP3
+ ADD $1, R_DST, R_DST
+ SUB $1, R_LEN, R_LEN
+ CBNZ R_LEN, verySlowForwardCopy
+ B loop
+
+ // The code above handles copy tags.
+ // ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMP R_DEND, R_DST
+ BNE errCorrupt
+
+ // return 0
+ MOVD $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVD $1, R_TMP0
+ MOVD R_TMP0, ret+48(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/s2/decode_asm.go b/vendor/github.com/klauspost/compress/s2/decode_asm.go
new file mode 100644
index 0000000000..cb3576edd4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_asm.go
@@ -0,0 +1,17 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (amd64 || arm64) && !appengine && gc && !noasm
+// +build amd64 arm64
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package s2
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func s2Decode(dst, src []byte) int
diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go
new file mode 100644
index 0000000000..2cb55c2c77
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_other.go
@@ -0,0 +1,292 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (!amd64 && !arm64) || appengine || !gc || noasm
+// +build !amd64,!arm64 appengine !gc noasm
+
+package s2
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func s2Decode(dst, src []byte) int {
+ const debug = false
+ if debug {
+ fmt.Println("Starting decode, dst len:", len(dst))
+ }
+ var d, s, length int
+ offset := 0
+
+ // As long as we can read at least 5 bytes...
+ for s < len(src)-5 {
+ // Removing bounds checks is SLOWER, when if doing
+ // in := src[s:s+5]
+ // Checked on Go 1.18
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ x = uint32(src[s-1])
+ case x == 61:
+ in := src[s : s+3]
+ x = uint32(in[1]) | uint32(in[2])<<8
+ s += 3
+ case x == 62:
+ in := src[s : s+4]
+ // Load as 32 bit and shift down.
+ x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ x >>= 8
+ s += 4
+ case x == 63:
+ in := src[s : s+5]
+ x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
+ s += 5
+ }
+ length = int(x) + 1
+ if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debug {
+ fmt.Println("corrupt: lit size", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ if debug {
+ fmt.Println("literals, length:", length, "d-after:", d+length)
+ }
+
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ length = int(src[s-2]) >> 2 & 0x7
+ if toffset == 0 {
+ if debug {
+ fmt.Print("(repeat) ")
+ }
+ // keep last offset
+ switch length {
+ case 5:
+ length = int(src[s]) + 4
+ s += 1
+ case 6:
+ in := src[s : s+2]
+ length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
+ s += 2
+ case 7:
+ in := src[s : s+3]
+ length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
+ s += 3
+ default: // 0-> 4
+ }
+ } else {
+ offset = toffset
+ }
+ length += 4
+ case tagCopy2:
+ in := src[s : s+3]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8)
+ length = 1 + int(in[0])>>2
+ s += 3
+
+ case tagCopy4:
+ in := src[s : s+5]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
+ length = 1 + int(in[0])>>2
+ s += 5
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ if debug {
+ fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
+ }
+
+ return decodeErrCodeCorrupt
+ }
+
+ if debug {
+ fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
+ }
+
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset > length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+
+ // Remaining with extra checks...
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debug {
+ fmt.Println("corrupt: lit size", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ if debug {
+ fmt.Println("literals, length:", length, "d-after:", d+length)
+ }
+
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = int(src[s-2]) >> 2 & 0x7
+ toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ if toffset == 0 {
+ if debug {
+ fmt.Print("(repeat) ")
+ }
+ // keep last offset
+ switch length {
+ case 5:
+ s += 1
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-1])) + 4
+ case 6:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
+ case 7:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
+ default: // 0-> 4
+ }
+ } else {
+ offset = toffset
+ }
+ length += 4
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ if debug {
+ fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
+ }
+ return decodeErrCodeCorrupt
+ }
+
+ if debug {
+ fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
+ }
+
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset > length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
diff --git a/vendor/github.com/klauspost/compress/s2/dict.go b/vendor/github.com/klauspost/compress/s2/dict.go
new file mode 100644
index 0000000000..24f7ce80bc
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/dict.go
@@ -0,0 +1,331 @@
+// Copyright (c) 2022+ Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "sync"
+)
+
+const (
+ // MinDictSize is the minimum dictionary size when repeat has been read.
+ MinDictSize = 16
+
+ // MaxDictSize is the maximum dictionary size when repeat has been read.
+ MaxDictSize = 65536
+
+ // MaxDictSrcOffset is the maximum offset where a dictionary entry can start.
+ MaxDictSrcOffset = 65535
+)
+
+// Dict contains a dictionary that can be used for encoding and decoding s2
+type Dict struct {
+ dict []byte
+ repeat int // Repeat as index of dict
+
+ fast, better, best sync.Once
+ fastTable *[1 << 14]uint16
+
+ betterTableShort *[1 << 14]uint16
+ betterTableLong *[1 << 17]uint16
+
+ bestTableShort *[1 << 16]uint32
+ bestTableLong *[1 << 19]uint32
+}
+
+// NewDict will read a dictionary.
+// It will return nil if the dictionary is invalid.
+func NewDict(dict []byte) *Dict {
+ if len(dict) == 0 {
+ return nil
+ }
+ var d Dict
+ // Repeat is the first value of the dict
+ r, n := binary.Uvarint(dict)
+ if n <= 0 {
+ return nil
+ }
+ dict = dict[n:]
+ d.dict = dict
+ if cap(d.dict) < len(d.dict)+16 {
+ d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
+ }
+ if len(dict) < MinDictSize || len(dict) > MaxDictSize {
+ return nil
+ }
+ d.repeat = int(r)
+ if d.repeat > len(dict) {
+ return nil
+ }
+ return &d
+}
+
+// Bytes will return a serialized version of the dictionary.
+// The output can be sent to NewDict.
+func (d *Dict) Bytes() []byte {
+ dst := make([]byte, binary.MaxVarintLen16+len(d.dict))
+ return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...)
+}
+
+// MakeDict will create a dictionary.
+// 'data' must be at least MinDictSize.
+// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used.
+// If searchStart is set the start repeat value will be set to the last
+// match of this content.
+// If no matches are found, it will attempt to find shorter matches.
+// This content should match the typical start of a block.
+// If at least 4 bytes cannot be matched, repeat is set to start of block.
+func MakeDict(data []byte, searchStart []byte) *Dict {
+ if len(data) == 0 {
+ return nil
+ }
+ if len(data) > MaxDictSize {
+ data = data[len(data)-MaxDictSize:]
+ }
+ var d Dict
+ dict := data
+ d.dict = dict
+ if cap(d.dict) < len(d.dict)+16 {
+ d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
+ }
+ if len(dict) < MinDictSize {
+ return nil
+ }
+
+ // Find the longest match possible, last entry if multiple.
+ for s := len(searchStart); s > 4; s-- {
+ if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 {
+ d.repeat = idx
+ break
+ }
+ }
+
+ return &d
+}
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as encoding,
+// and does not make for concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func (d *Dict) Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ dstP := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:dstP]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+ }
+ n := encodeBlockDictGo(dst[dstP:], src, d)
+ if n > 0 {
+ dstP += n
+ return dst[:dstP]
+ }
+ // Not compressible
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+}
+
+// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as encoding,
+// and does not make for concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func (d *Dict) EncodeBetter(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ dstP := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:dstP]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+ }
+ n := encodeBlockBetterDict(dst[dstP:], src, d)
+ if n > 0 {
+ dstP += n
+ return dst[:dstP]
+ }
+ // Not compressible
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+}
+
+// EncodeBest returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBest compresses as good as reasonably possible but with a
+// big speed decrease.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as encoding,
+// and does not make for concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func (d *Dict) EncodeBest(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ dstP := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:dstP]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+ }
+ n := encodeBlockBest(dst[dstP:], src, d)
+ if n > 0 {
+ dstP += n
+ return dst[:dstP]
+ }
+ // Not compressible
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func (d *Dict) Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= cap(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ if s2DecodeDict(dst, src[s:], d) != 0 {
+ return nil, ErrCorrupt
+ }
+ return dst, nil
+}
+
+func (d *Dict) initFast() {
+ d.fast.Do(func() {
+ const (
+ tableBits = 14
+ maxTableSize = 1 << tableBits
+ )
+
+ var table [maxTableSize]uint16
+ // We stop so any entry of length 8 can always be read.
+ for i := 0; i < len(d.dict)-8-2; i += 3 {
+ x0 := load64(d.dict, i)
+ h0 := hash6(x0, tableBits)
+ h1 := hash6(x0>>8, tableBits)
+ h2 := hash6(x0>>16, tableBits)
+ table[h0] = uint16(i)
+ table[h1] = uint16(i + 1)
+ table[h2] = uint16(i + 2)
+ }
+ d.fastTable = &table
+ })
+}
+
+func (d *Dict) initBetter() {
+ d.better.Do(func() {
+ const (
+ // Long hash matches.
+ lTableBits = 17
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 14
+ maxSTableSize = 1 << sTableBits
+ )
+
+ var lTable [maxLTableSize]uint16
+ var sTable [maxSTableSize]uint16
+
+ // We stop so any entry of length 8 can always be read.
+ for i := 0; i < len(d.dict)-8; i++ {
+ cv := load64(d.dict, i)
+ lTable[hash7(cv, lTableBits)] = uint16(i)
+ sTable[hash4(cv, sTableBits)] = uint16(i)
+ }
+ d.betterTableShort = &sTable
+ d.betterTableLong = &lTable
+ })
+}
+
+func (d *Dict) initBest() {
+ d.best.Do(func() {
+ const (
+ // Long hash matches.
+ lTableBits = 19
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 16
+ maxSTableSize = 1 << sTableBits
+ )
+
+ var lTable [maxLTableSize]uint32
+ var sTable [maxSTableSize]uint32
+
+ // We stop so any entry of length 8 can always be read.
+ for i := 0; i < len(d.dict)-8; i++ {
+ cv := load64(d.dict, i)
+ hashL := hash8(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL := lTable[hashL]
+ candidateS := sTable[hashS]
+ lTable[hashL] = uint32(i) | candidateL<<16
+ sTable[hashS] = uint32(i) | candidateS<<16
+ }
+ d.bestTableShort = &sTable
+ d.bestTableLong = &lTable
+ })
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go
new file mode 100644
index 0000000000..e6c2310212
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode.go
@@ -0,0 +1,393 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "encoding/binary"
+ "math"
+ "math/bits"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as encoding,
+// and does not make for concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+ n := encodeBlock(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EstimateBlockSize will perform a very fast compression
+// without outputting the result and return the compressed output size.
+// The function returns -1 if no improvement could be achieved.
+// Using actual compression will most often produce better compression than the estimate.
+func EstimateBlockSize(src []byte) (d int) {
+ if len(src) < 6 || int64(len(src)) > 0xffffffff {
+ return -1
+ }
+ if len(src) <= 1024 {
+ d = calcBlockSizeSmall(src)
+ } else {
+ d = calcBlockSize(src)
+ }
+
+ if d == 0 {
+ return -1
+ }
+ // Size of the varint encoded block size.
+ d += (bits.Len64(uint64(len(src))) + 7) / 7
+
+ if d >= len(src) {
+ return -1
+ }
+ return d
+}
+
+// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as encoding,
+// and does not make for concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func EncodeBetter(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+ n := encodeBlockBetter(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeBest returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBest compresses as good as reasonably possible but with a
+// big speed decrease.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as encoding,
+// and does not make for concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func EncodeBest(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+ n := encodeBlockBest(dst[d:], src, nil)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeSnappy returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The output is Snappy compatible and will likely decompress faster.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as encoding,
+// and does not make for concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func EncodeSnappy(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+
+ n := encodeBlockSnappy(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeSnappyBetter returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The output is Snappy compatible and will likely decompress faster.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as encoding,
+// and does not make for concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func EncodeSnappyBetter(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+
+ n := encodeBlockBetterSnappy(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeSnappyBest returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The output is Snappy compatible and will likely decompress faster.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as encoding,
+// and does not make for concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func EncodeSnappyBest(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+
+ n := encodeBlockBestSnappy(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// ConcatBlocks will concatenate the supplied blocks and append them to the supplied destination.
+// If the destination is nil or too small, a new will be allocated.
+// The blocks are not validated, so garbage in = garbage out.
+// dst may not overlap block data.
+// Any data in dst is preserved as is, so it will not be considered a block.
+func ConcatBlocks(dst []byte, blocks ...[]byte) ([]byte, error) {
+ totalSize := uint64(0)
+ compSize := 0
+ for _, b := range blocks {
+ l, hdr, err := decodedLen(b)
+ if err != nil {
+ return nil, err
+ }
+ totalSize += uint64(l)
+ compSize += len(b) - hdr
+ }
+ if totalSize == 0 {
+ dst = append(dst, 0)
+ return dst, nil
+ }
+ if totalSize > math.MaxUint32 {
+ return nil, ErrTooLarge
+ }
+ var tmp [binary.MaxVarintLen32]byte
+ hdrSize := binary.PutUvarint(tmp[:], totalSize)
+ wantSize := hdrSize + compSize
+
+ if cap(dst)-len(dst) < wantSize {
+ dst = append(make([]byte, 0, wantSize+len(dst)), dst...)
+ }
+ dst = append(dst, tmp[:hdrSize]...)
+ for _, b := range blocks {
+ _, hdr, err := decodedLen(b)
+ if err != nil {
+ return nil, err
+ }
+ dst = append(dst, b[hdr:]...)
+ }
+ return dst, nil
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 8
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// will be accepted by the encoder.
+const minNonLiteralBlockSize = 32
+
+const intReduction = 2 - (1 << (^uint(0) >> 63)) // 1 (32 bits) or 0 (64 bits)
+
+// MaxBlockSize is the maximum value where MaxEncodedLen will return a valid block size.
+// Blocks this big are highly discouraged, though.
+// Half the size on 32 bit systems.
+const MaxBlockSize = (1<<(32-intReduction) - 1) - binary.MaxVarintLen32 - 5
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+// 32 bit platforms will have lower thresholds for rejecting big content.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if intReduction == 1 {
+ // 32 bits
+ if n > math.MaxInt32 {
+ // Also includes negative.
+ return -1
+ }
+ } else if n > 0xffffffff {
+ // 64 bits
+ // Also includes negative.
+ return -1
+ }
+ // Size of the varint encoded block size.
+ n = n + uint64((bits.Len64(n)+7)/7)
+
+ // Add maximum size of encoding block as literals.
+ n += uint64(literalExtraSize(int64(srcLen)))
+ if intReduction == 1 {
+ // 32 bits
+ if n > math.MaxInt32 {
+ return -1
+ }
+ } else if n > 0xffffffff {
+ // 64 bits
+ // Also includes negative.
+ return -1
+ }
+ return int(n)
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go
new file mode 100644
index 0000000000..5e57995d48
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_all.go
@@ -0,0 +1,1048 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "math/bits"
+)
+
+func load32(b []byte, i int) uint32 {
+ return binary.LittleEndian.Uint32(b[i:])
+}
+
+func load64(b []byte, i int) uint64 {
+ return binary.LittleEndian.Uint64(b[i:])
+}
+
+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash6(u uint64, h uint8) uint32 {
+ const prime6bytes = 227718039650203
+ return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
+}
+
+func encodeGo(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+ n := encodeBlockGo(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockGo(dst, src []byte) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 14
+ maxTableSize = 1 << tableBits
+
+ debug = false
+ )
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if nextEmit > 0 {
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ } else {
+ // First match, cannot be repeat.
+ d += emitCopy(dst[d:], repeat, s-base)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopy(dst[d:], repeat, s-base)
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if debug && s == candidate {
+ panic("s == candidate")
+ }
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+func encodeBlockSnappyGo(dst, src []byte) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 14
+ maxTableSize = 1 << tableBits
+ )
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeat(dst[d:], repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeat(dst[d:], repeat, s-base)
+ if false {
+ // Validate match.
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+// encodeBlockDictGo encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 14
+ maxTableSize = 1 << tableBits
+ maxAhead = 8 // maximum bytes ahead without checking sLimit
+
+ debug = false
+ )
+ dict.initFast()
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if sLimit > MaxDictSrcOffset-maxAhead {
+ sLimit = MaxDictSrcOffset - maxAhead
+ }
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form can start with a dict entry (copy or repeat).
+ s := 0
+
+ // Convert dict repeat to offset
+ repeat := len(dict.dict) - dict.repeat
+ cv := load64(src, 0)
+
+ // While in dict
+searchDict:
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ if nextS > sLimit {
+ if debug {
+ fmt.Println("slimit reached", s, nextS)
+ }
+ break searchDict
+ }
+ candidateDict := int(dict.fastTable[hash0])
+ candidateDict2 := int(dict.fastTable[hash1])
+ candidate2 := int(table[hash1])
+ candidate := int(table[hash0])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+
+ if repeat > s {
+ candidate := len(dict.dict) - repeat + s
+ if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) {
+ // Extend back
+ base := s
+ for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+ s += 4
+ candidate += 4
+ for candidate < len(dict.dict)-8 && s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ d += emitRepeat(dst[d:], repeat, s-base)
+ if debug {
+ fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+ cv = load64(src, s)
+ continue
+ }
+ } else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ if nextEmit > 0 {
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ } else {
+ // First match, cannot be repeat.
+ d += emitCopy(dst[d:], repeat, s-base)
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+ if debug {
+ fmt.Println("emitted reg repeat", s-base, "s:", s)
+ }
+ cv = load64(src, s)
+ continue searchDict
+ }
+ if s == 0 {
+ cv = load64(src, nextS)
+ s = nextS
+ continue searchDict
+ }
+ // Start with table. These matches will always be closer.
+ if uint32(cv) == load32(src, candidate) {
+ goto emitMatch
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ goto emitMatch
+ }
+
+ // Check dict. Dicts have longer offsets, so we want longer matches.
+ if cv == load64(dict.dict, candidateDict) {
+ table[hash2] = uint32(s + 2)
+ goto emitDict
+ }
+
+ candidateDict = int(dict.fastTable[hash2])
+ // Check if upper 7 bytes match
+ if candidateDict2 >= 1 {
+ if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) {
+ table[hash2] = uint32(s + 2)
+ candidateDict = candidateDict2
+ s++
+ goto emitDict
+ }
+ }
+
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ goto emitMatch
+ }
+ if candidateDict >= 2 {
+ // Check if upper 6 bytes match
+ if cv^load64(dict.dict, candidateDict-2) < (1 << 16) {
+ s += 2
+ goto emitDict
+ }
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ continue searchDict
+
+ emitDict:
+ {
+ if debug {
+ if load32(dict.dict, candidateDict) != load32(src, s) {
+ panic("dict emit mismatch")
+ }
+ }
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] {
+ candidateDict--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = s + (len(dict.dict)) - candidateDict
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateDict += 4
+ for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateDict += 8
+ }
+
+ // Matches longer than 64 are split.
+ if s <= sLimit || s-base < 8 {
+ d += emitCopy(dst[d:], repeat, s-base)
+ } else {
+ // Split to ensure we don't start a copy within next block
+ d += emitCopy(dst[d:], repeat, 4)
+ d += emitRepeat(dst[d:], repeat, s-base-4)
+ }
+ if false {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := dict.dict[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if debug {
+ fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+
+ // Index and continue loop to try new candidate.
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>8, tableBits)
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s - 1)
+ cv = load64(src, s)
+ }
+ continue
+ }
+ emitMatch:
+
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopy(dst[d:], repeat, s-base)
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if debug {
+ fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if debug && s == candidate {
+ panic("s == candidate")
+ }
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+ // Search without dict:
+ if repeat > s {
+ repeat = 0
+ }
+
+ // No more dict
+ sLimit = len(src) - inputMargin
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ if debug {
+ fmt.Println("non-dict matching at", s, "repeat:", repeat)
+ }
+ cv = load64(src, s)
+ if debug {
+ fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
+ }
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if nextEmit > 0 {
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ } else {
+ // First match, cannot be repeat.
+ d += emitCopy(dst[d:], repeat, s-base)
+ }
+ if debug {
+ fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopy(dst[d:], repeat, s-base)
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if debug {
+ fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if debug && s == candidate {
+ panic("s == candidate")
+ }
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", len(src)-nextEmit, "literals")
+ }
+ }
+ return d
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
new file mode 100644
index 0000000000..ebc332ad5f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
@@ -0,0 +1,148 @@
+//go:build !appengine && !noasm && gc
+// +build !appengine,!noasm,gc
+
+package s2
+
+const hasAmd64Asm = true
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+
+ if len(src) >= 4<<20 {
+ return encodeBlockAsm(dst, src)
+ }
+ if len(src) >= limit12B {
+ return encodeBlockAsm4MB(dst, src)
+ }
+ if len(src) >= limit10B {
+ return encodeBlockAsm12B(dst, src)
+ }
+ if len(src) >= limit8B {
+ return encodeBlockAsm10B(dst, src)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeBlockAsm8B(dst, src)
+}
+
+// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetter(dst, src []byte) (d int) {
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+
+ if len(src) > 4<<20 {
+ return encodeBetterBlockAsm(dst, src)
+ }
+ if len(src) >= limit12B {
+ return encodeBetterBlockAsm4MB(dst, src)
+ }
+ if len(src) >= limit10B {
+ return encodeBetterBlockAsm12B(dst, src)
+ }
+ if len(src) >= limit8B {
+ return encodeBetterBlockAsm10B(dst, src)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeBetterBlockAsm8B(dst, src)
+}
+
+// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockSnappy(dst, src []byte) (d int) {
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+ if len(src) >= 64<<10 {
+ return encodeSnappyBlockAsm(dst, src)
+ }
+ if len(src) >= limit12B {
+ return encodeSnappyBlockAsm64K(dst, src)
+ }
+ if len(src) >= limit10B {
+ return encodeSnappyBlockAsm12B(dst, src)
+ }
+ if len(src) >= limit8B {
+ return encodeSnappyBlockAsm10B(dst, src)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeSnappyBlockAsm8B(dst, src)
+}
+
+// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterSnappy(dst, src []byte) (d int) {
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+ if len(src) >= 64<<10 {
+ return encodeSnappyBetterBlockAsm(dst, src)
+ }
+ if len(src) >= limit12B {
+ return encodeSnappyBetterBlockAsm64K(dst, src)
+ }
+ if len(src) >= limit10B {
+ return encodeSnappyBetterBlockAsm12B(dst, src)
+ }
+ if len(src) >= limit8B {
+ return encodeSnappyBetterBlockAsm10B(dst, src)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeSnappyBetterBlockAsm8B(dst, src)
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go
new file mode 100644
index 0000000000..1d13e869a1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_best.go
@@ -0,0 +1,793 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "fmt"
+ "math"
+ "math/bits"
+)
+
+// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBest(dst, src []byte, dict *Dict) (d int) {
+ // Initialize the hash tables.
+ const (
+ // Long hash matches.
+ lTableBits = 19
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 16
+ maxSTableSize = 1 << sTableBits
+
+ inputMargin = 8 + 2
+
+ debug = false
+ )
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ sLimitDict := len(src) - inputMargin
+ if sLimitDict > MaxDictSrcOffset-inputMargin {
+ sLimitDict = MaxDictSrcOffset - inputMargin
+ }
+
+ var lTable [maxLTableSize]uint64
+ var sTable [maxSTableSize]uint64
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ repeat := 1
+ if dict != nil {
+ dict.initBest()
+ s = 0
+ repeat = len(dict.dict) - dict.repeat
+ }
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ const lowbitMask = 0xffffffff
+ getCur := func(x uint64) int {
+ return int(x & lowbitMask)
+ }
+ getPrev := func(x uint64) int {
+ return int(x >> 32)
+ }
+ const maxSkip = 64
+
+ for {
+ type match struct {
+ offset int
+ s int
+ length int
+ score int
+ rep, dict bool
+ }
+ var best match
+ for {
+ // Next src position to check
+ nextS := (s-nextEmit)>>8 + 1
+ if nextS > maxSkip {
+ nextS = s + maxSkip
+ } else {
+ nextS += s
+ }
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ if dict != nil && s >= MaxDictSrcOffset {
+ dict = nil
+ if repeat > s {
+ repeat = math.MinInt32
+ }
+ }
+ hashL := hash8(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL := lTable[hashL]
+ candidateS := sTable[hashS]
+
+ score := func(m match) int {
+ // Matches that are longer forward are penalized since we must emit it as a literal.
+ score := m.length - m.s
+ if nextEmit == m.s {
+ // If we do not have to emit literals, we save 1 byte
+ score++
+ }
+ offset := m.s - m.offset
+ if m.rep {
+ return score - emitRepeatSize(offset, m.length)
+ }
+ return score - emitCopySize(offset, m.length)
+ }
+
+ matchAt := func(offset, s int, first uint32, rep bool) match {
+ if best.length != 0 && best.s-best.offset == s-offset {
+ // Don't retest if we have the same offset.
+ return match{offset: offset, s: s}
+ }
+ if load32(src, offset) != first {
+ return match{offset: offset, s: s}
+ }
+ m := match{offset: offset, s: s, length: 4 + offset, rep: rep}
+ s += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[m.length] {
+ m.length++
+ s++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
+ m.length += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ m.length += 8
+ }
+ m.length -= offset
+ m.score = score(m)
+ if m.score <= -m.s {
+ // Eliminate if no savings, we might find a better one.
+ m.length = 0
+ }
+ return m
+ }
+ matchDict := func(candidate, s int, first uint32, rep bool) match {
+ // Calculate offset as if in continuous array with s
+ offset := -len(dict.dict) + candidate
+ if best.length != 0 && best.s-best.offset == s-offset && !rep {
+ // Don't retest if we have the same offset.
+ return match{offset: offset, s: s}
+ }
+
+ if load32(dict.dict, candidate) != first {
+ return match{offset: offset, s: s}
+ }
+ m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true}
+ s += 4
+ if !rep {
+ for s < sLimitDict && m.length < len(dict.dict) {
+ if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
+ if src[s] == dict.dict[m.length] {
+ m.length++
+ s++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
+ m.length += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ m.length += 8
+ }
+ } else {
+ for s < len(src) && m.length < len(dict.dict) {
+ if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
+ if src[s] == dict.dict[m.length] {
+ m.length++
+ s++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
+ m.length += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ m.length += 8
+ }
+ }
+ m.length -= candidate
+ m.score = score(m)
+ if m.score <= -m.s {
+ // Eliminate if no savings, we might find a better one.
+ m.length = 0
+ }
+ return m
+ }
+
+ bestOf := func(a, b match) match {
+ if b.length == 0 {
+ return a
+ }
+ if a.length == 0 {
+ return b
+ }
+ as := a.score + b.s
+ bs := b.score + a.s
+ if as >= bs {
+ return a
+ }
+ return b
+ }
+
+ if s > 0 {
+ best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
+ }
+ if dict != nil {
+ candidateL := dict.bestTableLong[hashL]
+ candidateS := dict.bestTableShort[hashS]
+ best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false))
+ }
+ {
+ if (dict == nil || repeat <= s) && repeat > 0 {
+ best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
+ } else if s-repeat < -4 && dict != nil {
+ candidate := len(dict.dict) - (repeat - s)
+ best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
+ candidate++
+ best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true))
+ }
+
+ if best.length > 0 {
+ hashS := hash4(cv>>8, sTableBits)
+ // s+1
+ nextShort := sTable[hashS]
+ s := s + 1
+ cv := load64(src, s)
+ hashL := hash8(cv, lTableBits)
+ nextLong := lTable[hashL]
+ best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
+
+ // Dict at + 1
+ if dict != nil {
+ candidateL := dict.bestTableLong[hashL]
+ candidateS := dict.bestTableShort[hashS]
+
+ best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
+ }
+
+ // s+2
+ if true {
+ hashS := hash4(cv>>8, sTableBits)
+
+ nextShort = sTable[hashS]
+ s++
+ cv = load64(src, s)
+ hashL := hash8(cv, lTableBits)
+ nextLong = lTable[hashL]
+
+ if (dict == nil || repeat <= s) && repeat > 0 {
+ // Repeat at + 2
+ best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true))
+ } else if repeat-s > 4 && dict != nil {
+ candidate := len(dict.dict) - (repeat - s)
+ best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
+ }
+ best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
+
+ // Dict at +2
+ // Very small gain
+ if dict != nil {
+ candidateL := dict.bestTableLong[hashL]
+ candidateS := dict.bestTableShort[hashS]
+
+ best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
+ }
+ }
+ // Search for a match at best match end, see if that is better.
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is around 1-2 bytes, but depends on input.
+ // The skipped bytes are tested in Extend backwards,
+ // and still picked up as part of the match if they do.
+ const skipBeginning = 2
+ const skipEnd = 1
+ if sAt := best.s + best.length - skipEnd; sAt < sLimit {
+
+ sBack := best.s + skipBeginning - skipEnd
+ backL := best.length - skipBeginning
+ // Load initial values
+ cv = load64(src, sBack)
+
+ // Grab candidates...
+ next := lTable[hash8(load64(src, sAt), lTableBits)]
+
+ if checkAt := getCur(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
+ }
+ if checkAt := getPrev(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
+ }
+ // Disabled: Extremely small gain
+ if false {
+ next = sTable[hash4(load64(src, sAt), sTableBits)]
+ if checkAt := getCur(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
+ }
+ if checkAt := getPrev(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
+ }
+ }
+ }
+ }
+ }
+
+ // Update table
+ lTable[hashL] = uint64(s) | candidateL<<32
+ sTable[hashS] = uint64(s) | candidateS<<32
+
+ if best.length > 0 {
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards, not needed for repeats...
+ s = best.s
+ if !best.rep && !best.dict {
+ for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
+ best.offset--
+ best.length++
+ s--
+ }
+ }
+ if false && best.offset >= s {
+ panic(fmt.Errorf("t %d >= s %d", best.offset, s))
+ }
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := s - best.offset
+ s += best.length
+
+ if offset > 65535 && s-base <= 5 && !best.rep {
+ // Bail if the match is equal or worse to the encoding.
+ s = best.s + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+ if debug && nextEmit != base {
+ fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base)
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if best.rep {
+ if nextEmit > 0 || best.dict {
+ if debug {
+ fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
+ }
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], offset, best.length)
+ } else {
+ // First match without dict cannot be a repeat.
+ if debug {
+ fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
+ }
+ d += emitCopy(dst[d:], offset, best.length)
+ }
+ } else {
+ if debug {
+ fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
+ }
+ d += emitCopy(dst[d:], offset, best.length)
+ }
+ repeat = offset
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Fill tables...
+ for i := best.s + 1; i < s; i++ {
+ cv0 := load64(src, i)
+ long0 := hash8(cv0, lTableBits)
+ short0 := hash4(cv0, sTableBits)
+ lTable[long0] = uint64(i) | lTable[long0]<<32
+ sTable[short0] = uint64(i) | sTable[short0]<<32
+ }
+ cv = load64(src, s)
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", len(src)-nextEmit, "literals")
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBestSnappy(dst, src []byte) (d int) {
+ // Initialize the hash tables.
+ const (
+ // Long hash matches.
+ lTableBits = 19
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 16
+ maxSTableSize = 1 << sTableBits
+
+ inputMargin = 8 + 2
+ )
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+
+ var lTable [maxLTableSize]uint64
+ var sTable [maxSTableSize]uint64
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+ const lowbitMask = 0xffffffff
+ getCur := func(x uint64) int {
+ return int(x & lowbitMask)
+ }
+ getPrev := func(x uint64) int {
+ return int(x >> 32)
+ }
+ const maxSkip = 64
+
+ for {
+ type match struct {
+ offset int
+ s int
+ length int
+ score int
+ }
+ var best match
+ for {
+ // Next src position to check
+ nextS := (s-nextEmit)>>8 + 1
+ if nextS > maxSkip {
+ nextS = s + maxSkip
+ } else {
+ nextS += s
+ }
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hashL := hash8(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL := lTable[hashL]
+ candidateS := sTable[hashS]
+
+ score := func(m match) int {
+ // Matches that are longer forward are penalized since we must emit it as a literal.
+ score := m.length - m.s
+ if nextEmit == m.s {
+ // If we do not have to emit literals, we save 1 byte
+ score++
+ }
+ offset := m.s - m.offset
+
+ return score - emitCopyNoRepeatSize(offset, m.length)
+ }
+
+ matchAt := func(offset, s int, first uint32) match {
+ if best.length != 0 && best.s-best.offset == s-offset {
+ // Don't retest if we have the same offset.
+ return match{offset: offset, s: s}
+ }
+ if load32(src, offset) != first {
+ return match{offset: offset, s: s}
+ }
+ m := match{offset: offset, s: s, length: 4 + offset}
+ s += 4
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
+ m.length += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ m.length += 8
+ }
+ m.length -= offset
+ m.score = score(m)
+ if m.score <= -m.s {
+ // Eliminate if no savings, we might find a better one.
+ m.length = 0
+ }
+ return m
+ }
+
+ bestOf := func(a, b match) match {
+ if b.length == 0 {
+ return a
+ }
+ if a.length == 0 {
+ return b
+ }
+ as := a.score + b.s
+ bs := b.score + a.s
+ if as >= bs {
+ return a
+ }
+ return b
+ }
+
+ best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv)))
+ best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv)))
+ best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv)))
+
+ {
+ best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
+ if best.length > 0 {
+ // s+1
+ nextShort := sTable[hash4(cv>>8, sTableBits)]
+ s := s + 1
+ cv := load64(src, s)
+ nextLong := lTable[hash8(cv, lTableBits)]
+ best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
+ best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
+ best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
+ best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
+ // Repeat at + 2
+ best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
+
+ // s+2
+ if true {
+ nextShort = sTable[hash4(cv>>8, sTableBits)]
+ s++
+ cv = load64(src, s)
+ nextLong = lTable[hash8(cv, lTableBits)]
+ best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
+ best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
+ best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
+ best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
+ }
+ // Search for a match at best match end, see if that is better.
+ if sAt := best.s + best.length; sAt < sLimit {
+ sBack := best.s
+ backL := best.length
+ // Load initial values
+ cv = load64(src, sBack)
+ // Search for mismatch
+ next := lTable[hash8(load64(src, sAt), lTableBits)]
+ //next := sTable[hash4(load64(src, sAt), sTableBits)]
+
+ if checkAt := getCur(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
+ }
+ if checkAt := getPrev(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
+ }
+ }
+ }
+ }
+
+ // Update table
+ lTable[hashL] = uint64(s) | candidateL<<32
+ sTable[hashS] = uint64(s) | candidateS<<32
+
+ if best.length > 0 {
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards, not needed for repeats...
+ s = best.s
+ if true {
+ for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
+ best.offset--
+ best.length++
+ s--
+ }
+ }
+ if false && best.offset >= s {
+ panic(fmt.Errorf("t %d >= s %d", best.offset, s))
+ }
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := s - best.offset
+
+ s += best.length
+
+ if offset > 65535 && s-base <= 5 {
+ // Bail if the match is equal or worse to the encoding.
+ s = best.s + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ d += emitCopyNoRepeat(dst[d:], offset, best.length)
+ repeat = offset
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Fill tables...
+ for i := best.s + 1; i < s; i++ {
+ cv0 := load64(src, i)
+ long0 := hash8(cv0, lTableBits)
+ short0 := hash4(cv0, sTableBits)
+ lTable[long0] = uint64(i) | lTable[long0]<<32
+ sTable[short0] = uint64(i) | sTable[short0]<<32
+ }
+ cv = load64(src, s)
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+// emitCopySize returns the size to encode the offset+length
+//
+// It assumes that:
+//
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
+func emitCopySize(offset, length int) int {
+ if offset >= 65536 {
+ i := 0
+ if length > 64 {
+ length -= 64
+ if length >= 4 {
+ // Emit remaining as repeats
+ return 5 + emitRepeatSize(offset, length)
+ }
+ i = 5
+ }
+ if length == 0 {
+ return i
+ }
+ return i + 5
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ if offset < 2048 {
+ // Emit 8 bytes, then rest as repeats...
+ return 2 + emitRepeatSize(offset, length-8)
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ return 3 + emitRepeatSize(offset, length-60)
+ }
+ if length >= 12 || offset >= 2048 {
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ return 2
+}
+
+// emitCopyNoRepeatSize returns the size to encode the offset+length
+//
+// It assumes that:
+//
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
+func emitCopyNoRepeatSize(offset, length int) int {
+ if offset >= 65536 {
+ return 5 + 5*(length/64)
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ // Emit remaining as repeats, at least 4 bytes remain.
+ return 3 + 3*(length/60)
+ }
+ if length >= 12 || offset >= 2048 {
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ return 2
+}
+
+// emitRepeatSize returns the number of bytes required to encode a repeat.
+// Length must be at least 4 and < 1<<24
+func emitRepeatSize(offset, length int) int {
+ // Repeat offset, make length cheaper
+ if length <= 4+4 || (length < 8+4 && offset < 2048) {
+ return 2
+ }
+ if length < (1<<8)+4+4 {
+ return 3
+ }
+ if length < (1<<16)+(1<<8)+4 {
+ return 4
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= (1 << 16) - 4
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ }
+ if left > 0 {
+ return 5 + emitRepeatSize(offset, left)
+ }
+ return 5
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go
new file mode 100644
index 0000000000..544cb1e17b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_better.go
@@ -0,0 +1,1106 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "bytes"
+ "fmt"
+ "math/bits"
+)
+
+// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4(u uint64, h uint8) uint32 {
+ const prime4bytes = 2654435761
+ return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash5(u uint64, h uint8) uint32 {
+ const prime5bytes = 889523592379
+ return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+ const prime7bytes = 58295818150454627
+ return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
+}
+
+// hash8 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash8(u uint64, h uint8) uint32 {
+ const prime8bytes = 0xcf1bbcdcb7a56463
+ return uint32((u * prime8bytes) >> ((64 - h) & 63))
+}
+
+// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterGo(dst, src []byte) (d int) {
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+
+ // Initialize the hash tables.
+ const (
+ // Long hash matches.
+ lTableBits = 17
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 14
+ maxSTableSize = 1 << sTableBits
+ )
+
+ var lTable [maxLTableSize]uint32
+ var sTable [maxSTableSize]uint32
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We initialize repeat to 0, so we never match on first attempt
+ repeat := 0
+
+ for {
+ candidateL := 0
+ nextS := 0
+ for {
+ // Next src position to check
+ nextS = s + (s-nextEmit)>>7 + 1
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hashL := hash7(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL = int(lTable[hashL])
+ candidateS := int(sTable[hashS])
+ lTable[hashL] = uint32(s)
+ sTable[hashS] = uint32(s)
+
+ valLong := load64(src, candidateL)
+ valShort := load64(src, candidateS)
+
+ // If long matches at least 8 bytes, use that.
+ if cv == valLong {
+ break
+ }
+ if cv == valShort {
+ candidateL = candidateS
+ break
+ }
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ // Minimum length of a repeat. Tested with various values.
+ // While 4-5 offers improvements in some, 6 reduces
+ // regressions significantly.
+ const wantRepeatBytes = 6
+ const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
+ if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + wantRepeatBytes + checkRep
+ s += wantRepeatBytes + checkRep
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidate] {
+ s++
+ candidate++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ // Index in-between
+ index0 := base + 1
+ index1 := s - 2
+
+ for index0 < index1 {
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 2
+ index1 -= 2
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ // Long likely matches 7, so take that.
+ if uint32(cv) == uint32(valLong) {
+ break
+ }
+
+ // Check our short candidate
+ if uint32(cv) == uint32(valShort) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ break
+ }
+ // Use our short candidate.
+ candidateL = candidateS
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := base - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidateL] {
+ s++
+ candidateL++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if offset > 65535 && s-base <= 5 && repeat != offset {
+ // Bail if the match is equal or worse to the encoding.
+ s = nextS + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if repeat == offset {
+ d += emitRepeat(dst[d:], offset, s-base)
+ } else {
+ d += emitCopy(dst[d:], offset, s-base)
+ repeat = offset
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ // lTable could be postponed, but very minor difference.
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // Index large values sparsely in between.
+ // We do two starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
+ index0 += 2
+ index2 += 2
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+
+ // Initialize the hash tables.
+ const (
+ // Long hash matches.
+ lTableBits = 16
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 14
+ maxSTableSize = 1 << sTableBits
+ )
+
+ var lTable [maxLTableSize]uint32
+ var sTable [maxSTableSize]uint32
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We initialize repeat to 0, so we never match on first attempt
+ repeat := 0
+ const maxSkip = 100
+
+ for {
+ candidateL := 0
+ nextS := 0
+ for {
+ // Next src position to check
+ nextS = (s-nextEmit)>>7 + 1
+ if nextS > maxSkip {
+ nextS = s + maxSkip
+ } else {
+ nextS += s
+ }
+
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hashL := hash7(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL = int(lTable[hashL])
+ candidateS := int(sTable[hashS])
+ lTable[hashL] = uint32(s)
+ sTable[hashS] = uint32(s)
+
+ if uint32(cv) == load32(src, candidateL) {
+ break
+ }
+
+ // Check our short candidate
+ if uint32(cv) == load32(src, candidateS) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ break
+ }
+ // Use our short candidate.
+ candidateL = candidateS
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := base - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidateL] {
+ s++
+ candidateL++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if offset > 65535 && s-base <= 5 && repeat != offset {
+ // Bail if the match is equal or worse to the encoding.
+ s = nextS + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ d += emitCopyNoRepeat(dst[d:], offset, s-base)
+ repeat = offset
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // Index large values sparsely in between.
+ // We do two starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
+ index0 += 2
+ index2 += 2
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) {
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ // Initialize the hash tables.
+ const (
+ // Long hash matches.
+ lTableBits = 17
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 14
+ maxSTableSize = 1 << sTableBits
+
+ maxAhead = 8 // maximum bytes ahead without checking sLimit
+
+ debug = false
+ )
+
+ sLimit := len(src) - inputMargin
+ if sLimit > MaxDictSrcOffset-maxAhead {
+ sLimit = MaxDictSrcOffset - maxAhead
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+
+ dict.initBetter()
+
+ var lTable [maxLTableSize]uint32
+ var sTable [maxSTableSize]uint32
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 0
+ cv := load64(src, s)
+
+ // We initialize repeat to 0, so we never match on first attempt
+ repeat := len(dict.dict) - dict.repeat
+
+ // While in dict
+searchDict:
+ for {
+ candidateL := 0
+ nextS := 0
+ for {
+ // Next src position to check
+ nextS = s + (s-nextEmit)>>7 + 1
+ if nextS > sLimit {
+ break searchDict
+ }
+ hashL := hash7(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL = int(lTable[hashL])
+ candidateS := int(sTable[hashS])
+ dictL := int(dict.betterTableLong[hashL])
+ dictS := int(dict.betterTableShort[hashS])
+ lTable[hashL] = uint32(s)
+ sTable[hashS] = uint32(s)
+
+ valLong := load64(src, candidateL)
+ valShort := load64(src, candidateS)
+
+ // If long matches at least 8 bytes, use that.
+ if s != 0 {
+ if cv == valLong {
+ goto emitMatch
+ }
+ if cv == valShort {
+ candidateL = candidateS
+ goto emitMatch
+ }
+ }
+
+ // Check dict repeat.
+ if repeat >= s+4 {
+ candidate := len(dict.dict) - repeat + s
+ if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) {
+ // Extend back
+ base := s
+ for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+ s += 4
+ candidate += 4
+ for candidate < len(dict.dict)-8 && s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ d += emitRepeat(dst[d:], repeat, s-base)
+ if debug {
+ fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+ // Index in-between
+ index0 := base + 1
+ index1 := s - 2
+
+ cv = load64(src, s)
+ for index0 < index1 {
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 2
+ index1 -= 2
+ }
+ continue
+ }
+ }
+ // Don't try to find match at s==0
+ if s == 0 {
+ cv = load64(src, nextS)
+ s = nextS
+ continue
+ }
+
+ // Long likely matches 7, so take that.
+ if uint32(cv) == uint32(valLong) {
+ goto emitMatch
+ }
+
+ // Long dict...
+ if uint32(cv) == load32(dict.dict, dictL) {
+ candidateL = dictL
+ goto emitDict
+ }
+
+ // Check our short candidate
+ if uint32(cv) == uint32(valShort) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ goto emitMatch
+ }
+ // Use our short candidate.
+ candidateL = candidateS
+ goto emitMatch
+ }
+ if uint32(cv) == load32(dict.dict, dictS) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ goto emitMatch
+ }
+ candidateL = dictS
+ goto emitDict
+ }
+ cv = load64(src, nextS)
+ s = nextS
+ }
+ emitDict:
+ {
+ if debug {
+ if load32(dict.dict, candidateL) != load32(src, s) {
+ panic("dict emit mismatch")
+ }
+ }
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ offset := s + (len(dict.dict)) - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if repeat == offset {
+ if debug {
+ fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
+ }
+ d += emitRepeat(dst[d:], offset, s-base)
+ } else {
+ if debug {
+ fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
+ }
+ // Matches longer than 64 are split.
+ if s <= sLimit || s-base < 8 {
+ d += emitCopy(dst[d:], offset, s-base)
+ } else {
+ // Split to ensure we don't start a copy within next block.
+ d += emitCopy(dst[d:], offset, 4)
+ d += emitRepeat(dst[d:], offset, s-base-4)
+ }
+ repeat = offset
+ }
+ if false {
+ // Validate match.
+ if s <= candidateL {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := dict.dict[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // index every second long in between.
+ for index0 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+ index0 += 2
+ index1 -= 2
+ }
+ }
+ continue
+ }
+ emitMatch:
+
+ // Extend backwards
+ for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := base - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidateL] {
+ s++
+ candidateL++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if offset > 65535 && s-base <= 5 && repeat != offset {
+ // Bail if the match is equal or worse to the encoding.
+ s = nextS + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ if repeat == offset {
+ if debug {
+ fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s)
+ }
+ d += emitRepeat(dst[d:], offset, s-base)
+ } else {
+ if debug {
+ fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s)
+ }
+ d += emitCopy(dst[d:], offset, s-base)
+ repeat = offset
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // Index large values sparsely in between.
+ // We do two starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
+ index0 += 2
+ index2 += 2
+ }
+ }
+
+ // Search without dict:
+ if repeat > s {
+ repeat = 0
+ }
+
+ // No more dict
+ sLimit = len(src) - inputMargin
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ if debug {
+ fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
+ }
+ for {
+ candidateL := 0
+ nextS := 0
+ for {
+ // Next src position to check
+ nextS = s + (s-nextEmit)>>7 + 1
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hashL := hash7(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL = int(lTable[hashL])
+ candidateS := int(sTable[hashS])
+ lTable[hashL] = uint32(s)
+ sTable[hashS] = uint32(s)
+
+ valLong := load64(src, candidateL)
+ valShort := load64(src, candidateS)
+
+ // If long matches at least 8 bytes, use that.
+ if cv == valLong {
+ break
+ }
+ if cv == valShort {
+ candidateL = candidateS
+ break
+ }
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ // Minimum length of a repeat. Tested with various values.
+ // While 4-5 offers improvements in some, 6 reduces
+ // regressions significantly.
+ const wantRepeatBytes = 6
+ const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
+ if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + wantRepeatBytes + checkRep
+ s += wantRepeatBytes + checkRep
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidate] {
+ s++
+ candidate++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ // Index in-between
+ index0 := base + 1
+ index1 := s - 2
+
+ for index0 < index1 {
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 2
+ index1 -= 2
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ // Long likely matches 7, so take that.
+ if uint32(cv) == uint32(valLong) {
+ break
+ }
+
+ // Check our short candidate
+ if uint32(cv) == uint32(valShort) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ break
+ }
+ // Use our short candidate.
+ candidateL = candidateS
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := base - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidateL] {
+ s++
+ candidateL++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if offset > 65535 && s-base <= 5 && repeat != offset {
+ // Bail if the match is equal or worse to the encoding.
+ s = nextS + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if repeat == offset {
+ d += emitRepeat(dst[d:], offset, s-base)
+ } else {
+ d += emitCopy(dst[d:], offset, s-base)
+ repeat = offset
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // Index large values sparsely in between.
+ // We do two starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
+ index0 += 2
+ index2 += 2
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go
new file mode 100644
index 0000000000..0d39c7b0e0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_go.go
@@ -0,0 +1,727 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package s2
+
+import (
+ "bytes"
+ "math/bits"
+)
+
+const hasAmd64Asm = false
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src))
+func encodeBlock(dst, src []byte) (d int) {
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeBlockGo(dst, src)
+}
+
+// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockBetter(dst, src []byte) (d int) {
+ return encodeBlockBetterGo(dst, src)
+}
+
+// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockBetterSnappy(dst, src []byte) (d int) {
+ return encodeBlockBetterSnappyGo(dst, src)
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockSnappy(dst, src []byte) (d int) {
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeBlockSnappyGo(dst, src)
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 0 <= len(lit) && len(lit) <= math.MaxUint32
+func emitLiteral(dst, lit []byte) int {
+ if len(lit) == 0 {
+ return 0
+ }
+ const num = 63<<2 | tagLiteral
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[1] = uint8(n)
+ dst[0] = 60<<2 | tagLiteral
+ i = 2
+ case n < 1<<16:
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 61<<2 | tagLiteral
+ i = 3
+ case n < 1<<24:
+ dst[3] = uint8(n >> 16)
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 62<<2 | tagLiteral
+ i = 4
+ default:
+ dst[4] = uint8(n >> 24)
+ dst[3] = uint8(n >> 16)
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 63<<2 | tagLiteral
+ i = 5
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitRepeat writes a repeat chunk and returns the number of bytes written.
+// Length must be at least 4 and < 1<<24
+func emitRepeat(dst []byte, offset, length int) int {
+ // Repeat offset, make length cheaper
+ length -= 4
+ if length <= 4 {
+ dst[0] = uint8(length)<<2 | tagCopy1
+ dst[1] = 0
+ return 2
+ }
+ if length < 8 && offset < 2048 {
+ // Encode WITH offset
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
+ return 2
+ }
+ if length < (1<<8)+4 {
+ length -= 4
+ dst[2] = uint8(length)
+ dst[1] = 0
+ dst[0] = 5<<2 | tagCopy1
+ return 3
+ }
+ if length < (1<<16)+(1<<8) {
+ length -= 1 << 8
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 6<<2 | tagCopy1
+ return 4
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= 1 << 16
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ length = maxRepeat - 4
+ }
+ dst[4] = uint8(length >> 16)
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 7<<2 | tagCopy1
+ if left > 0 {
+ return 5 + emitRepeat(dst[5:], offset, left)
+ }
+ return 5
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
+func emitCopy(dst []byte, offset, length int) int {
+ if offset >= 65536 {
+ i := 0
+ if length > 64 {
+ // Emit a length 64 copy, encoded as 5 bytes.
+ dst[4] = uint8(offset >> 24)
+ dst[3] = uint8(offset >> 16)
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 63<<2 | tagCopy4
+ length -= 64
+ if length >= 4 {
+ // Emit remaining as repeats
+ return 5 + emitRepeat(dst[5:], offset, length)
+ }
+ i = 5
+ }
+ if length == 0 {
+ return i
+ }
+ // Emit a copy, offset encoded as 4 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy4
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ dst[i+3] = uint8(offset >> 16)
+ dst[i+4] = uint8(offset >> 24)
+ return i + 5
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ off := 3
+ if offset < 2048 {
+ // emit 8 bytes as tagCopy1, rest as repeats.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
+ length -= 8
+ off = 2
+ } else {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ return off + emitRepeat(dst[off:], offset, length)
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ return 2
+}
+
+// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
+func emitCopyNoRepeat(dst []byte, offset, length int) int {
+ if offset >= 65536 {
+ i := 0
+ if length > 64 {
+ // Emit a length 64 copy, encoded as 5 bytes.
+ dst[4] = uint8(offset >> 24)
+ dst[3] = uint8(offset >> 16)
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 63<<2 | tagCopy4
+ length -= 64
+ if length >= 4 {
+ // Emit remaining as repeats
+ return 5 + emitCopyNoRepeat(dst[5:], offset, length)
+ }
+ i = 5
+ }
+ if length == 0 {
+ return i
+ }
+ // Emit a copy, offset encoded as 4 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy4
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ dst[i+3] = uint8(offset >> 16)
+ dst[i+4] = uint8(offset >> 24)
+ return i + 5
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ // Emit remaining as repeats, at least 4 bytes remain.
+ return 3 + emitCopyNoRepeat(dst[3:], offset, length)
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ return 2
+}
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//
+// len(a) <= len(b)
+func matchLen(a []byte, b []byte) int {
+ b = b[:len(a)]
+ var checked int
+ if len(a) > 4 {
+ // Try 4 bytes first
+ if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
+ return bits.TrailingZeros32(diff) >> 3
+ }
+ // Switch to 8 byte matching.
+ checked = 4
+ a = a[4:]
+ b = b[4:]
+ for len(a) >= 8 {
+ b = b[:len(a)]
+ if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
+ return checked + (bits.TrailingZeros64(diff) >> 3)
+ }
+ checked += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ }
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ return int(i) + checked
+ }
+ }
+ return len(a) + checked
+}
+
+func calcBlockSize(src []byte) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 13
+ maxTableSize = 1 << tableBits
+ )
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteralSize(src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteralSize(src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ if false {
+ // Validate match.
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteralSize(src[nextEmit:])
+ }
+ return d
+}
+
+func calcBlockSizeSmall(src []byte) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 9
+ maxTableSize = 1 << tableBits
+ )
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteralSize(src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteralSize(src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ if false {
+ // Validate match.
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteralSize(src[nextEmit:])
+ }
+ return d
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 0 <= len(lit) && len(lit) <= math.MaxUint32
+func emitLiteralSize(lit []byte) int {
+ if len(lit) == 0 {
+ return 0
+ }
+ switch {
+ case len(lit) <= 60:
+ return len(lit) + 1
+ case len(lit) <= 1<<8:
+ return len(lit) + 2
+ case len(lit) <= 1<<16:
+ return len(lit) + 3
+ case len(lit) <= 1<<24:
+ return len(lit) + 4
+ default:
+ return len(lit) + 5
+ }
+}
+
+func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+ panic("cvtLZ4BlockAsm should be unreachable")
+}
+
+func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+ panic("cvtLZ4BlockSnappyAsm should be unreachable")
+}
+
+func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+ panic("cvtLZ4sBlockAsm should be unreachable")
+}
+
+func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+ panic("cvtLZ4sBlockSnappyAsm should be unreachable")
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
new file mode 100644
index 0000000000..297e41501b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
@@ -0,0 +1,228 @@
+// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+package s2
+
+func _dummy_()
+
+// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm(dst []byte, src []byte) int
+
+// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4194304 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm4MB(dst []byte, src []byte) int
+
+// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 16383 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm12B(dst []byte, src []byte) int
+
+// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4095 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm10B(dst []byte, src []byte) int
+
+// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 511 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm8B(dst []byte, src []byte) int
+
+// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBetterBlockAsm(dst []byte, src []byte) int
+
+// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4194304 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
+
+// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 16383 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBetterBlockAsm12B(dst []byte, src []byte) int
+
+// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4095 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBetterBlockAsm10B(dst []byte, src []byte) int
+
+// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 511 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBetterBlockAsm8B(dst []byte, src []byte) int
+
+// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBlockAsm(dst []byte, src []byte) int
+
+// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 65535 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
+
+// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 16383 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
+
+// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4095 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
+
+// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 511 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
+
+// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
+
+// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 65535 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
+
+// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 16383 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
+
+// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4095 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
+
+// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 511 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
+
+// calcBlockSize returns the encoded size of a non-empty src as an S2 block.
+// Maximum input 4294967295 bytes.
+// Nothing is written; only the encoded size is computed.
+//
+//go:noescape
+func calcBlockSize(src []byte) int
+
+// calcBlockSizeSmall returns the encoded size of a non-empty src as an S2 block.
+// Maximum input 1024 bytes.
+// Nothing is written; only the encoded size is computed.
+//
+//go:noescape
+func calcBlockSizeSmall(src []byte) int
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes with margin of 0 bytes
+// 0 <= len(lit) && len(lit) <= math.MaxUint32
+//
+//go:noescape
+func emitLiteral(dst []byte, lit []byte) int
+
+// emitRepeat writes a repeat chunk and returns the number of bytes written.
+// Length must be at least 4 and < 1<<32
+//
+//go:noescape
+func emitRepeat(dst []byte, offset int, length int) int
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
+//
+//go:noescape
+func emitCopy(dst []byte, offset int, length int) int
+
+// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
+//
+//go:noescape
+func emitCopyNoRepeat(dst []byte, offset int, length int) int
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//
+// len(a) <= len(b)
+//
+//go:noescape
+func matchLen(a []byte, b []byte) int
+
+// cvtLZ4Block converts an LZ4 block to S2
+//
+//go:noescape
+func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4sBlock converts an LZ4s block to S2
+//
+//go:noescape
+func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4Block converts an LZ4 block to Snappy
+//
+//go:noescape
+func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4sBlock converts an LZ4s block to Snappy
+//
+//go:noescape
+func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
new file mode 100644
index 0000000000..54031aa313
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
@@ -0,0 +1,20399 @@
+// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+#include "textflag.h"
+
+// func _dummy_()
+TEXT ·_dummy_(SB), $0
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+ RET
+
+// func encodeBlockAsm(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm(SB), $65560-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBlockAsm:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBlockAsm
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBlockAsm:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ R8, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeBlockAsm
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
+ JZ repeat_extend_back_end_encodeBlockAsm
+
+repeat_extend_back_loop_encodeBlockAsm:
+ CMPL SI, DI
+ JBE repeat_extend_back_end_encodeBlockAsm
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeBlockAsm
+ LEAL -1(SI), SI
+ DECL BX
+ JNZ repeat_extend_back_loop_encodeBlockAsm
+
+repeat_extend_back_end_encodeBlockAsm:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeBlockAsm
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeBlockAsm
+ CMPL BX, $0x00010000
+ JB three_bytes_repeat_emit_encodeBlockAsm
+ CMPL BX, $0x01000000
+ JB four_bytes_repeat_emit_encodeBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm
+
+four_bytes_repeat_emit_encodeBlockAsm:
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm
+
+three_bytes_repeat_emit_encodeBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm
+
+two_bytes_repeat_emit_encodeBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeBlockAsm
+ JMP memmove_long_repeat_emit_encodeBlockAsm
+
+one_byte_repeat_emit_encodeBlockAsm:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeBlockAsm
+
+memmove_long_repeat_emit_encodeBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
+ JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
+ JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
+ JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R11, R11
+ CMPL R8, $0x08
+ JB matchlen_match4_repeat_extend_encodeBlockAsm
+
+matchlen_loopback_repeat_extend_encodeBlockAsm:
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_repeat_extend_encodeBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm
+
+matchlen_loop_repeat_extend_encodeBlockAsm:
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ CMPL R8, $0x08
+ JAE matchlen_loopback_repeat_extend_encodeBlockAsm
+
+matchlen_match4_repeat_extend_encodeBlockAsm:
+ CMPL R8, $0x04
+ JB matchlen_match2_repeat_extend_encodeBlockAsm
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm:
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm
+ JB repeat_extend_forward_end_encodeBlockAsm
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm
+
+matchlen_match1_repeat_extend_encodeBlockAsm:
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
+ JNE repeat_extend_forward_end_encodeBlockAsm
+ LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm:
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
+ JZ repeat_as_copy_encodeBlockAsm
+
+ // emitRepeat
+emit_repeat_again_match_repeat_encodeBlockAsm:
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_match_repeat_encodeBlockAsm
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_match_repeat_encodeBlockAsm
+
+cant_repeat_two_offset_match_repeat_encodeBlockAsm:
+ CMPL BX, $0x00000104
+ JB repeat_three_match_repeat_encodeBlockAsm
+ CMPL BX, $0x00010100
+ JB repeat_four_match_repeat_encodeBlockAsm
+ CMPL BX, $0x0100ffff
+ JB repeat_five_match_repeat_encodeBlockAsm
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_repeat_encodeBlockAsm
+
+repeat_five_match_repeat_encodeBlockAsm:
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_four_match_repeat_encodeBlockAsm:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_three_match_repeat_encodeBlockAsm:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_match_repeat_encodeBlockAsm:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_offset_match_repeat_encodeBlockAsm:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_as_copy_encodeBlockAsm:
+ // emitCopy
+ CMPL SI, $0x00010000
+ JB two_byte_offset_repeat_as_copy_encodeBlockAsm
+ CMPL BX, $0x40
+ JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm
+ MOVB $0xff, (AX)
+ MOVL SI, 1(AX)
+ LEAL -64(BX), BX
+ ADDQ $0x05, AX
+ CMPL BX, $0x04
+ JB four_bytes_remain_repeat_as_copy_encodeBlockAsm
+
+ // emitRepeat
+emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy:
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy
+ CMPL BX, $0x0100ffff
+ JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy
+
+repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy:
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+four_bytes_remain_repeat_as_copy_encodeBlockAsm:
+ TESTL BX, BX
+ JZ repeat_end_emit_encodeBlockAsm
+ XORL DI, DI
+ LEAL -1(DI)(BX*4), BX
+ MOVB BL, (AX)
+ MOVL SI, 1(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+two_byte_offset_repeat_as_copy_encodeBlockAsm:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm
+ CMPL SI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ MOVL SI, R8
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, BX
+
+ // emitRepeat
+ LEAL -4(BX), BX
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+
+emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL BX, $0x0100ffff
+ JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+
+repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+long_offset_short_repeat_as_copy_encodeBlockAsm:
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+
+ // emitRepeat
+emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short
+ CMPL BX, $0x0100ffff
+ JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short
+
+repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeBlockAsm
+
+no_repeat_found_encodeBlockAsm:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBlockAsm
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeBlockAsm
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeBlockAsm
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBlockAsm
+
+candidate3_match_encodeBlockAsm:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeBlockAsm
+
+candidate2_match_encodeBlockAsm:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeBlockAsm:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBlockAsm
+
+match_extend_back_loop_encodeBlockAsm:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBlockAsm
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBlockAsm
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBlockAsm
+ JMP match_extend_back_loop_encodeBlockAsm
+
+match_extend_back_end_encodeBlockAsm:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBlockAsm:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeBlockAsm
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeBlockAsm
+ CMPL DI, $0x00010000
+ JB three_bytes_match_emit_encodeBlockAsm
+ CMPL DI, $0x01000000
+ JB four_bytes_match_emit_encodeBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DI, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_encodeBlockAsm
+
+four_bytes_match_emit_encodeBlockAsm:
+ MOVL DI, R9
+ SHRL $0x10, R9
+ MOVB $0xf8, (AX)
+ MOVW DI, 1(AX)
+ MOVB R9, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeBlockAsm
+
+three_bytes_match_emit_encodeBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBlockAsm
+
+two_bytes_match_emit_encodeBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeBlockAsm
+ JMP memmove_long_match_emit_encodeBlockAsm
+
+one_byte_match_emit_encodeBlockAsm:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeBlockAsm
+
+memmove_long_match_emit_encodeBlockAsm:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm:
+match_nolit_loop_encodeBlockAsm:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeBlockAsm
+
+matchlen_loopback_match_nolit_encodeBlockAsm:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_encodeBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeBlockAsm
+
+matchlen_loop_match_nolit_encodeBlockAsm:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeBlockAsm
+
+matchlen_match4_match_nolit_encodeBlockAsm:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeBlockAsm
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeBlockAsm
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm
+ JB match_nolit_end_encodeBlockAsm
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeBlockAsm
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm
+
+matchlen_match1_match_nolit_encodeBlockAsm:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeBlockAsm
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL BX, $0x00010000
+ JB two_byte_offset_match_nolit_encodeBlockAsm
+ CMPL R9, $0x40
+ JBE four_bytes_remain_match_nolit_encodeBlockAsm
+ MOVB $0xff, (AX)
+ MOVL BX, 1(AX)
+ LEAL -64(R9), R9
+ ADDQ $0x05, AX
+ CMPL R9, $0x04
+ JB four_bytes_remain_match_nolit_encodeBlockAsm
+
+ // emitRepeat
+emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy:
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm_emit_copy
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm_emit_copy
+ CMPL R9, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBlockAsm_emit_copy
+ LEAL -16842747(R9), R9
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy
+
+repeat_five_match_nolit_encodeBlockAsm_emit_copy:
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_four_match_nolit_encodeBlockAsm_emit_copy:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_three_match_nolit_encodeBlockAsm_emit_copy:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_match_nolit_encodeBlockAsm_emit_copy:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+four_bytes_remain_match_nolit_encodeBlockAsm:
+ TESTL R9, R9
+ JZ match_nolit_emitcopy_end_encodeBlockAsm
+ XORL SI, SI
+ LEAL -1(SI)(R9*4), R9
+ MOVB R9, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+two_byte_offset_match_nolit_encodeBlockAsm:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBlockAsm
+ CMPL BX, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ MOVL BX, DI
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R9
+
+ // emitRepeat
+ LEAL -4(R9), R9
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL R9, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ LEAL -16842747(R9), R9
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+long_offset_short_match_nolit_encodeBlockAsm:
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+
+ // emitRepeat
+emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short:
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short
+ CMPL R9, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short
+ LEAL -16842747(R9), R9
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short
+
+repeat_five_match_nolit_encodeBlockAsm_emit_copy_short:
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_four_match_nolit_encodeBlockAsm_emit_copy_short:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_three_match_nolit_encodeBlockAsm_emit_copy_short:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_match_nolit_encodeBlockAsm_emit_copy_short:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+two_byte_offset_short_match_nolit_encodeBlockAsm:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBlockAsm
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBlockAsm
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+emit_copy_three_match_nolit_encodeBlockAsm:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBlockAsm:
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeBlockAsm
+ INCL CX
+ JMP search_loop_encodeBlockAsm
+
+emit_remainder_encodeBlockAsm:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 5(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBlockAsm:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBlockAsm
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBlockAsm
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBlockAsm
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeBlockAsm
+ CMPL DX, $0x01000000
+ JB four_bytes_emit_remainder_encodeBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm
+
+four_bytes_emit_remainder_encodeBlockAsm:
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm
+
+three_bytes_emit_remainder_encodeBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm
+
+two_bytes_emit_remainder_encodeBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBlockAsm
+ JMP memmove_long_emit_remainder_encodeBlockAsm
+
+one_byte_emit_remainder_encodeBlockAsm:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBlockAsm
+
+memmove_long_emit_remainder_encodeBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBlockAsm4MB(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm4MB(SB), $65560-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBlockAsm4MB:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBlockAsm4MB
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBlockAsm4MB:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm4MB
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ R8, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeBlockAsm4MB
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
+ JZ repeat_extend_back_end_encodeBlockAsm4MB
+
+repeat_extend_back_loop_encodeBlockAsm4MB:
+ CMPL SI, DI
+ JBE repeat_extend_back_end_encodeBlockAsm4MB
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeBlockAsm4MB
+ LEAL -1(SI), SI
+ DECL BX
+ JNZ repeat_extend_back_loop_encodeBlockAsm4MB
+
+repeat_extend_back_end_encodeBlockAsm4MB:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeBlockAsm4MB
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeBlockAsm4MB
+ CMPL BX, $0x00010000
+ JB three_bytes_repeat_emit_encodeBlockAsm4MB
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm4MB
+
+three_bytes_repeat_emit_encodeBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm4MB
+
+two_bytes_repeat_emit_encodeBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeBlockAsm4MB
+ JMP memmove_long_repeat_emit_encodeBlockAsm4MB
+
+one_byte_repeat_emit_encodeBlockAsm4MB:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm4MB:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB
+
+memmove_long_repeat_emit_encodeBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
+ JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
+ JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
+ JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm4MB:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R11, R11
+ CMPL R8, $0x08
+ JB matchlen_match4_repeat_extend_encodeBlockAsm4MB
+
+matchlen_loopback_repeat_extend_encodeBlockAsm4MB:
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_repeat_extend_encodeBlockAsm4MB
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm4MB
+
+matchlen_loop_repeat_extend_encodeBlockAsm4MB:
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ CMPL R8, $0x08
+ JAE matchlen_loopback_repeat_extend_encodeBlockAsm4MB
+
+matchlen_match4_repeat_extend_encodeBlockAsm4MB:
+ CMPL R8, $0x04
+ JB matchlen_match2_repeat_extend_encodeBlockAsm4MB
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm4MB:
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm4MB
+ JB repeat_extend_forward_end_encodeBlockAsm4MB
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm4MB
+
+matchlen_match1_repeat_extend_encodeBlockAsm4MB:
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
+ JNE repeat_extend_forward_end_encodeBlockAsm4MB
+ LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm4MB:
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
+ JZ repeat_as_copy_encodeBlockAsm4MB
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_match_repeat_encodeBlockAsm4MB
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_match_repeat_encodeBlockAsm4MB
+
+cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB:
+ CMPL BX, $0x00000104
+ JB repeat_three_match_repeat_encodeBlockAsm4MB
+ CMPL BX, $0x00010100
+ JB repeat_four_match_repeat_encodeBlockAsm4MB
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_match_repeat_encodeBlockAsm4MB:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_match_repeat_encodeBlockAsm4MB:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_match_repeat_encodeBlockAsm4MB:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_match_repeat_encodeBlockAsm4MB:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_as_copy_encodeBlockAsm4MB:
+ // emitCopy
+ CMPL SI, $0x00010000
+ JB two_byte_offset_repeat_as_copy_encodeBlockAsm4MB
+ CMPL BX, $0x40
+ JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
+ MOVB $0xff, (AX)
+ MOVL SI, 1(AX)
+ LEAL -64(BX), BX
+ ADDQ $0x05, AX
+ CMPL BX, $0x04
+ JB four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB:
+ TESTL BX, BX
+ JZ repeat_end_emit_encodeBlockAsm4MB
+ XORL DI, DI
+ LEAL -1(DI)(BX*4), BX
+ MOVB BL, (AX)
+ MOVL SI, 1(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+two_byte_offset_repeat_as_copy_encodeBlockAsm4MB:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB
+ CMPL SI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, BX
+
+ // emitRepeat
+ LEAL -4(BX), BX
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+long_offset_short_repeat_as_copy_encodeBlockAsm4MB:
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+ CMPL BX, $0x00010100
+ JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+ LEAL -65536(BX), BX
+ MOVL BX, SI
+ MOVW $0x001d, (AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm4MB:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm4MB:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeBlockAsm4MB
+
+no_repeat_found_encodeBlockAsm4MB:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBlockAsm4MB
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeBlockAsm4MB
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeBlockAsm4MB
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBlockAsm4MB
+
+candidate3_match_encodeBlockAsm4MB:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeBlockAsm4MB
+
+candidate2_match_encodeBlockAsm4MB:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeBlockAsm4MB:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBlockAsm4MB
+
+match_extend_back_loop_encodeBlockAsm4MB:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBlockAsm4MB
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBlockAsm4MB
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBlockAsm4MB
+ JMP match_extend_back_loop_encodeBlockAsm4MB
+
+match_extend_back_end_encodeBlockAsm4MB:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 4(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBlockAsm4MB:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeBlockAsm4MB
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeBlockAsm4MB
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeBlockAsm4MB
+ CMPL DI, $0x00010000
+ JB three_bytes_match_emit_encodeBlockAsm4MB
+ MOVL DI, R9
+ SHRL $0x10, R9
+ MOVB $0xf8, (AX)
+ MOVW DI, 1(AX)
+ MOVB R9, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeBlockAsm4MB
+
+three_bytes_match_emit_encodeBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBlockAsm4MB
+
+two_bytes_match_emit_encodeBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeBlockAsm4MB
+ JMP memmove_long_match_emit_encodeBlockAsm4MB
+
+one_byte_match_emit_encodeBlockAsm4MB:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm4MB:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm4MB:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeBlockAsm4MB
+
+memmove_long_match_emit_encodeBlockAsm4MB:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm4MB:
+match_nolit_loop_encodeBlockAsm4MB:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeBlockAsm4MB
+
+matchlen_loopback_match_nolit_encodeBlockAsm4MB:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_encodeBlockAsm4MB
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeBlockAsm4MB
+
+matchlen_loop_match_nolit_encodeBlockAsm4MB:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeBlockAsm4MB
+
+matchlen_match4_match_nolit_encodeBlockAsm4MB:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeBlockAsm4MB
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeBlockAsm4MB
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm4MB:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm4MB
+ JB match_nolit_end_encodeBlockAsm4MB
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeBlockAsm4MB
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm4MB
+
+matchlen_match1_match_nolit_encodeBlockAsm4MB:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeBlockAsm4MB
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm4MB:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL BX, $0x00010000
+ JB two_byte_offset_match_nolit_encodeBlockAsm4MB
+ CMPL R9, $0x40
+ JBE four_bytes_remain_match_nolit_encodeBlockAsm4MB
+ MOVB $0xff, (AX)
+ MOVL BX, 1(AX)
+ LEAL -64(R9), R9
+ ADDQ $0x05, AX
+ CMPL R9, $0x04
+ JB four_bytes_remain_match_nolit_encodeBlockAsm4MB
+
+ // emitRepeat
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+four_bytes_remain_match_nolit_encodeBlockAsm4MB:
+ TESTL R9, R9
+ JZ match_nolit_emitcopy_end_encodeBlockAsm4MB
+ XORL SI, SI
+ LEAL -1(SI)(R9*4), R9
+ MOVB R9, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+two_byte_offset_match_nolit_encodeBlockAsm4MB:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBlockAsm4MB
+ CMPL BX, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm4MB
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R9
+
+ // emitRepeat
+ LEAL -4(R9), R9
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+long_offset_short_match_nolit_encodeBlockAsm4MB:
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short
+ CMPL R9, $0x00010100
+ JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short
+ LEAL -65536(R9), R9
+ MOVL R9, BX
+ MOVW $0x001d, (AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+two_byte_offset_short_match_nolit_encodeBlockAsm4MB:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBlockAsm4MB
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBlockAsm4MB
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+emit_copy_three_match_nolit_encodeBlockAsm4MB:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm4MB:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm4MB
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBlockAsm4MB:
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeBlockAsm4MB
+ INCL CX
+ JMP search_loop_encodeBlockAsm4MB
+
+emit_remainder_encodeBlockAsm4MB:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 4(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBlockAsm4MB:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBlockAsm4MB
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBlockAsm4MB
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBlockAsm4MB
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeBlockAsm4MB
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm4MB
+
+three_bytes_emit_remainder_encodeBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm4MB
+
+two_bytes_emit_remainder_encodeBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBlockAsm4MB
+ JMP memmove_long_emit_remainder_encodeBlockAsm4MB
+
+one_byte_emit_remainder_encodeBlockAsm4MB:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm4MB:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm4MB:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBlockAsm4MB
+
+memmove_long_emit_remainder_encodeBlockAsm4MB:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm4MB:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBlockAsm12B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm12B(SB), $16408-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000080, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBlockAsm12B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBlockAsm12B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBlockAsm12B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm12B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ SHLQ $0x18, R10
+ IMULQ R8, R10
+ SHRQ $0x34, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeBlockAsm12B
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
+ JZ repeat_extend_back_end_encodeBlockAsm12B
+
+repeat_extend_back_loop_encodeBlockAsm12B:
+ CMPL SI, DI
+ JBE repeat_extend_back_end_encodeBlockAsm12B
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeBlockAsm12B
+ LEAL -1(SI), SI
+ DECL BX
+ JNZ repeat_extend_back_loop_encodeBlockAsm12B
+
+repeat_extend_back_end_encodeBlockAsm12B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeBlockAsm12B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeBlockAsm12B
+ JB three_bytes_repeat_emit_encodeBlockAsm12B
+
+three_bytes_repeat_emit_encodeBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm12B
+
+two_bytes_repeat_emit_encodeBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeBlockAsm12B
+ JMP memmove_long_repeat_emit_encodeBlockAsm12B
+
+one_byte_repeat_emit_encodeBlockAsm12B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm12B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeBlockAsm12B
+
+memmove_long_repeat_emit_encodeBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
+ JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
+ JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
+ JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm12B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R11, R11
+ CMPL R8, $0x08
+ JB matchlen_match4_repeat_extend_encodeBlockAsm12B
+
+matchlen_loopback_repeat_extend_encodeBlockAsm12B:
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_repeat_extend_encodeBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm12B
+
+matchlen_loop_repeat_extend_encodeBlockAsm12B:
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ CMPL R8, $0x08
+ JAE matchlen_loopback_repeat_extend_encodeBlockAsm12B
+
+matchlen_match4_repeat_extend_encodeBlockAsm12B:
+ CMPL R8, $0x04
+ JB matchlen_match2_repeat_extend_encodeBlockAsm12B
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm12B
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm12B:
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm12B
+ JB repeat_extend_forward_end_encodeBlockAsm12B
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm12B
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm12B
+
+matchlen_match1_repeat_extend_encodeBlockAsm12B:
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
+ JNE repeat_extend_forward_end_encodeBlockAsm12B
+ LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm12B:
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
+ JZ repeat_as_copy_encodeBlockAsm12B
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_match_repeat_encodeBlockAsm12B
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_match_repeat_encodeBlockAsm12B
+
+cant_repeat_two_offset_match_repeat_encodeBlockAsm12B:
+ CMPL BX, $0x00000104
+ JB repeat_three_match_repeat_encodeBlockAsm12B
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_three_match_repeat_encodeBlockAsm12B:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_match_repeat_encodeBlockAsm12B:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_offset_match_repeat_encodeBlockAsm12B:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_as_copy_encodeBlockAsm12B:
+ // emitCopy
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B
+ CMPL SI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, BX
+
+ // emitRepeat
+ LEAL -4(BX), BX
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+long_offset_short_repeat_as_copy_encodeBlockAsm12B:
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm12B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm12B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeBlockAsm12B
+
+no_repeat_found_encodeBlockAsm12B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBlockAsm12B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeBlockAsm12B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeBlockAsm12B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBlockAsm12B
+
+candidate3_match_encodeBlockAsm12B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeBlockAsm12B
+
+candidate2_match_encodeBlockAsm12B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeBlockAsm12B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBlockAsm12B
+
+match_extend_back_loop_encodeBlockAsm12B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBlockAsm12B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBlockAsm12B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBlockAsm12B
+ JMP match_extend_back_loop_encodeBlockAsm12B
+
+match_extend_back_end_encodeBlockAsm12B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBlockAsm12B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeBlockAsm12B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeBlockAsm12B
+ JB three_bytes_match_emit_encodeBlockAsm12B
+
+three_bytes_match_emit_encodeBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBlockAsm12B
+
+two_bytes_match_emit_encodeBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeBlockAsm12B
+ JMP memmove_long_match_emit_encodeBlockAsm12B
+
+one_byte_match_emit_encodeBlockAsm12B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm12B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm12B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeBlockAsm12B
+
+memmove_long_match_emit_encodeBlockAsm12B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm12B:
+match_nolit_loop_encodeBlockAsm12B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeBlockAsm12B
+
+matchlen_loopback_match_nolit_encodeBlockAsm12B:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_encodeBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeBlockAsm12B
+
+matchlen_loop_match_nolit_encodeBlockAsm12B:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeBlockAsm12B
+
+matchlen_match4_match_nolit_encodeBlockAsm12B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeBlockAsm12B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeBlockAsm12B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm12B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm12B
+ JB match_nolit_end_encodeBlockAsm12B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeBlockAsm12B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm12B
+
+matchlen_match1_match_nolit_encodeBlockAsm12B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeBlockAsm12B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm12B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBlockAsm12B
+ CMPL BX, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm12B
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R9
+
+ // emitRepeat
+ LEAL -4(R9), R9
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+long_offset_short_match_nolit_encodeBlockAsm12B:
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+two_byte_offset_short_match_nolit_encodeBlockAsm12B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBlockAsm12B
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBlockAsm12B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+emit_copy_three_match_nolit_encodeBlockAsm12B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm12B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm12B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBlockAsm12B:
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x18, DI
+ IMULQ R8, DI
+ SHRQ $0x34, DI
+ SHLQ $0x18, BX
+ IMULQ R8, BX
+ SHRQ $0x34, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeBlockAsm12B
+ INCL CX
+ JMP search_loop_encodeBlockAsm12B
+
+emit_remainder_encodeBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBlockAsm12B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBlockAsm12B
+ JB three_bytes_emit_remainder_encodeBlockAsm12B
+
+three_bytes_emit_remainder_encodeBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm12B
+
+two_bytes_emit_remainder_encodeBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBlockAsm12B
+ JMP memmove_long_emit_remainder_encodeBlockAsm12B
+
+one_byte_emit_remainder_encodeBlockAsm12B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm12B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBlockAsm12B
+
+memmove_long_emit_remainder_encodeBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm12B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBlockAsm10B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm10B(SB), $4120-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000020, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBlockAsm10B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBlockAsm10B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBlockAsm10B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm10B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ R8, R10
+ SHRQ $0x36, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeBlockAsm10B
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
+ JZ repeat_extend_back_end_encodeBlockAsm10B
+
+repeat_extend_back_loop_encodeBlockAsm10B:
+ CMPL SI, DI
+ JBE repeat_extend_back_end_encodeBlockAsm10B
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeBlockAsm10B
+ LEAL -1(SI), SI
+ DECL BX
+ JNZ repeat_extend_back_loop_encodeBlockAsm10B
+
+repeat_extend_back_end_encodeBlockAsm10B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeBlockAsm10B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeBlockAsm10B
+ JB three_bytes_repeat_emit_encodeBlockAsm10B
+
+three_bytes_repeat_emit_encodeBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm10B
+
+two_bytes_repeat_emit_encodeBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeBlockAsm10B
+ JMP memmove_long_repeat_emit_encodeBlockAsm10B
+
+one_byte_repeat_emit_encodeBlockAsm10B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm10B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeBlockAsm10B
+
+memmove_long_repeat_emit_encodeBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
+ JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
+ JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
+ JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm10B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R11, R11
+ CMPL R8, $0x08
+ JB matchlen_match4_repeat_extend_encodeBlockAsm10B
+
+matchlen_loopback_repeat_extend_encodeBlockAsm10B:
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_repeat_extend_encodeBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm10B
+
+matchlen_loop_repeat_extend_encodeBlockAsm10B:
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ CMPL R8, $0x08
+ JAE matchlen_loopback_repeat_extend_encodeBlockAsm10B
+
+matchlen_match4_repeat_extend_encodeBlockAsm10B:
+ CMPL R8, $0x04
+ JB matchlen_match2_repeat_extend_encodeBlockAsm10B
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm10B
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm10B:
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm10B
+ JB repeat_extend_forward_end_encodeBlockAsm10B
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm10B
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm10B
+
+matchlen_match1_repeat_extend_encodeBlockAsm10B:
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
+ JNE repeat_extend_forward_end_encodeBlockAsm10B
+ LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm10B:
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
+ JZ repeat_as_copy_encodeBlockAsm10B
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_match_repeat_encodeBlockAsm10B
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_match_repeat_encodeBlockAsm10B
+
+cant_repeat_two_offset_match_repeat_encodeBlockAsm10B:
+ CMPL BX, $0x00000104
+ JB repeat_three_match_repeat_encodeBlockAsm10B
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_three_match_repeat_encodeBlockAsm10B:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_match_repeat_encodeBlockAsm10B:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_offset_match_repeat_encodeBlockAsm10B:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_as_copy_encodeBlockAsm10B:
+ // emitCopy
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B
+ CMPL SI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, BX
+
+ // emitRepeat
+ LEAL -4(BX), BX
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+long_offset_short_repeat_as_copy_encodeBlockAsm10B:
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
+ CMPL DI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
+ CMPL SI, $0x00000800
+ JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm10B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm10B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeBlockAsm10B
+
+no_repeat_found_encodeBlockAsm10B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBlockAsm10B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeBlockAsm10B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeBlockAsm10B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBlockAsm10B
+
+candidate3_match_encodeBlockAsm10B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeBlockAsm10B
+
+candidate2_match_encodeBlockAsm10B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeBlockAsm10B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBlockAsm10B
+
+match_extend_back_loop_encodeBlockAsm10B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBlockAsm10B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBlockAsm10B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBlockAsm10B
+ JMP match_extend_back_loop_encodeBlockAsm10B
+
+match_extend_back_end_encodeBlockAsm10B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBlockAsm10B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeBlockAsm10B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeBlockAsm10B
+ JB three_bytes_match_emit_encodeBlockAsm10B
+
+three_bytes_match_emit_encodeBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBlockAsm10B
+
+two_bytes_match_emit_encodeBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeBlockAsm10B
+ JMP memmove_long_match_emit_encodeBlockAsm10B
+
+one_byte_match_emit_encodeBlockAsm10B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm10B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm10B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeBlockAsm10B
+
+memmove_long_match_emit_encodeBlockAsm10B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm10B:
+match_nolit_loop_encodeBlockAsm10B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeBlockAsm10B
+
+matchlen_loopback_match_nolit_encodeBlockAsm10B:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_encodeBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeBlockAsm10B
+
+matchlen_loop_match_nolit_encodeBlockAsm10B:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeBlockAsm10B
+
+matchlen_match4_match_nolit_encodeBlockAsm10B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeBlockAsm10B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeBlockAsm10B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm10B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm10B
+ JB match_nolit_end_encodeBlockAsm10B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeBlockAsm10B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm10B
+
+matchlen_match1_match_nolit_encodeBlockAsm10B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeBlockAsm10B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm10B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBlockAsm10B
+ CMPL BX, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm10B
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R9
+
+ // emitRepeat
+ LEAL -4(R9), R9
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+long_offset_short_match_nolit_encodeBlockAsm10B:
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
+ CMPL BX, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+two_byte_offset_short_match_nolit_encodeBlockAsm10B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBlockAsm10B
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBlockAsm10B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+emit_copy_three_match_nolit_encodeBlockAsm10B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm10B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm10B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBlockAsm10B:
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x36, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x36, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeBlockAsm10B
+ INCL CX
+ JMP search_loop_encodeBlockAsm10B
+
+emit_remainder_encodeBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBlockAsm10B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBlockAsm10B
+ JB three_bytes_emit_remainder_encodeBlockAsm10B
+
+three_bytes_emit_remainder_encodeBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm10B
+
+two_bytes_emit_remainder_encodeBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBlockAsm10B
+ JMP memmove_long_emit_remainder_encodeBlockAsm10B
+
+one_byte_emit_remainder_encodeBlockAsm10B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm10B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBlockAsm10B
+
+memmove_long_emit_remainder_encodeBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm10B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBlockAsm8B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm8B(SB), $1048-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000008, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBlockAsm8B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBlockAsm8B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBlockAsm8B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm8B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
+ SHLQ $0x20, R10
+ IMULQ R8, R10
+ SHRQ $0x38, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeBlockAsm8B
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
+ JZ repeat_extend_back_end_encodeBlockAsm8B
+
+repeat_extend_back_loop_encodeBlockAsm8B:
+ CMPL SI, DI
+ JBE repeat_extend_back_end_encodeBlockAsm8B
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeBlockAsm8B
+ LEAL -1(SI), SI
+ DECL BX
+ JNZ repeat_extend_back_loop_encodeBlockAsm8B
+
+repeat_extend_back_end_encodeBlockAsm8B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeBlockAsm8B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeBlockAsm8B
+ JB three_bytes_repeat_emit_encodeBlockAsm8B
+
+three_bytes_repeat_emit_encodeBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeBlockAsm8B
+
+two_bytes_repeat_emit_encodeBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeBlockAsm8B
+ JMP memmove_long_repeat_emit_encodeBlockAsm8B
+
+one_byte_repeat_emit_encodeBlockAsm8B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm8B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeBlockAsm8B
+
+memmove_long_repeat_emit_encodeBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
+ JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
+ JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
+ JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm8B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R11, R11
+ CMPL R8, $0x08
+ JB matchlen_match4_repeat_extend_encodeBlockAsm8B
+
+matchlen_loopback_repeat_extend_encodeBlockAsm8B:
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_repeat_extend_encodeBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeBlockAsm8B
+
+matchlen_loop_repeat_extend_encodeBlockAsm8B:
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ CMPL R8, $0x08
+ JAE matchlen_loopback_repeat_extend_encodeBlockAsm8B
+
+matchlen_match4_repeat_extend_encodeBlockAsm8B:
+ CMPL R8, $0x04
+ JB matchlen_match2_repeat_extend_encodeBlockAsm8B
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm8B
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm8B:
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm8B
+ JB repeat_extend_forward_end_encodeBlockAsm8B
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm8B
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm8B
+
+matchlen_match1_repeat_extend_encodeBlockAsm8B:
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
+ JNE repeat_extend_forward_end_encodeBlockAsm8B
+ LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm8B:
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
+ JZ repeat_as_copy_encodeBlockAsm8B
+
+ // emitRepeat
+ MOVL BX, SI
+ LEAL -4(BX), BX
+ CMPL SI, $0x08
+ JBE repeat_two_match_repeat_encodeBlockAsm8B
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B
+
+cant_repeat_two_offset_match_repeat_encodeBlockAsm8B:
+ CMPL BX, $0x00000104
+ JB repeat_three_match_repeat_encodeBlockAsm8B
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_three_match_repeat_encodeBlockAsm8B:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_two_match_repeat_encodeBlockAsm8B:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_as_copy_encodeBlockAsm8B:
+ // emitCopy
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B
+ CMPL SI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, BX
+
+ // emitRepeat
+ LEAL -4(BX), BX
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+ MOVL BX, SI
+ LEAL -4(BX), BX
+ CMPL SI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+long_offset_short_repeat_as_copy_encodeBlockAsm8B:
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL BX, SI
+ LEAL -4(BX), BX
+ CMPL SI, $0x08
+ JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
+ CMPL BX, $0x00000104
+ JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
+ LEAL -256(BX), BX
+ MOVW $0x0019, (AX)
+ MOVW BX, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
+ LEAL -4(BX), BX
+ MOVW $0x0015, (AX)
+ MOVB BL, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeBlockAsm8B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm8B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm8B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeBlockAsm8B
+
+no_repeat_found_encodeBlockAsm8B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBlockAsm8B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeBlockAsm8B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeBlockAsm8B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBlockAsm8B
+
+candidate3_match_encodeBlockAsm8B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeBlockAsm8B
+
+candidate2_match_encodeBlockAsm8B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeBlockAsm8B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBlockAsm8B
+
+match_extend_back_loop_encodeBlockAsm8B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBlockAsm8B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBlockAsm8B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBlockAsm8B
+ JMP match_extend_back_loop_encodeBlockAsm8B
+
+match_extend_back_end_encodeBlockAsm8B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBlockAsm8B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeBlockAsm8B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeBlockAsm8B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeBlockAsm8B
+ JB three_bytes_match_emit_encodeBlockAsm8B
+
+three_bytes_match_emit_encodeBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBlockAsm8B
+
+two_bytes_match_emit_encodeBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeBlockAsm8B
+ JMP memmove_long_match_emit_encodeBlockAsm8B
+
+one_byte_match_emit_encodeBlockAsm8B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm8B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm8B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeBlockAsm8B
+
+memmove_long_match_emit_encodeBlockAsm8B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm8B:
+match_nolit_loop_encodeBlockAsm8B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeBlockAsm8B
+
+matchlen_loopback_match_nolit_encodeBlockAsm8B:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_encodeBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeBlockAsm8B
+
+matchlen_loop_match_nolit_encodeBlockAsm8B:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeBlockAsm8B
+
+matchlen_match4_match_nolit_encodeBlockAsm8B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeBlockAsm8B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeBlockAsm8B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm8B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm8B
+ JB match_nolit_end_encodeBlockAsm8B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeBlockAsm8B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm8B
+
+matchlen_match1_match_nolit_encodeBlockAsm8B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeBlockAsm8B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm8B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBlockAsm8B
+ CMPL BX, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm8B
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R9
+
+ // emitRepeat
+ LEAL -4(R9), R9
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ MOVL R9, BX
+ LEAL -4(R9), R9
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+long_offset_short_match_nolit_encodeBlockAsm8B:
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R9, BX
+ LEAL -4(R9), R9
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short:
+ CMPL R9, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short
+ LEAL -256(R9), R9
+ MOVW $0x0019, (AX)
+ MOVW R9, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short:
+ LEAL -4(R9), R9
+ MOVW $0x0015, (AX)
+ MOVB R9, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short:
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+two_byte_offset_short_match_nolit_encodeBlockAsm8B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBlockAsm8B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+emit_copy_three_match_nolit_encodeBlockAsm8B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm8B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm8B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBlockAsm8B:
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x38, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x38, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeBlockAsm8B
+ INCL CX
+ JMP search_loop_encodeBlockAsm8B
+
+emit_remainder_encodeBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBlockAsm8B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBlockAsm8B
+ JB three_bytes_emit_remainder_encodeBlockAsm8B
+
+three_bytes_emit_remainder_encodeBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBlockAsm8B
+
+two_bytes_emit_remainder_encodeBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBlockAsm8B
+ JMP memmove_long_emit_remainder_encodeBlockAsm8B
+
+one_byte_emit_remainder_encodeBlockAsm8B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm8B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBlockAsm8B
+
+memmove_long_emit_remainder_encodeBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm8B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBetterBlockAsm(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm(SB), $589848-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00001200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBetterBlockAsm
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -6(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBetterBlockAsm:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ CMPL BX, $0x63
+ JBE check_maxskip_ok_encodeBetterBlockAsm
+ LEAL 100(CX), BX
+ JMP check_maxskip_cont_encodeBetterBlockAsm
+
+check_maxskip_ok_encodeBetterBlockAsm:
+ LEAL 1(CX)(BX*1), BX
+
+check_maxskip_cont_encodeBetterBlockAsm:
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 524312(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 524312(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm
+ CMPQ R10, SI
+ JNE no_short_found_encodeBetterBlockAsm
+ MOVL DI, BX
+ JMP candidate_match_encodeBetterBlockAsm
+
+no_short_found_encodeBetterBlockAsm:
+ CMPL R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm
+ CMPL R10, SI
+ JEQ candidateS_match_encodeBetterBlockAsm
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm
+
+candidateS_match_encodeBetterBlockAsm:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBetterBlockAsm
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeBetterBlockAsm:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBetterBlockAsm
+
+match_extend_back_loop_encodeBetterBlockAsm:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBetterBlockAsm
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBetterBlockAsm
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBetterBlockAsm
+ JMP match_extend_back_loop_encodeBetterBlockAsm
+
+match_extend_back_end_encodeBetterBlockAsm:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm
+
+matchlen_loopback_match_nolit_encodeBetterBlockAsm:
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_match_nolit_encodeBetterBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm
+
+matchlen_loop_match_nolit_encodeBetterBlockAsm:
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm
+ JB match_nolit_end_encodeBetterBlockAsm
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeBetterBlockAsm
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeBetterBlockAsm:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL 16(SP), DI
+ JEQ match_is_repeat_encodeBetterBlockAsm
+ CMPL R11, $0x01
+ JA match_length_ok_encodeBetterBlockAsm
+ CMPL DI, $0x0000ffff
+ JBE match_length_ok_encodeBetterBlockAsm
+ MOVL 20(SP), CX
+ INCL CX
+ JMP search_loop_encodeBetterBlockAsm
+
+match_length_ok_encodeBetterBlockAsm:
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm
+ CMPL BX, $0x00010000
+ JB three_bytes_match_emit_encodeBetterBlockAsm
+ CMPL BX, $0x01000000
+ JB four_bytes_match_emit_encodeBetterBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+four_bytes_match_emit_encodeBetterBlockAsm:
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+three_bytes_match_emit_encodeBetterBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+two_bytes_match_emit_encodeBetterBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+one_byte_match_emit_encodeBetterBlockAsm:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm
+
+memmove_long_match_emit_encodeBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL DI, $0x00010000
+ JB two_byte_offset_match_nolit_encodeBetterBlockAsm
+ CMPL R11, $0x40
+ JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm
+ MOVB $0xff, (AX)
+ MOVL DI, 1(AX)
+ LEAL -64(R11), R11
+ ADDQ $0x05, AX
+ CMPL R11, $0x04
+ JB four_bytes_remain_match_nolit_encodeBetterBlockAsm
+
+ // emitRepeat
+emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy:
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL R11, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy
+
+repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy:
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+four_bytes_remain_match_nolit_encodeBetterBlockAsm:
+ TESTL R11, R11
+ JZ match_nolit_emitcopy_end_encodeBetterBlockAsm
+ XORL BX, BX
+ LEAL -1(BX)(R11*4), R11
+ MOVB R11, (AX)
+ MOVL DI, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+two_byte_offset_match_nolit_encodeBetterBlockAsm:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm
+ CMPL DI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ MOVL DI, R8
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R11
+
+ // emitRepeat
+ LEAL -4(R11), R11
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL R11, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+long_offset_short_match_nolit_encodeBetterBlockAsm:
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+
+ // emitRepeat
+emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL R11, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short
+
+repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+match_is_repeat_encodeBetterBlockAsm:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm
+ CMPL BX, $0x00010000
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm
+ CMPL BX, $0x01000000
+ JB four_bytes_match_emit_repeat_encodeBetterBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+four_bytes_match_emit_repeat_encodeBetterBlockAsm:
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitRepeat
+emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm:
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL R11, $0x0100ffff
+ JB repeat_five_match_nolit_repeat_encodeBetterBlockAsm
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm
+
+repeat_five_match_nolit_repeat_encodeBetterBlockAsm:
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_repeat_encodeBetterBlockAsm:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm:
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x2f, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 524312(SP)(R10*4)
+ MOVL R13, 524312(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeBetterBlockAsm:
+ CMPQ DI, R8
+ JAE search_loop_encodeBetterBlockAsm
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x2f, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeBetterBlockAsm
+
+emit_remainder_encodeBetterBlockAsm:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 5(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm
+ CMPL DX, $0x01000000
+ JB four_bytes_emit_remainder_encodeBetterBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+four_bytes_emit_remainder_encodeBetterBlockAsm:
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+three_bytes_emit_remainder_encodeBetterBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+two_bytes_emit_remainder_encodeBetterBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+one_byte_emit_remainder_encodeBetterBlockAsm:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBetterBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm
+
+memmove_long_emit_remainder_encodeBetterBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm4MB(SB), $589848-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00001200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm4MB:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBetterBlockAsm4MB
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -6(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBetterBlockAsm4MB:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ CMPL BX, $0x63
+ JBE check_maxskip_ok_encodeBetterBlockAsm4MB
+ LEAL 100(CX), BX
+ JMP check_maxskip_cont_encodeBetterBlockAsm4MB
+
+check_maxskip_ok_encodeBetterBlockAsm4MB:
+ LEAL 1(CX)(BX*1), BX
+
+check_maxskip_cont_encodeBetterBlockAsm4MB:
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm4MB
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 524312(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 524312(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm4MB
+ CMPQ R10, SI
+ JNE no_short_found_encodeBetterBlockAsm4MB
+ MOVL DI, BX
+ JMP candidate_match_encodeBetterBlockAsm4MB
+
+no_short_found_encodeBetterBlockAsm4MB:
+ CMPL R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm4MB
+ CMPL R10, SI
+ JEQ candidateS_match_encodeBetterBlockAsm4MB
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm4MB
+
+candidateS_match_encodeBetterBlockAsm4MB:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBetterBlockAsm4MB
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeBetterBlockAsm4MB:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBetterBlockAsm4MB
+
+match_extend_back_loop_encodeBetterBlockAsm4MB:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBetterBlockAsm4MB
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBetterBlockAsm4MB
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBetterBlockAsm4MB
+ JMP match_extend_back_loop_encodeBetterBlockAsm4MB
+
+match_extend_back_end_encodeBetterBlockAsm4MB:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 4(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm4MB:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
+
+matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB:
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_match_nolit_encodeBetterBlockAsm4MB
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm4MB
+
+matchlen_loop_match_nolit_encodeBetterBlockAsm4MB:
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ JB match_nolit_end_encodeBetterBlockAsm4MB
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm4MB
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm4MB:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeBetterBlockAsm4MB
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeBetterBlockAsm4MB:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL 16(SP), DI
+ JEQ match_is_repeat_encodeBetterBlockAsm4MB
+ CMPL R11, $0x01
+ JA match_length_ok_encodeBetterBlockAsm4MB
+ CMPL DI, $0x0000ffff
+ JBE match_length_ok_encodeBetterBlockAsm4MB
+ MOVL 20(SP), CX
+ INCL CX
+ JMP search_loop_encodeBetterBlockAsm4MB
+
+match_length_ok_encodeBetterBlockAsm4MB:
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm4MB
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm4MB
+ CMPL BX, $0x00010000
+ JB three_bytes_match_emit_encodeBetterBlockAsm4MB
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
+
+three_bytes_match_emit_encodeBetterBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
+
+two_bytes_match_emit_encodeBetterBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm4MB
+ JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
+
+one_byte_match_emit_encodeBetterBlockAsm4MB:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm4MB:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB
+
+memmove_long_match_emit_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm4MB:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL DI, $0x00010000
+ JB two_byte_offset_match_nolit_encodeBetterBlockAsm4MB
+ CMPL R11, $0x40
+ JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
+ MOVB $0xff, (AX)
+ MOVL DI, 1(AX)
+ LEAL -64(R11), R11
+ ADDQ $0x05, AX
+ CMPL R11, $0x04
+ JB four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB:
+ TESTL R11, R11
+ JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+ XORL BX, BX
+ LEAL -1(BX)(R11*4), R11
+ MOVB R11, (AX)
+ MOVL DI, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+two_byte_offset_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB
+ CMPL DI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R11
+
+ // emitRepeat
+ LEAL -4(R11), R11
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+long_offset_short_match_nolit_encodeBetterBlockAsm4MB:
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm4MB:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+match_is_repeat_encodeBetterBlockAsm4MB:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm4MB
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
+ CMPL BX, $0x00010000
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm4MB
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm4MB:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB
+ CMPL R11, $0x00010100
+ JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB
+ LEAL -65536(R11), R11
+ MOVL R11, DI
+ MOVW $0x001d, (AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm4MB:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm4MB
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm4MB:
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x2f, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 524312(SP)(R10*4)
+ MOVL R13, 524312(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeBetterBlockAsm4MB:
+ CMPQ DI, R8
+ JAE search_loop_encodeBetterBlockAsm4MB
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x2f, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeBetterBlockAsm4MB
+
+emit_remainder_encodeBetterBlockAsm4MB:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 4(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm4MB
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm4MB:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm4MB
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm4MB
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm4MB
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
+
+three_bytes_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
+
+two_bytes_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm4MB
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
+
+one_byte_emit_remainder_encodeBetterBlockAsm4MB:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
+
+memmove_long_emit_remainder_encodeBetterBlockAsm4MB:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBetterBlockAsm12B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm12B(SB), $81944-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000280, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm12B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBetterBlockAsm12B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -6(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBetterBlockAsm12B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm12B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 65560(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 65560(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm12B
+ CMPQ R10, SI
+ JNE no_short_found_encodeBetterBlockAsm12B
+ MOVL DI, BX
+ JMP candidate_match_encodeBetterBlockAsm12B
+
+no_short_found_encodeBetterBlockAsm12B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm12B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeBetterBlockAsm12B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm12B
+
+candidateS_match_encodeBetterBlockAsm12B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBetterBlockAsm12B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeBetterBlockAsm12B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBetterBlockAsm12B
+
+match_extend_back_loop_encodeBetterBlockAsm12B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBetterBlockAsm12B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBetterBlockAsm12B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBetterBlockAsm12B
+ JMP match_extend_back_loop_encodeBetterBlockAsm12B
+
+match_extend_back_end_encodeBetterBlockAsm12B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm12B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm12B
+
+matchlen_loopback_match_nolit_encodeBetterBlockAsm12B:
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_match_nolit_encodeBetterBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm12B
+
+matchlen_loop_match_nolit_encodeBetterBlockAsm12B:
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm12B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm12B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm12B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm12B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
+ JB match_nolit_end_encodeBetterBlockAsm12B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm12B
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm12B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeBetterBlockAsm12B
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeBetterBlockAsm12B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL 16(SP), DI
+ JEQ match_is_repeat_encodeBetterBlockAsm12B
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm12B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm12B
+ JB three_bytes_match_emit_encodeBetterBlockAsm12B
+
+three_bytes_match_emit_encodeBetterBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm12B
+
+two_bytes_match_emit_encodeBetterBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm12B
+ JMP memmove_long_match_emit_encodeBetterBlockAsm12B
+
+one_byte_match_emit_encodeBetterBlockAsm12B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm12B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B
+
+memmove_long_match_emit_encodeBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm12B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B
+ CMPL DI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R11
+
+ // emitRepeat
+ LEAL -4(R11), R11
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+long_offset_short_match_nolit_encodeBetterBlockAsm12B:
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm12B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+match_is_repeat_encodeBetterBlockAsm12B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm12B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm12B
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm12B
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm12B
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm12B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm12B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm12B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm12B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x34, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x32, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x34, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 65560(SP)(R10*4)
+ MOVL R13, 65560(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeBetterBlockAsm12B:
+ CMPQ DI, R8
+ JAE search_loop_encodeBetterBlockAsm12B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeBetterBlockAsm12B
+
+emit_remainder_encodeBetterBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm12B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm12B
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm12B
+
+three_bytes_emit_remainder_encodeBetterBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B
+
+two_bytes_emit_remainder_encodeBetterBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm12B
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B
+
+one_byte_emit_remainder_encodeBetterBlockAsm12B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBetterBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B
+
+memmove_long_emit_remainder_encodeBetterBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm12B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBetterBlockAsm10B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm10B(SB), $20504-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x000000a0, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm10B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBetterBlockAsm10B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -6(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBetterBlockAsm10B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm10B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 16408(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 16408(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm10B
+ CMPQ R10, SI
+ JNE no_short_found_encodeBetterBlockAsm10B
+ MOVL DI, BX
+ JMP candidate_match_encodeBetterBlockAsm10B
+
+no_short_found_encodeBetterBlockAsm10B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm10B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeBetterBlockAsm10B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm10B
+
+candidateS_match_encodeBetterBlockAsm10B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBetterBlockAsm10B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeBetterBlockAsm10B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBetterBlockAsm10B
+
+match_extend_back_loop_encodeBetterBlockAsm10B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBetterBlockAsm10B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBetterBlockAsm10B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBetterBlockAsm10B
+ JMP match_extend_back_loop_encodeBetterBlockAsm10B
+
+match_extend_back_end_encodeBetterBlockAsm10B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm10B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm10B
+
+matchlen_loopback_match_nolit_encodeBetterBlockAsm10B:
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_match_nolit_encodeBetterBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm10B
+
+matchlen_loop_match_nolit_encodeBetterBlockAsm10B:
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm10B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm10B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm10B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm10B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
+ JB match_nolit_end_encodeBetterBlockAsm10B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm10B
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm10B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeBetterBlockAsm10B
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeBetterBlockAsm10B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL 16(SP), DI
+ JEQ match_is_repeat_encodeBetterBlockAsm10B
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm10B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm10B
+ JB three_bytes_match_emit_encodeBetterBlockAsm10B
+
+three_bytes_match_emit_encodeBetterBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm10B
+
+two_bytes_match_emit_encodeBetterBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm10B
+ JMP memmove_long_match_emit_encodeBetterBlockAsm10B
+
+one_byte_match_emit_encodeBetterBlockAsm10B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm10B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B
+
+memmove_long_match_emit_encodeBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm10B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B
+ CMPL DI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R11
+
+ // emitRepeat
+ LEAL -4(R11), R11
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+long_offset_short_match_nolit_encodeBetterBlockAsm10B:
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm10B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+match_is_repeat_encodeBetterBlockAsm10B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm10B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm10B
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm10B
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm10B
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm10B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
+ CMPL DI, $0x00000800
+ JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm10B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm10B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm10B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x36, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x34, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x36, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 16408(SP)(R10*4)
+ MOVL R13, 16408(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeBetterBlockAsm10B:
+ CMPQ DI, R8
+ JAE search_loop_encodeBetterBlockAsm10B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeBetterBlockAsm10B
+
+emit_remainder_encodeBetterBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm10B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm10B
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm10B
+
+three_bytes_emit_remainder_encodeBetterBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B
+
+two_bytes_emit_remainder_encodeBetterBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm10B
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B
+
+one_byte_emit_remainder_encodeBetterBlockAsm10B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBetterBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B
+
+memmove_long_emit_remainder_encodeBetterBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm10B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeBetterBlockAsm8B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm8B(SB), $5144-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000028, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm8B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeBetterBlockAsm8B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -6(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeBetterBlockAsm8B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm8B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x38, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 4120(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 4120(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm8B
+ CMPQ R10, SI
+ JNE no_short_found_encodeBetterBlockAsm8B
+ MOVL DI, BX
+ JMP candidate_match_encodeBetterBlockAsm8B
+
+no_short_found_encodeBetterBlockAsm8B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeBetterBlockAsm8B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeBetterBlockAsm8B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm8B
+
+candidateS_match_encodeBetterBlockAsm8B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeBetterBlockAsm8B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeBetterBlockAsm8B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeBetterBlockAsm8B
+
+match_extend_back_loop_encodeBetterBlockAsm8B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeBetterBlockAsm8B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeBetterBlockAsm8B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeBetterBlockAsm8B
+ JMP match_extend_back_loop_encodeBetterBlockAsm8B
+
+match_extend_back_end_encodeBetterBlockAsm8B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm8B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm8B
+
+matchlen_loopback_match_nolit_encodeBetterBlockAsm8B:
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_match_nolit_encodeBetterBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeBetterBlockAsm8B
+
+matchlen_loop_match_nolit_encodeBetterBlockAsm8B:
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm8B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm8B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm8B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm8B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
+ JB match_nolit_end_encodeBetterBlockAsm8B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm8B
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm8B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeBetterBlockAsm8B
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeBetterBlockAsm8B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL 16(SP), DI
+ JEQ match_is_repeat_encodeBetterBlockAsm8B
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm8B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm8B
+ JB three_bytes_match_emit_encodeBetterBlockAsm8B
+
+three_bytes_match_emit_encodeBetterBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm8B
+
+two_bytes_match_emit_encodeBetterBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm8B
+ JMP memmove_long_match_emit_encodeBetterBlockAsm8B
+
+one_byte_match_emit_encodeBetterBlockAsm8B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeBetterBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4
+ CMPQ R8, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4:
+ MOVL (R9), R10
+ MOVL R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7:
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm8B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B
+
+memmove_long_match_emit_encodeBetterBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm8B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B
+ CMPL DI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R11
+
+ // emitRepeat
+ LEAL -4(R11), R11
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+long_offset_short_match_nolit_encodeBetterBlockAsm8B:
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm8B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm8B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+match_is_repeat_encodeBetterBlockAsm8B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm8B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm8B
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm8B
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm8B
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm8B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm8B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4
+ CMPQ DI, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4:
+ MOVL (R8), R9
+ MOVL R9, (AX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7:
+ MOVL (R8), R9
+ MOVL -4(R8)(DI*1), R8
+ MOVL R9, (AX)
+ MOVL R8, -4(AX)(DI*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm8B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R12
+ SUBQ R9, R12
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(R8)(R12*1), R9
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R9
+ ADDQ $0x20, R12
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R12*1), X4
+ MOVOU -16(R8)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ DI, R12
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitRepeat
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B
+ CMPL BX, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B:
+ CMPL R11, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B
+ LEAL -256(R11), R11
+ MOVW $0x0019, (AX)
+ MOVW R11, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B:
+ LEAL -4(R11), R11
+ MOVW $0x0015, (AX)
+ MOVB R11, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B:
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
+ ADDQ $0x02, AX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm8B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm8B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm8B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x38, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x36, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x38, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 4120(SP)(R10*4)
+ MOVL R13, 4120(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeBetterBlockAsm8B:
+ CMPQ DI, R8
+ JAE search_loop_encodeBetterBlockAsm8B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeBetterBlockAsm8B
+
+emit_remainder_encodeBetterBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm8B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm8B
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm8B
+
+three_bytes_emit_remainder_encodeBetterBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B
+
+two_bytes_emit_remainder_encodeBetterBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm8B
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B
+
+one_byte_emit_remainder_encodeBetterBlockAsm8B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBetterBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B
+
+memmove_long_emit_remainder_encodeBetterBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm8B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBlockAsm(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBlockAsm(SB), $65560-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBlockAsm
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBlockAsm:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ R8, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_encodeSnappyBlockAsm
+
+repeat_extend_back_loop_encodeSnappyBlockAsm:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_encodeSnappyBlockAsm
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeSnappyBlockAsm
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_encodeSnappyBlockAsm
+
+repeat_extend_back_end_encodeSnappyBlockAsm:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeSnappyBlockAsm
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeSnappyBlockAsm
+ CMPL BX, $0x00010000
+ JB three_bytes_repeat_emit_encodeSnappyBlockAsm
+ CMPL BX, $0x01000000
+ JB four_bytes_repeat_emit_encodeSnappyBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
+
+four_bytes_repeat_emit_encodeSnappyBlockAsm:
+ MOVL BX, R9
+ SHRL $0x10, R9
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R9, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeSnappyBlockAsm
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
+
+one_byte_repeat_emit_encodeSnappyBlockAsm:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeSnappyBlockAsm:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8:
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
+ JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm
+
+matchlen_loopback_repeat_extend_encodeSnappyBlockAsm:
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
+ JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm
+
+matchlen_loop_repeat_extend_encodeSnappyBlockAsm:
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
+ JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_encodeSnappyBlockAsm
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_encodeSnappyBlockAsm:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+ CMPL SI, $0x00010000
+ JB two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm
+
+four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm:
+ CMPL BX, $0x40
+ JBE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
+ MOVB $0xff, (AX)
+ MOVL SI, 1(AX)
+ LEAL -64(BX), BX
+ ADDQ $0x05, AX
+ CMPL BX, $0x04
+ JB four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
+ JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm
+
+four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm:
+ TESTL BX, BX
+ JZ repeat_end_emit_encodeSnappyBlockAsm
+ XORL DI, DI
+ LEAL -1(DI)(BX*4), BX
+ MOVB BL, (AX)
+ MOVL SI, 1(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm
+
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeSnappyBlockAsm:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeSnappyBlockAsm
+
+no_repeat_found_encodeSnappyBlockAsm:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBlockAsm
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeSnappyBlockAsm
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeSnappyBlockAsm
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBlockAsm
+
+candidate3_match_encodeSnappyBlockAsm:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeSnappyBlockAsm
+
+candidate2_match_encodeSnappyBlockAsm:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBlockAsm:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm
+
+match_extend_back_loop_encodeSnappyBlockAsm:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBlockAsm
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBlockAsm
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm
+ JMP match_extend_back_loop_encodeSnappyBlockAsm
+
+match_extend_back_end_encodeSnappyBlockAsm:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBlockAsm:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeSnappyBlockAsm
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBlockAsm
+ CMPL DI, $0x00010000
+ JB three_bytes_match_emit_encodeSnappyBlockAsm
+ CMPL DI, $0x01000000
+ JB four_bytes_match_emit_encodeSnappyBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DI, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm
+
+four_bytes_match_emit_encodeSnappyBlockAsm:
+ MOVL DI, R9
+ SHRL $0x10, R9
+ MOVB $0xf8, (AX)
+ MOVW DI, 1(AX)
+ MOVB R9, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm
+
+three_bytes_match_emit_encodeSnappyBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm
+
+two_bytes_match_emit_encodeSnappyBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeSnappyBlockAsm
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm
+
+one_byte_match_emit_encodeSnappyBlockAsm:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBlockAsm:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBlockAsm
+
+memmove_long_match_emit_encodeSnappyBlockAsm:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm:
+match_nolit_loop_encodeSnappyBlockAsm:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBlockAsm
+
+matchlen_loopback_match_nolit_encodeSnappyBlockAsm:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm
+
+matchlen_loop_match_nolit_encodeSnappyBlockAsm:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBlockAsm
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm
+ JB match_nolit_end_encodeSnappyBlockAsm
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeSnappyBlockAsm
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeSnappyBlockAsm:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL BX, $0x00010000
+ JB two_byte_offset_match_nolit_encodeSnappyBlockAsm
+
+four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm:
+ CMPL R9, $0x40
+ JBE four_bytes_remain_match_nolit_encodeSnappyBlockAsm
+ MOVB $0xff, (AX)
+ MOVL BX, 1(AX)
+ LEAL -64(R9), R9
+ ADDQ $0x05, AX
+ CMPL R9, $0x04
+ JB four_bytes_remain_match_nolit_encodeSnappyBlockAsm
+ JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm
+
+four_bytes_remain_match_nolit_encodeSnappyBlockAsm:
+ TESTL R9, R9
+ JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm
+ XORL SI, SI
+ LEAL -1(SI)(R9*4), R9
+ MOVB R9, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm
+
+two_byte_offset_match_nolit_encodeSnappyBlockAsm:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm:
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeSnappyBlockAsm
+ INCL CX
+ JMP search_loop_encodeSnappyBlockAsm
+
+emit_remainder_encodeSnappyBlockAsm:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 5(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBlockAsm:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBlockAsm
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBlockAsm
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeSnappyBlockAsm
+ CMPL DX, $0x01000000
+ JB four_bytes_emit_remainder_encodeSnappyBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
+
+four_bytes_emit_remainder_encodeSnappyBlockAsm:
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBlockAsm
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
+
+one_byte_emit_remainder_encodeSnappyBlockAsm:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBlockAsm64K(SB), $65560-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm64K:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBlockAsm64K
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBlockAsm64K:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm64K
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ R8, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm64K
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_encodeSnappyBlockAsm64K
+
+repeat_extend_back_loop_encodeSnappyBlockAsm64K:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_encodeSnappyBlockAsm64K
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeSnappyBlockAsm64K
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K
+
+repeat_extend_back_end_encodeSnappyBlockAsm64K:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeSnappyBlockAsm64K
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeSnappyBlockAsm64K
+ JB three_bytes_repeat_emit_encodeSnappyBlockAsm64K
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm64K:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm64K:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeSnappyBlockAsm64K
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
+
+one_byte_repeat_emit_encodeSnappyBlockAsm64K:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8:
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
+ JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
+
+matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K:
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
+ JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
+
+matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K:
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
+ JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm64K
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_encodeSnappyBlockAsm64K:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm64K
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeSnappyBlockAsm64K:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeSnappyBlockAsm64K
+
+no_repeat_found_encodeSnappyBlockAsm64K:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBlockAsm64K
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeSnappyBlockAsm64K
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeSnappyBlockAsm64K
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBlockAsm64K
+
+candidate3_match_encodeSnappyBlockAsm64K:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeSnappyBlockAsm64K
+
+candidate2_match_encodeSnappyBlockAsm64K:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBlockAsm64K:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm64K
+
+match_extend_back_loop_encodeSnappyBlockAsm64K:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBlockAsm64K
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBlockAsm64K
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm64K
+ JMP match_extend_back_loop_encodeSnappyBlockAsm64K
+
+match_extend_back_end_encodeSnappyBlockAsm64K:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBlockAsm64K:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeSnappyBlockAsm64K
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBlockAsm64K
+ JB three_bytes_match_emit_encodeSnappyBlockAsm64K
+
+three_bytes_match_emit_encodeSnappyBlockAsm64K:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
+
+two_bytes_match_emit_encodeSnappyBlockAsm64K:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeSnappyBlockAsm64K
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
+
+one_byte_match_emit_encodeSnappyBlockAsm64K:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm64K:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K
+
+memmove_long_match_emit_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm64K:
+match_nolit_loop_encodeSnappyBlockAsm64K:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
+
+matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm64K
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm64K
+
+matchlen_loop_match_nolit_encodeSnappyBlockAsm64K:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
+ JB match_nolit_end_encodeSnappyBlockAsm64K
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm64K
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm64K:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeSnappyBlockAsm64K
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeSnappyBlockAsm64K:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm64K:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm64K:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm64K
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm64K:
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeSnappyBlockAsm64K
+ INCL CX
+ JMP search_loop_encodeSnappyBlockAsm64K
+
+emit_remainder_encodeSnappyBlockAsm64K:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBlockAsm64K:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBlockAsm64K
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBlockAsm64K
+ JB three_bytes_emit_remainder_encodeSnappyBlockAsm64K
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm64K:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm64K:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBlockAsm64K
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K
+
+one_byte_emit_remainder_encodeSnappyBlockAsm64K:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm64K:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBlockAsm12B(SB), $16408-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000080, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm12B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBlockAsm12B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBlockAsm12B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm12B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ SHLQ $0x18, R10
+ IMULQ R8, R10
+ SHRQ $0x34, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm12B
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_encodeSnappyBlockAsm12B
+
+repeat_extend_back_loop_encodeSnappyBlockAsm12B:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_encodeSnappyBlockAsm12B
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeSnappyBlockAsm12B
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B
+
+repeat_extend_back_end_encodeSnappyBlockAsm12B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeSnappyBlockAsm12B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeSnappyBlockAsm12B
+ JB three_bytes_repeat_emit_encodeSnappyBlockAsm12B
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeSnappyBlockAsm12B
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
+
+one_byte_repeat_emit_encodeSnappyBlockAsm12B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8:
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
+ JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
+
+matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B:
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
+ JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B:
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
+ JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm12B
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_encodeSnappyBlockAsm12B:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm12B
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeSnappyBlockAsm12B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeSnappyBlockAsm12B
+
+no_repeat_found_encodeSnappyBlockAsm12B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBlockAsm12B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeSnappyBlockAsm12B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeSnappyBlockAsm12B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBlockAsm12B
+
+candidate3_match_encodeSnappyBlockAsm12B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeSnappyBlockAsm12B
+
+candidate2_match_encodeSnappyBlockAsm12B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBlockAsm12B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm12B
+
+match_extend_back_loop_encodeSnappyBlockAsm12B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBlockAsm12B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBlockAsm12B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm12B
+ JMP match_extend_back_loop_encodeSnappyBlockAsm12B
+
+match_extend_back_end_encodeSnappyBlockAsm12B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBlockAsm12B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeSnappyBlockAsm12B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBlockAsm12B
+ JB three_bytes_match_emit_encodeSnappyBlockAsm12B
+
+three_bytes_match_emit_encodeSnappyBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
+
+two_bytes_match_emit_encodeSnappyBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeSnappyBlockAsm12B
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
+
+one_byte_match_emit_encodeSnappyBlockAsm12B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm12B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B
+
+memmove_long_match_emit_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm12B:
+match_nolit_loop_encodeSnappyBlockAsm12B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
+
+matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm12B
+
+matchlen_loop_match_nolit_encodeSnappyBlockAsm12B:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+ JB match_nolit_end_encodeSnappyBlockAsm12B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm12B
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm12B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeSnappyBlockAsm12B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeSnappyBlockAsm12B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm12B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm12B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm12B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm12B:
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x18, DI
+ IMULQ R8, DI
+ SHRQ $0x34, DI
+ SHLQ $0x18, BX
+ IMULQ R8, BX
+ SHRQ $0x34, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeSnappyBlockAsm12B
+ INCL CX
+ JMP search_loop_encodeSnappyBlockAsm12B
+
+emit_remainder_encodeSnappyBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBlockAsm12B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBlockAsm12B
+ JB three_bytes_emit_remainder_encodeSnappyBlockAsm12B
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBlockAsm12B
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
+
+one_byte_emit_remainder_encodeSnappyBlockAsm12B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBlockAsm10B(SB), $4120-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000020, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm10B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBlockAsm10B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBlockAsm10B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm10B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ R8, R10
+ SHRQ $0x36, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm10B
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_encodeSnappyBlockAsm10B
+
+repeat_extend_back_loop_encodeSnappyBlockAsm10B:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_encodeSnappyBlockAsm10B
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeSnappyBlockAsm10B
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B
+
+repeat_extend_back_end_encodeSnappyBlockAsm10B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeSnappyBlockAsm10B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeSnappyBlockAsm10B
+ JB three_bytes_repeat_emit_encodeSnappyBlockAsm10B
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeSnappyBlockAsm10B
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
+
+one_byte_repeat_emit_encodeSnappyBlockAsm10B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8:
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
+ JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
+
+matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B:
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
+ JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B:
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
+ JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm10B
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_encodeSnappyBlockAsm10B:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm10B
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeSnappyBlockAsm10B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeSnappyBlockAsm10B
+
+no_repeat_found_encodeSnappyBlockAsm10B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBlockAsm10B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeSnappyBlockAsm10B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeSnappyBlockAsm10B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBlockAsm10B
+
+candidate3_match_encodeSnappyBlockAsm10B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeSnappyBlockAsm10B
+
+candidate2_match_encodeSnappyBlockAsm10B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBlockAsm10B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm10B
+
+match_extend_back_loop_encodeSnappyBlockAsm10B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBlockAsm10B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBlockAsm10B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm10B
+ JMP match_extend_back_loop_encodeSnappyBlockAsm10B
+
+match_extend_back_end_encodeSnappyBlockAsm10B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBlockAsm10B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeSnappyBlockAsm10B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBlockAsm10B
+ JB three_bytes_match_emit_encodeSnappyBlockAsm10B
+
+three_bytes_match_emit_encodeSnappyBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
+
+two_bytes_match_emit_encodeSnappyBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeSnappyBlockAsm10B
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
+
+one_byte_match_emit_encodeSnappyBlockAsm10B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm10B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B
+
+memmove_long_match_emit_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm10B:
+match_nolit_loop_encodeSnappyBlockAsm10B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
+
+matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm10B
+
+matchlen_loop_match_nolit_encodeSnappyBlockAsm10B:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+ JB match_nolit_end_encodeSnappyBlockAsm10B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm10B
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm10B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeSnappyBlockAsm10B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeSnappyBlockAsm10B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm10B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm10B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm10B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm10B:
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x36, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x36, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeSnappyBlockAsm10B
+ INCL CX
+ JMP search_loop_encodeSnappyBlockAsm10B
+
+emit_remainder_encodeSnappyBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBlockAsm10B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBlockAsm10B
+ JB three_bytes_emit_remainder_encodeSnappyBlockAsm10B
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBlockAsm10B
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
+
+one_byte_emit_remainder_encodeSnappyBlockAsm10B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBlockAsm8B(SB), $1048-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000008, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm8B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBlockAsm8B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBlockAsm8B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm8B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
+ SHLQ $0x20, R10
+ IMULQ R8, R10
+ SHRQ $0x38, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm8B
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_encodeSnappyBlockAsm8B
+
+repeat_extend_back_loop_encodeSnappyBlockAsm8B:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_encodeSnappyBlockAsm8B
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_encodeSnappyBlockAsm8B
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B
+
+repeat_extend_back_end_encodeSnappyBlockAsm8B:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_encodeSnappyBlockAsm8B
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_encodeSnappyBlockAsm8B
+ JB three_bytes_repeat_emit_encodeSnappyBlockAsm8B
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_encodeSnappyBlockAsm8B
+ JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
+
+one_byte_repeat_emit_encodeSnappyBlockAsm8B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveShort
+ CMPQ DI, $0x08
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8
+ CMPQ DI, $0x10
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
+ CMPQ DI, $0x20
+ JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8:
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DI*1)
+ JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B:
+ MOVQ BX, AX
+ JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(DI*1), BX
+
+ // genMemMoveLong
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
+ JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
+
+matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B:
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
+ JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B:
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
+ JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm8B
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_encodeSnappyBlockAsm8B:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B
+ MOVB $0xee, (AX)
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeSnappyBlockAsm8B
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, AX
+
+repeat_end_emit_encodeSnappyBlockAsm8B:
+ MOVL CX, 12(SP)
+ JMP search_loop_encodeSnappyBlockAsm8B
+
+no_repeat_found_encodeSnappyBlockAsm8B:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBlockAsm8B
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_encodeSnappyBlockAsm8B
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_encodeSnappyBlockAsm8B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBlockAsm8B
+
+candidate3_match_encodeSnappyBlockAsm8B:
+ ADDL $0x02, CX
+ JMP candidate_match_encodeSnappyBlockAsm8B
+
+candidate2_match_encodeSnappyBlockAsm8B:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBlockAsm8B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm8B
+
+match_extend_back_loop_encodeSnappyBlockAsm8B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBlockAsm8B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBlockAsm8B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBlockAsm8B
+ JMP match_extend_back_loop_encodeSnappyBlockAsm8B
+
+match_extend_back_end_encodeSnappyBlockAsm8B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBlockAsm8B:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
+ JB one_byte_match_emit_encodeSnappyBlockAsm8B
+ CMPL DI, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBlockAsm8B
+ JB three_bytes_match_emit_encodeSnappyBlockAsm8B
+
+three_bytes_match_emit_encodeSnappyBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
+
+two_bytes_match_emit_encodeSnappyBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DI, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DI, $0x40
+ JB memmove_match_emit_encodeSnappyBlockAsm8B
+ JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
+
+one_byte_match_emit_encodeSnappyBlockAsm8B:
+ SHLB $0x02, DI
+ MOVB DI, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8:
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm8B:
+ MOVQ DI, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B
+
+memmove_long_match_emit_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(R8*1), DI
+
+ // genMemMoveLong
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
+ ADDQ $0x20, R12
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm8B:
+match_nolit_loop_encodeSnappyBlockAsm8B:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
+
+matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_encodeSnappyBlockAsm8B
+
+matchlen_loop_match_nolit_encodeSnappyBlockAsm8B:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
+ JB match_nolit_end_encodeSnappyBlockAsm8B
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm8B
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm8B:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_encodeSnappyBlockAsm8B
+ LEAL 1(R9), R9
+
+match_nolit_end_encodeSnappyBlockAsm8B:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B
+ MOVB $0xee, (AX)
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm8B:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm8B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBlockAsm8B
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm8B:
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x38, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x38, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_encodeSnappyBlockAsm8B
+ INCL CX
+ JMP search_loop_encodeSnappyBlockAsm8B
+
+emit_remainder_encodeSnappyBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBlockAsm8B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBlockAsm8B
+ JB three_bytes_emit_remainder_encodeSnappyBlockAsm8B
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBlockAsm8B
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B
+
+one_byte_emit_remainder_encodeSnappyBlockAsm8B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBetterBlockAsm(SB), $589848-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00001200, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBetterBlockAsm:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBetterBlockAsm
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBetterBlockAsm:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ CMPL BX, $0x63
+ JBE check_maxskip_ok_encodeSnappyBetterBlockAsm
+ LEAL 100(CX), BX
+ JMP check_maxskip_cont_encodeSnappyBetterBlockAsm
+
+check_maxskip_ok_encodeSnappyBetterBlockAsm:
+ LEAL 1(CX)(BX*1), BX
+
+check_maxskip_cont_encodeSnappyBetterBlockAsm:
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 524312(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 524312(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm
+ CMPQ R10, SI
+ JNE no_short_found_encodeSnappyBetterBlockAsm
+ MOVL DI, BX
+ JMP candidate_match_encodeSnappyBetterBlockAsm
+
+no_short_found_encodeSnappyBetterBlockAsm:
+ CMPL R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm
+ CMPL R10, SI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm
+
+candidateS_match_encodeSnappyBetterBlockAsm:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBetterBlockAsm:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm
+
+match_extend_back_loop_encodeSnappyBetterBlockAsm:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBetterBlockAsm
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBetterBlockAsm
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm
+ JMP match_extend_back_loop_encodeSnappyBetterBlockAsm
+
+match_extend_back_end_encodeSnappyBetterBlockAsm:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBetterBlockAsm:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
+
+matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm:
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm
+
+matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm:
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
+ JB match_nolit_end_encodeSnappyBetterBlockAsm
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeSnappyBetterBlockAsm
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeSnappyBetterBlockAsm:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ CMPL R11, $0x01
+ JA match_length_ok_encodeSnappyBetterBlockAsm
+ CMPL DI, $0x0000ffff
+ JBE match_length_ok_encodeSnappyBetterBlockAsm
+ MOVL 20(SP), CX
+ INCL CX
+ JMP search_loop_encodeSnappyBetterBlockAsm
+
+match_length_ok_encodeSnappyBetterBlockAsm:
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeSnappyBetterBlockAsm
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBetterBlockAsm
+ CMPL BX, $0x00010000
+ JB three_bytes_match_emit_encodeSnappyBetterBlockAsm
+ CMPL BX, $0x01000000
+ JB four_bytes_match_emit_encodeSnappyBetterBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL BX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
+
+four_bytes_match_emit_encodeSnappyBetterBlockAsm:
+ MOVL BX, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
+
+three_bytes_match_emit_encodeSnappyBetterBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
+
+two_bytes_match_emit_encodeSnappyBetterBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeSnappyBetterBlockAsm
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
+
+one_byte_match_emit_encodeSnappyBetterBlockAsm:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
+
+memmove_long_match_emit_encodeSnappyBetterBlockAsm:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeSnappyBetterBlockAsm:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL DI, $0x00010000
+ JB two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
+
+four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL R11, $0x40
+ JBE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
+ MOVB $0xff, (AX)
+ MOVL DI, 1(AX)
+ LEAL -64(R11), R11
+ ADDQ $0x05, AX
+ CMPL R11, $0x04
+ JB four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
+ JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm
+
+four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm:
+ TESTL R11, R11
+ JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
+ XORL BX, BX
+ LEAL -1(BX)(R11*4), R11
+ MOVB R11, (AX)
+ MOVL DI, 1(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
+
+two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
+
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
+
+emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBetterBlockAsm:
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x2f, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 524312(SP)(R10*4)
+ MOVL R13, 524312(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeSnappyBetterBlockAsm:
+ CMPQ DI, R8
+ JAE search_loop_encodeSnappyBetterBlockAsm
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x2f, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeSnappyBetterBlockAsm
+
+emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 5(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBetterBlockAsm
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBetterBlockAsm:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm
+ CMPL DX, $0x01000000
+ JB four_bytes_emit_remainder_encodeSnappyBetterBlockAsm
+ MOVB $0xfc, (AX)
+ MOVL DX, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
+
+four_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (AX)
+ MOVW DX, 1(AX)
+ MOVB BL, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
+
+three_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
+
+two_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBetterBlockAsm
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
+
+one_byte_emit_remainder_encodeSnappyBetterBlockAsm:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBetterBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm
+
+memmove_long_emit_remainder_encodeSnappyBetterBlockAsm:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBetterBlockAsm64K(SB), $327704-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000a00, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_encodeSnappyBetterBlockAsm64K:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBetterBlockAsm64K
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+search_loop_encodeSnappyBetterBlockAsm64K:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm64K
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x30, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 262168(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 262168(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm64K
+ CMPQ R10, SI
+ JNE no_short_found_encodeSnappyBetterBlockAsm64K
+ MOVL DI, BX
+ JMP candidate_match_encodeSnappyBetterBlockAsm64K
+
+no_short_found_encodeSnappyBetterBlockAsm64K:
+ CMPL R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm64K
+ CMPL R10, SI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm64K
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm64K
+
+candidateS_match_encodeSnappyBetterBlockAsm64K:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x30, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm64K
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBetterBlockAsm64K:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
+
+match_extend_back_loop_encodeSnappyBetterBlockAsm64K:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBetterBlockAsm64K
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
+ JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K
+
+match_extend_back_end_encodeSnappyBetterBlockAsm64K:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBetterBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBetterBlockAsm64K:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
+
+matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K:
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
+
+matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K:
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
+ JB match_nolit_end_encodeSnappyBetterBlockAsm64K
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeSnappyBetterBlockAsm64K
+ LEAL 1(R11), R11
+
+match_nolit_end_encodeSnappyBetterBlockAsm64K:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeSnappyBetterBlockAsm64K
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBetterBlockAsm64K
+ JB three_bytes_match_emit_encodeSnappyBetterBlockAsm64K
+
+three_bytes_match_emit_encodeSnappyBetterBlockAsm64K:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
+
+two_bytes_match_emit_encodeSnappyBetterBlockAsm64K:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeSnappyBetterBlockAsm64K
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
+
+one_byte_match_emit_encodeSnappyBetterBlockAsm64K:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBetterBlockAsm64K:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
+
+memmove_long_match_emit_encodeSnappyBetterBlockAsm64K:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K
+
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K
+
+emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm64K
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K:
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x30, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x30, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 262168(SP)(R10*4)
+ MOVL R13, 262168(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeSnappyBetterBlockAsm64K:
+ CMPQ DI, R8
+ JAE search_loop_encodeSnappyBetterBlockAsm64K
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x30, R9
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x30, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeSnappyBetterBlockAsm64K
+
+emit_remainder_encodeSnappyBetterBlockAsm64K:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBetterBlockAsm64K
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBetterBlockAsm64K:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K
+ JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBetterBlockAsm64K
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+// Machine-generated Snappy block encoder ("better" variant, small blocks —
+// do not edit by hand; regenerate instead).  Two stack-resident hash tables
+// supply match candidates: a 6-byte hash indexes the 64KiB table at 24(SP)
+// (14-bit index) and a 4-byte hash indexes the 16KiB table at 65560(SP)
+// (12-bit index).  Returns the number of bytes written to dst, or 0 when
+// dst might be too small to hold the encoded block.
+TEXT ·encodeSnappyBetterBlockAsm12B(SB), $81944-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000280, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+ // Clear both hash tables: 0x280 iterations x 128 bytes = 81920 bytes.
+zero_loop_encodeSnappyBetterBlockAsm12B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBetterBlockAsm12B
+ // Stack slots: (SP)=dst bound used for overflow checks, 8(SP)=source
+ // position limit (len-8), 12(SP)=next position to emit literals from,
+ // 16(SP)=last match offset (presumably the repeat offset — see the
+ // "Check if repeat" store below).
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+ // Main search loop: load 8 bytes at position CX, hash the low 6 bytes
+ // (SHLQ $0x10) into the primary table and the low 4 bytes (SHLQ $0x20)
+ // into the secondary table, then probe both candidates.
+search_loop_encodeSnappyBetterBlockAsm12B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm12B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 65560(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 65560(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm12B
+ CMPQ R10, SI
+ JNE no_short_found_encodeSnappyBetterBlockAsm12B
+ MOVL DI, BX
+ JMP candidate_match_encodeSnappyBetterBlockAsm12B
+
+no_short_found_encodeSnappyBetterBlockAsm12B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm12B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm12B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm12B
+
+candidateS_match_encodeSnappyBetterBlockAsm12B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm12B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBetterBlockAsm12B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
+
+ // Extend the match backwards while the preceding bytes are equal.
+match_extend_back_loop_encodeSnappyBetterBlockAsm12B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBetterBlockAsm12B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
+ JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B
+
+ // Return 0 if the pending literals plus tag could overflow dst.
+match_extend_back_end_encodeSnappyBetterBlockAsm12B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBetterBlockAsm12B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
+
+matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B:
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
+
+matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B:
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
+ JB match_nolit_end_encodeSnappyBetterBlockAsm12B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeSnappyBetterBlockAsm12B
+ LEAL 1(R11), R11
+
+ // DI = match offset (current position CX minus candidate BX);
+ // R11 = extra matched length beyond the first 4 bytes.
+match_nolit_end_encodeSnappyBetterBlockAsm12B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeSnappyBetterBlockAsm12B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBetterBlockAsm12B
+ JB three_bytes_match_emit_encodeSnappyBetterBlockAsm12B
+
+three_bytes_match_emit_encodeSnappyBetterBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
+
+two_bytes_match_emit_encodeSnappyBetterBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeSnappyBetterBlockAsm12B
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
+
+one_byte_match_emit_encodeSnappyBetterBlockAsm12B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
+
+memmove_long_match_emit_encodeSnappyBetterBlockAsm12B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B
+
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B
+
+emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm12B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+ // Re-index both hash tables around the match boundaries so future
+ // searches can find positions inside and just after this match.
+match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x34, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x32, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x34, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 65560(SP)(R10*4)
+ MOVL R13, 65560(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeSnappyBetterBlockAsm12B:
+ CMPQ DI, R8
+ JAE search_loop_encodeSnappyBetterBlockAsm12B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeSnappyBetterBlockAsm12B
+
+ // Emit any trailing bytes after the last match as one literal run.
+emit_remainder_encodeSnappyBetterBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBetterBlockAsm12B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBetterBlockAsm12B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B
+ JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBetterBlockAsm12B
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+// Machine-generated Snappy block encoder ("better" variant, smaller blocks —
+// do not edit by hand; regenerate instead).  Same structure as the 12B
+// variant but with smaller hash tables: a 6-byte hash indexes the 16KiB
+// table at 24(SP) (12-bit index) and a 4-byte hash indexes the 4KiB table
+// at 16408(SP) (10-bit index).  Returns bytes written to dst, or 0 when
+// dst might be too small to hold the encoded block.
+TEXT ·encodeSnappyBetterBlockAsm10B(SB), $20504-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x000000a0, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+ // Clear both hash tables: 0xa0 iterations x 128 bytes = 20480 bytes.
+zero_loop_encodeSnappyBetterBlockAsm10B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBetterBlockAsm10B
+ // Stack slots: (SP)=dst bound used for overflow checks, 8(SP)=source
+ // position limit (len-8), 12(SP)=next position to emit literals from,
+ // 16(SP)=last match offset (presumably the repeat offset — see the
+ // "Check if repeat" store below).
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+ // Main search loop: load 8 bytes at position CX, hash the low 6 bytes
+ // (SHLQ $0x10) into the primary table and the low 4 bytes (SHLQ $0x20)
+ // into the secondary table, then probe both candidates.
+search_loop_encodeSnappyBetterBlockAsm10B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm10B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 16408(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 16408(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm10B
+ CMPQ R10, SI
+ JNE no_short_found_encodeSnappyBetterBlockAsm10B
+ MOVL DI, BX
+ JMP candidate_match_encodeSnappyBetterBlockAsm10B
+
+no_short_found_encodeSnappyBetterBlockAsm10B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm10B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm10B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm10B
+
+candidateS_match_encodeSnappyBetterBlockAsm10B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm10B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBetterBlockAsm10B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
+
+ // Extend the match backwards while the preceding bytes are equal.
+match_extend_back_loop_encodeSnappyBetterBlockAsm10B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBetterBlockAsm10B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
+ JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B
+
+ // Return 0 if the pending literals plus tag could overflow dst.
+match_extend_back_end_encodeSnappyBetterBlockAsm10B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+match_dst_size_check_encodeSnappyBetterBlockAsm10B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
+
+matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B:
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
+
+matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B:
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
+ JB match_nolit_end_encodeSnappyBetterBlockAsm10B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeSnappyBetterBlockAsm10B
+ LEAL 1(R11), R11
+
+ // DI = match offset (current position CX minus candidate BX);
+ // R11 = extra matched length beyond the first 4 bytes.
+match_nolit_end_encodeSnappyBetterBlockAsm10B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeSnappyBetterBlockAsm10B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBetterBlockAsm10B
+ JB three_bytes_match_emit_encodeSnappyBetterBlockAsm10B
+
+three_bytes_match_emit_encodeSnappyBetterBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
+
+two_bytes_match_emit_encodeSnappyBetterBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeSnappyBetterBlockAsm10B
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
+
+one_byte_match_emit_encodeSnappyBetterBlockAsm10B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+memmove_match_emit_encodeSnappyBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
+
+memmove_long_match_emit_encodeSnappyBetterBlockAsm10B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B
+
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
+ CMPL DI, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B
+
+emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm10B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+ // Re-index both hash tables around the match boundaries so future
+ // searches can find positions inside and just after this match.
+match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x36, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x34, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x36, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 16408(SP)(R10*4)
+ MOVL R13, 16408(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeSnappyBetterBlockAsm10B:
+ CMPQ DI, R8
+ JAE search_loop_encodeSnappyBetterBlockAsm10B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeSnappyBetterBlockAsm10B
+
+ // Emit any trailing bytes after the last match as one literal run.
+emit_remainder_encodeSnappyBetterBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBetterBlockAsm10B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBetterBlockAsm10B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B
+ JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBetterBlockAsm10B
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+//
+// encodeSnappyBetterBlockAsm8B compresses src into dst using the Snappy block
+// format ("better" match finder, small-block variant) and returns the number
+// of bytes written, or 0 when the output would overrun the destination limit
+// computed at entry. This is machine-generated (avo-style) assembly inside a
+// vendored diff — do not hand-edit instruction order or register allocation.
+// Frame layout: 24(SP) is a 1024-entry long-hash table (4 KiB, index from
+// SHRQ $0x36) and 4120(SP) a 256-entry short-hash table (1 KiB, SHRQ $0x38).
+TEXT ·encodeSnappyBetterBlockAsm8B(SB), $5144-56
+ MOVQ dst_base+0(FP), AX
+ MOVQ $0x00000028, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+// Zero both hash tables: 0x28 iterations x 128 bytes = 5120 bytes.
+zero_loop_encodeSnappyBetterBlockAsm8B:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_encodeSnappyBetterBlockAsm8B
+ // Stack slots: (SP)=dst overflow limit, 8(SP)=search end index,
+ // 12(SP)=start of pending literals, 16(SP)=last copy offset (presumably;
+ // TODO confirm), 20(SP)=next search position on miss.
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), DX
+
+// Main search loop; skip heuristic: step = 1 + (bytes since last emit)/16.
+search_loop_encodeSnappyBetterBlockAsm8B:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm8B
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ // Multiplicative hash constants: long hash covers the low 6 bytes
+ // (SHLQ $0x10 before the multiply), short hash the low 4 (SHLQ $0x20).
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x38, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 4120(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 4120(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm8B
+ CMPQ R10, SI
+ JNE no_short_found_encodeSnappyBetterBlockAsm8B
+ MOVL DI, BX
+ JMP candidate_match_encodeSnappyBetterBlockAsm8B
+
+// 8-byte probes missed; retry with 4-byte compares before advancing.
+no_short_found_encodeSnappyBetterBlockAsm8B:
+ CMPL R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm8B
+ CMPL R10, SI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm8B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm8B
+
+// Short-table hit at CX+1: re-hash one byte further and try to upgrade it
+// to a long-table candidate.
+candidateS_match_encodeSnappyBetterBlockAsm8B:
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL 24(SP)(R9*4), BX
+ INCL CX
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm8B
+ DECL CX
+ MOVL DI, BX
+
+candidate_match_encodeSnappyBetterBlockAsm8B:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
+
+// Extend the match backwards while the preceding bytes are equal.
+match_extend_back_loop_encodeSnappyBetterBlockAsm8B:
+ CMPL CX, SI
+ JBE match_extend_back_end_encodeSnappyBetterBlockAsm8B
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
+ JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B
+
+// Destination overflow check: return 0 if the pending literals plus a
+// 3-byte tag could pass the limit stored at (SP).
+match_extend_back_end_encodeSnappyBetterBlockAsm8B:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_encodeSnappyBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+// Skip the 4 already-verified bytes and measure the full match length.
+match_dst_size_check_encodeSnappyBetterBlockAsm8B:
+ MOVL CX, SI
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
+
+ // matchLen
+ XORL R11, R11
+ CMPL DI, $0x08
+ JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
+
+matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B:
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
+ JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
+
+matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B:
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
+ JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
+ LEAL -4(DI), DI
+ LEAL 4(R11), R11
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
+ JB match_nolit_end_encodeSnappyBetterBlockAsm8B
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
+ LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B:
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
+ JNE match_nolit_end_encodeSnappyBetterBlockAsm8B
+ LEAL 1(R11), R11
+
+// Match length known; emit any pending literals first (emitLiteral).
+match_nolit_end_encodeSnappyBetterBlockAsm8B:
+ MOVL CX, DI
+ SUBL BX, DI
+
+ // Check if repeat
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
+ JB one_byte_match_emit_encodeSnappyBetterBlockAsm8B
+ CMPL BX, $0x00000100
+ JB two_bytes_match_emit_encodeSnappyBetterBlockAsm8B
+ // NOTE(review): dead branch — flags are unchanged after the untaken JB
+ // above, so control falls through to three_bytes either way. Harmless
+ // generator artifact; left as-is.
+ JB three_bytes_match_emit_encodeSnappyBetterBlockAsm8B
+
+three_bytes_match_emit_encodeSnappyBetterBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW BX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
+
+two_bytes_match_emit_encodeSnappyBetterBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB BL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_match_emit_encodeSnappyBetterBlockAsm8B
+ JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
+
+one_byte_match_emit_encodeSnappyBetterBlockAsm8B:
+ SHLB $0x02, BL
+ MOVB BL, (AX)
+ ADDQ $0x01, AX
+
+// Copy the literal bytes (R8 = length <= 64) after the tag just written.
+memmove_match_emit_encodeSnappyBetterBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8:
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32:
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64:
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B:
+ MOVQ BX, AX
+ JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
+
+// Long literal copy (> 64 bytes): head/tail saved in X0-X3, aligned
+// 32-byte body loop in between.
+memmove_long_match_emit_encodeSnappyBetterBlockAsm8B:
+ LEAQ (AX)(R8*1), BX
+
+ // genMemMoveLong
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
+
+// Literals done; emit the copy tag(s). R11 = match length, DI = offset.
+emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B:
+ ADDL R11, CX
+ ADDL $0x04, R11
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL R11, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B
+ MOVB $0xee, (AX)
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B
+
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B:
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
+ JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B
+
+emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B:
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_encodeSnappyBetterBlockAsm8B
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+// Refill both hash tables with positions around the emitted match, then
+// index every second position across the match span before resuming.
+match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B:
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x38, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x36, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x38, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 4120(SP)(R10*4)
+ MOVL R13, 4120(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
+
+index_loop_encodeSnappyBetterBlockAsm8B:
+ CMPQ DI, R8
+ JAE search_loop_encodeSnappyBetterBlockAsm8B
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
+ ADDQ $0x02, SI
+ ADDQ $0x02, DI
+ JMP index_loop_encodeSnappyBetterBlockAsm8B
+
+// Emit whatever is left of src as one final literal run.
+emit_remainder_encodeSnappyBetterBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_encodeSnappyBetterBlockAsm8B
+ MOVQ $0x00000000, ret+48(FP)
+ RET
+
+emit_remainder_ok_encodeSnappyBetterBlockAsm8B:
+ MOVQ src_len+32(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B
+ // NOTE(review): dead branch (same generator artifact as above) — always
+ // falls through to three_bytes.
+ JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ MOVB $0xf4, (AX)
+ MOVW DX, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ MOVB $0xf0, (AX)
+ MOVB DL, 1(AX)
+ ADDQ $0x02, AX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeSnappyBetterBlockAsm8B
+ JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ SHLB $0x02, DL
+ MOVB DL, (AX)
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(BX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(BX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ MOVQ DX, AX
+ JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ LEAQ (AX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(BX*1), X2
+ MOVOU -16(CX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(BX*1)
+ MOVOU X3, -16(AX)(BX*1)
+ MOVQ DX, AX
+
+// Return bytes written = current output pointer minus dst base.
+emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ AX, ret+48(FP)
+ RET
+
+// func calcBlockSize(src []byte) int
+// Requires: BMI, SSE2
+//
+// calcBlockSize runs the same match finding as the block encoder but writes
+// nothing: AX starts at zero and only accumulates tag/literal byte counts,
+// so the return value is the encoded size of src as one block (0 when the
+// running size check against the limit in (SP) fails). Machine-generated
+// (avo-style) assembly — do not hand-edit.
+TEXT ·calcBlockSize(SB), $32792-32
+ XORQ AX, AX
+ MOVQ $0x00000100, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+// Zero the 32 KiB hash table at 24(SP): 0x100 iterations x 128 bytes.
+zero_loop_calcBlockSize:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_calcBlockSize
+ // (SP) = output-size limit (AX is still 0 here, so the LEAQ below is
+ // just the limit value); 8(SP)=search end, 12(SP)=literal start,
+ // 16(SP)=current repeat offset, 20(SP)=next position on miss.
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+8(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+0(FP), DX
+
+// Main search loop; skip heuristic: step = 4 + (bytes since last emit)/32.
+search_loop_calcBlockSize:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_calcBlockSize
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x33, R9
+ SHLQ $0x10, R10
+ IMULQ R8, R10
+ SHRQ $0x33, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x33, R9
+ // Repeat check: compare 4 bytes at CX+1 against the same position one
+ // repeat-offset (16(SP)) back.
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_calcBlockSize
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_calcBlockSize
+
+// Extend the repeat backwards over equal bytes.
+repeat_extend_back_loop_calcBlockSize:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_calcBlockSize
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_calcBlockSize
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_calcBlockSize
+
+// Count (not copy) the literal run preceding the repeat: only the tag
+// size and literal length are added to AX.
+repeat_extend_back_end_calcBlockSize:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_calcBlockSize
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_calcBlockSize
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_calcBlockSize
+ CMPL BX, $0x00010000
+ JB three_bytes_repeat_emit_calcBlockSize
+ CMPL BX, $0x01000000
+ JB four_bytes_repeat_emit_calcBlockSize
+ ADDQ $0x05, AX
+ JMP memmove_long_repeat_emit_calcBlockSize
+
+four_bytes_repeat_emit_calcBlockSize:
+ ADDQ $0x04, AX
+ JMP memmove_long_repeat_emit_calcBlockSize
+
+three_bytes_repeat_emit_calcBlockSize:
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_calcBlockSize
+
+two_bytes_repeat_emit_calcBlockSize:
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_calcBlockSize
+ JMP memmove_long_repeat_emit_calcBlockSize
+
+one_byte_repeat_emit_calcBlockSize:
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_calcBlockSize:
+ LEAQ (AX)(DI*1), AX
+ JMP emit_literal_done_repeat_emit_calcBlockSize
+
+memmove_long_repeat_emit_calcBlockSize:
+ LEAQ (AX)(DI*1), AX
+
+// Measure the forward extent of the repeat match.
+emit_literal_done_repeat_emit_calcBlockSize:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+8(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_calcBlockSize
+
+matchlen_loopback_repeat_extend_calcBlockSize:
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
+ JZ matchlen_loop_repeat_extend_calcBlockSize
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_calcBlockSize
+
+matchlen_loop_repeat_extend_calcBlockSize:
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
+ JAE matchlen_loopback_repeat_extend_calcBlockSize
+
+matchlen_match4_repeat_extend_calcBlockSize:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_calcBlockSize
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_calcBlockSize
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_calcBlockSize:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_calcBlockSize
+ JB repeat_extend_forward_end_calcBlockSize
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_calcBlockSize
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_calcBlockSize
+
+matchlen_match1_repeat_extend_calcBlockSize:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_calcBlockSize
+ LEAL 1(R10), R10
+
+// Add the copy-tag sizes for the repeat (BX = match length, SI = offset).
+repeat_extend_forward_end_calcBlockSize:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+ CMPL SI, $0x00010000
+ JB two_byte_offset_repeat_as_copy_calcBlockSize
+
+four_bytes_loop_back_repeat_as_copy_calcBlockSize:
+ CMPL BX, $0x40
+ JBE four_bytes_remain_repeat_as_copy_calcBlockSize
+ LEAL -64(BX), BX
+ ADDQ $0x05, AX
+ CMPL BX, $0x04
+ JB four_bytes_remain_repeat_as_copy_calcBlockSize
+ JMP four_bytes_loop_back_repeat_as_copy_calcBlockSize
+
+four_bytes_remain_repeat_as_copy_calcBlockSize:
+ TESTL BX, BX
+ JZ repeat_end_emit_calcBlockSize
+ XORL BX, BX
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_calcBlockSize
+
+two_byte_offset_repeat_as_copy_calcBlockSize:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_calcBlockSize
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_calcBlockSize
+
+two_byte_offset_short_repeat_as_copy_calcBlockSize:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_calcBlockSize
+ CMPL SI, $0x00000800
+ JAE emit_copy_three_repeat_as_copy_calcBlockSize
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_calcBlockSize
+
+emit_copy_three_repeat_as_copy_calcBlockSize:
+ ADDQ $0x03, AX
+
+repeat_end_emit_calcBlockSize:
+ MOVL CX, 12(SP)
+ JMP search_loop_calcBlockSize
+
+// No repeat: fall back to the three hash-table candidates probed above.
+no_repeat_found_calcBlockSize:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_calcBlockSize
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_calcBlockSize
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_calcBlockSize
+ MOVL 20(SP), CX
+ JMP search_loop_calcBlockSize
+
+candidate3_match_calcBlockSize:
+ ADDL $0x02, CX
+ JMP candidate_match_calcBlockSize
+
+candidate2_match_calcBlockSize:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_calcBlockSize:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_calcBlockSize
+
+// Extend the match backwards while the preceding bytes are equal.
+match_extend_back_loop_calcBlockSize:
+ CMPL CX, SI
+ JBE match_extend_back_end_calcBlockSize
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_calcBlockSize
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_calcBlockSize
+ JMP match_extend_back_loop_calcBlockSize
+
+// Size-limit check: return 0 if the accumulated size could pass (SP).
+match_extend_back_end_calcBlockSize:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_calcBlockSize
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+// Count the literal run before the match (tag size + literal length only).
+match_dst_size_check_calcBlockSize:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_calcBlockSize
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
+ JB one_byte_match_emit_calcBlockSize
+ CMPL SI, $0x00000100
+ JB two_bytes_match_emit_calcBlockSize
+ CMPL SI, $0x00010000
+ JB three_bytes_match_emit_calcBlockSize
+ CMPL SI, $0x01000000
+ JB four_bytes_match_emit_calcBlockSize
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_calcBlockSize
+
+four_bytes_match_emit_calcBlockSize:
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_calcBlockSize
+
+three_bytes_match_emit_calcBlockSize:
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_calcBlockSize
+
+two_bytes_match_emit_calcBlockSize:
+ ADDQ $0x02, AX
+ CMPL SI, $0x40
+ JB memmove_match_emit_calcBlockSize
+ JMP memmove_long_match_emit_calcBlockSize
+
+one_byte_match_emit_calcBlockSize:
+ ADDQ $0x01, AX
+
+memmove_match_emit_calcBlockSize:
+ LEAQ (AX)(R8*1), AX
+ JMP emit_literal_done_match_emit_calcBlockSize
+
+memmove_long_match_emit_calcBlockSize:
+ LEAQ (AX)(R8*1), AX
+
+emit_literal_done_match_emit_calcBlockSize:
+match_nolit_loop_calcBlockSize:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+8(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_calcBlockSize
+
+matchlen_loopback_match_nolit_calcBlockSize:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_calcBlockSize
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_calcBlockSize
+
+matchlen_loop_match_nolit_calcBlockSize:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_calcBlockSize
+
+matchlen_match4_match_nolit_calcBlockSize:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_calcBlockSize
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_calcBlockSize
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_calcBlockSize:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_calcBlockSize
+ JB match_nolit_end_calcBlockSize
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_calcBlockSize
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_calcBlockSize
+
+matchlen_match1_match_nolit_calcBlockSize:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_calcBlockSize
+ LEAL 1(R9), R9
+
+// Add the copy-tag sizes for this match (R9 = length, BX = offset).
+match_nolit_end_calcBlockSize:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL BX, $0x00010000
+ JB two_byte_offset_match_nolit_calcBlockSize
+
+four_bytes_loop_back_match_nolit_calcBlockSize:
+ CMPL R9, $0x40
+ JBE four_bytes_remain_match_nolit_calcBlockSize
+ LEAL -64(R9), R9
+ ADDQ $0x05, AX
+ CMPL R9, $0x04
+ JB four_bytes_remain_match_nolit_calcBlockSize
+ JMP four_bytes_loop_back_match_nolit_calcBlockSize
+
+four_bytes_remain_match_nolit_calcBlockSize:
+ TESTL R9, R9
+ JZ match_nolit_emitcopy_end_calcBlockSize
+ XORL BX, BX
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_calcBlockSize
+
+two_byte_offset_match_nolit_calcBlockSize:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_calcBlockSize
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_calcBlockSize
+
+two_byte_offset_short_match_nolit_calcBlockSize:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_calcBlockSize
+ CMPL BX, $0x00000800
+ JAE emit_copy_three_match_nolit_calcBlockSize
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_calcBlockSize
+
+emit_copy_three_match_nolit_calcBlockSize:
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_calcBlockSize:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_calcBlockSize
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_calcBlockSize
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+// Update the hash table around the end of the match and probe for an
+// immediate follow-on match at CX.
+match_nolit_dst_ok_calcBlockSize:
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x33, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x33, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_calcBlockSize
+ INCL CX
+ JMP search_loop_calcBlockSize
+
+// Account for the final literal run covering the rest of src.
+emit_remainder_calcBlockSize:
+ MOVQ src_len+8(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 5(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_calcBlockSize
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+emit_remainder_ok_calcBlockSize:
+ MOVQ src_len+8(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_calcBlockSize
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), CX
+ CMPL CX, $0x3c
+ JB one_byte_emit_remainder_calcBlockSize
+ CMPL CX, $0x00000100
+ JB two_bytes_emit_remainder_calcBlockSize
+ CMPL CX, $0x00010000
+ JB three_bytes_emit_remainder_calcBlockSize
+ CMPL CX, $0x01000000
+ JB four_bytes_emit_remainder_calcBlockSize
+ ADDQ $0x05, AX
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+four_bytes_emit_remainder_calcBlockSize:
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+three_bytes_emit_remainder_calcBlockSize:
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+two_bytes_emit_remainder_calcBlockSize:
+ ADDQ $0x02, AX
+ CMPL CX, $0x40
+ JB memmove_emit_remainder_calcBlockSize
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+one_byte_emit_remainder_calcBlockSize:
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_calcBlockSize:
+ LEAQ (AX)(SI*1), AX
+ JMP emit_literal_done_emit_remainder_calcBlockSize
+
+memmove_long_emit_remainder_calcBlockSize:
+ LEAQ (AX)(SI*1), AX
+
+// AX now holds the total encoded size.
+emit_literal_done_emit_remainder_calcBlockSize:
+ MOVQ AX, ret+24(FP)
+ RET
+
+// func calcBlockSizeSmall(src []byte) int
+// Requires: BMI, SSE2
+TEXT ·calcBlockSizeSmall(SB), $2072-32
+ XORQ AX, AX
+ MOVQ $0x00000010, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_calcBlockSizeSmall:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_calcBlockSizeSmall
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+8(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+0(FP), DX
+
+search_loop_calcBlockSizeSmall:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JAE emit_remainder_calcBlockSizeSmall
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x37, R9
+ SHLQ $0x20, R10
+ IMULQ R8, R10
+ SHRQ $0x37, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x37, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_calcBlockSizeSmall
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_calcBlockSizeSmall
+
+repeat_extend_back_loop_calcBlockSizeSmall:
+ CMPL SI, BX
+ JBE repeat_extend_back_end_calcBlockSizeSmall
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_calcBlockSizeSmall
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_calcBlockSizeSmall
+
+repeat_extend_back_end_calcBlockSizeSmall:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JB one_byte_repeat_emit_calcBlockSizeSmall
+ CMPL BX, $0x00000100
+ JB two_bytes_repeat_emit_calcBlockSizeSmall
+ JB three_bytes_repeat_emit_calcBlockSizeSmall
+
+three_bytes_repeat_emit_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_calcBlockSizeSmall
+
+two_bytes_repeat_emit_calcBlockSizeSmall:
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JB memmove_repeat_emit_calcBlockSizeSmall
+ JMP memmove_long_repeat_emit_calcBlockSizeSmall
+
+one_byte_repeat_emit_calcBlockSizeSmall:
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_calcBlockSizeSmall:
+ LEAQ (AX)(DI*1), AX
+ JMP emit_literal_done_repeat_emit_calcBlockSizeSmall
+
+memmove_long_repeat_emit_calcBlockSizeSmall:
+ LEAQ (AX)(DI*1), AX
+
+emit_literal_done_repeat_emit_calcBlockSizeSmall:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+8(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+ CMPL DI, $0x08
+ JB matchlen_match4_repeat_extend_calcBlockSizeSmall
+
+matchlen_loopback_repeat_extend_calcBlockSizeSmall:
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
+ JZ matchlen_loop_repeat_extend_calcBlockSizeSmall
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_calcBlockSizeSmall
+
+matchlen_loop_repeat_extend_calcBlockSizeSmall:
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
+ JAE matchlen_loopback_repeat_extend_calcBlockSizeSmall
+
+matchlen_match4_repeat_extend_calcBlockSizeSmall:
+ CMPL DI, $0x04
+ JB matchlen_match2_repeat_extend_calcBlockSizeSmall
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_calcBlockSizeSmall
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_calcBlockSizeSmall:
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_calcBlockSizeSmall
+ JB repeat_extend_forward_end_calcBlockSizeSmall
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_calcBlockSizeSmall
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_calcBlockSizeSmall
+
+matchlen_match1_repeat_extend_calcBlockSizeSmall:
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_calcBlockSizeSmall
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_calcBlockSizeSmall:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+two_byte_offset_repeat_as_copy_calcBlockSizeSmall:
+ CMPL BX, $0x40
+ JBE two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_calcBlockSizeSmall
+
+two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall:
+ MOVL BX, SI
+ SHLL $0x02, SI
+ CMPL BX, $0x0c
+ JAE emit_copy_three_repeat_as_copy_calcBlockSizeSmall
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_calcBlockSizeSmall
+
+emit_copy_three_repeat_as_copy_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+
+repeat_end_emit_calcBlockSizeSmall:
+ MOVL CX, 12(SP)
+ JMP search_loop_calcBlockSizeSmall
+
+no_repeat_found_calcBlockSizeSmall:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_calcBlockSizeSmall
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_calcBlockSizeSmall
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_calcBlockSizeSmall
+ MOVL 20(SP), CX
+ JMP search_loop_calcBlockSizeSmall
+
+candidate3_match_calcBlockSizeSmall:
+ ADDL $0x02, CX
+ JMP candidate_match_calcBlockSizeSmall
+
+candidate2_match_calcBlockSizeSmall:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_calcBlockSizeSmall:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_calcBlockSizeSmall
+
+match_extend_back_loop_calcBlockSizeSmall:
+ CMPL CX, SI
+ JBE match_extend_back_end_calcBlockSizeSmall
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_calcBlockSizeSmall
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_calcBlockSizeSmall
+ JMP match_extend_back_loop_calcBlockSizeSmall
+
+match_extend_back_end_calcBlockSizeSmall:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JB match_dst_size_check_calcBlockSizeSmall
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+match_dst_size_check_calcBlockSizeSmall:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_calcBlockSizeSmall
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
+ JB one_byte_match_emit_calcBlockSizeSmall
+ CMPL SI, $0x00000100
+ JB two_bytes_match_emit_calcBlockSizeSmall
+ JB three_bytes_match_emit_calcBlockSizeSmall
+
+three_bytes_match_emit_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_calcBlockSizeSmall
+
+two_bytes_match_emit_calcBlockSizeSmall:
+ ADDQ $0x02, AX
+ CMPL SI, $0x40
+ JB memmove_match_emit_calcBlockSizeSmall
+ JMP memmove_long_match_emit_calcBlockSizeSmall
+
+one_byte_match_emit_calcBlockSizeSmall:
+ ADDQ $0x01, AX
+
+memmove_match_emit_calcBlockSizeSmall:
+ LEAQ (AX)(R8*1), AX
+ JMP emit_literal_done_match_emit_calcBlockSizeSmall
+
+memmove_long_match_emit_calcBlockSizeSmall:
+ LEAQ (AX)(R8*1), AX
+
+emit_literal_done_match_emit_calcBlockSizeSmall:
+match_nolit_loop_calcBlockSizeSmall:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+8(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JB matchlen_match4_match_nolit_calcBlockSizeSmall
+
+matchlen_loopback_match_nolit_calcBlockSizeSmall:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_calcBlockSizeSmall
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_calcBlockSizeSmall
+
+matchlen_loop_match_nolit_calcBlockSizeSmall:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JAE matchlen_loopback_match_nolit_calcBlockSizeSmall
+
+matchlen_match4_match_nolit_calcBlockSizeSmall:
+ CMPL SI, $0x04
+ JB matchlen_match2_match_nolit_calcBlockSizeSmall
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_calcBlockSizeSmall
+ LEAL -4(SI), SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_calcBlockSizeSmall:
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_calcBlockSizeSmall
+ JB match_nolit_end_calcBlockSizeSmall
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_calcBlockSizeSmall
+ LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_calcBlockSizeSmall
+
+matchlen_match1_match_nolit_calcBlockSizeSmall:
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_calcBlockSizeSmall
+ LEAL 1(R9), R9
+
+match_nolit_end_calcBlockSizeSmall:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_calcBlockSizeSmall:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_match_nolit_calcBlockSizeSmall
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_calcBlockSizeSmall
+
+two_byte_offset_short_match_nolit_calcBlockSizeSmall:
+ MOVL R9, BX
+ SHLL $0x02, BX
+ CMPL R9, $0x0c
+ JAE emit_copy_three_match_nolit_calcBlockSizeSmall
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_calcBlockSizeSmall
+
+emit_copy_three_match_nolit_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_calcBlockSizeSmall:
+ CMPL CX, 8(SP)
+ JAE emit_remainder_calcBlockSizeSmall
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JB match_nolit_dst_ok_calcBlockSizeSmall
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+match_nolit_dst_ok_calcBlockSizeSmall:
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x37, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x37, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_calcBlockSizeSmall
+ INCL CX
+ JMP search_loop_calcBlockSizeSmall
+
+emit_remainder_calcBlockSizeSmall:
+ MOVQ src_len+8(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JB emit_remainder_ok_calcBlockSizeSmall
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+emit_remainder_ok_calcBlockSizeSmall:
+ MOVQ src_len+8(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_calcBlockSizeSmall
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), CX
+ CMPL CX, $0x3c
+ JB one_byte_emit_remainder_calcBlockSizeSmall
+ CMPL CX, $0x00000100
+ JB two_bytes_emit_remainder_calcBlockSizeSmall
+ JB three_bytes_emit_remainder_calcBlockSizeSmall
+
+three_bytes_emit_remainder_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_calcBlockSizeSmall
+
+two_bytes_emit_remainder_calcBlockSizeSmall:
+ ADDQ $0x02, AX
+ CMPL CX, $0x40
+ JB memmove_emit_remainder_calcBlockSizeSmall
+ JMP memmove_long_emit_remainder_calcBlockSizeSmall
+
+one_byte_emit_remainder_calcBlockSizeSmall:
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_calcBlockSizeSmall:
+ LEAQ (AX)(SI*1), AX
+ JMP emit_literal_done_emit_remainder_calcBlockSizeSmall
+
+memmove_long_emit_remainder_calcBlockSizeSmall:
+ LEAQ (AX)(SI*1), AX
+
+emit_literal_done_emit_remainder_calcBlockSizeSmall:
+ MOVQ AX, ret+24(FP)
+ RET
+
+// func emitLiteral(dst []byte, lit []byte) int
+// Requires: SSE2
+//
+// emitLiteral writes a Snappy/S2 literal tag (1-5 header bytes, selected by
+// the magnitude of len(lit)-1) into dst, then copies the literal bytes, and
+// returns the total number of bytes written.
+// Register use: AX = dst write pointer, CX = lit pointer, DX = lit length,
+// BX = running return value, SI = len(lit)-1 used for header selection.
+// NOTE(review): no bounds check on dst is performed here — presumably callers
+// size dst via the package's MaxEncodedLen; confirm against callers.
TEXT ·emitLiteral(SB), NOSPLIT, $0-56
+	MOVQ lit_len+32(FP), DX
+	MOVQ dst_base+0(FP), AX
+	MOVQ lit_base+24(FP), CX
+	TESTQ DX, DX
+	JZ emit_literal_end_standalone_skip
+	MOVL DX, BX
+	// SI = length-1; pick the smallest header that can hold it.
+	LEAL -1(DX), SI
+	CMPL SI, $0x3c
+	JB one_byte_standalone
+	CMPL SI, $0x00000100
+	JB two_bytes_standalone
+	CMPL SI, $0x00010000
+	JB three_bytes_standalone
+	CMPL SI, $0x01000000
+	JB four_bytes_standalone
+	// 5-byte header: tag 0xfc followed by 32-bit (length-1).
+	MOVB $0xfc, (AX)
+	MOVL SI, 1(AX)
+	ADDQ $0x05, BX
+	ADDQ $0x05, AX
+	JMP memmove_long_standalone
+
+four_bytes_standalone:
+	MOVL SI, DI
+	SHRL $0x10, DI
+	MOVB $0xf8, (AX)
+	MOVW SI, 1(AX)
+	MOVB DI, 3(AX)
+	ADDQ $0x04, BX
+	ADDQ $0x04, AX
+	JMP memmove_long_standalone
+
+three_bytes_standalone:
+	MOVB $0xf4, (AX)
+	MOVW SI, 1(AX)
+	ADDQ $0x03, BX
+	ADDQ $0x03, AX
+	JMP memmove_long_standalone
+
+two_bytes_standalone:
+	MOVB $0xf0, (AX)
+	MOVB SI, 1(AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+	CMPL SI, $0x40
+	JB memmove_standalone
+	JMP memmove_long_standalone
+
+one_byte_standalone:
+	// Single-byte header: (length-1)<<2, low tag bits 00 = literal.
+	SHLB $0x02, SI
+	MOVB SI, (AX)
+	ADDQ $0x01, BX
+	ADDQ $0x01, AX
+
+memmove_standalone:
+	// genMemMoveShort: size-dispatched copy for literals up to 64 bytes.
+	CMPQ DX, $0x03
+	JB emit_lit_memmove_standalone_memmove_move_1or2
+	JE emit_lit_memmove_standalone_memmove_move_3
+	CMPQ DX, $0x08
+	JB emit_lit_memmove_standalone_memmove_move_4through7
+	CMPQ DX, $0x10
+	JBE emit_lit_memmove_standalone_memmove_move_8through16
+	CMPQ DX, $0x20
+	JBE emit_lit_memmove_standalone_memmove_move_17through32
+	JMP emit_lit_memmove_standalone_memmove_move_33through64
+
+emit_lit_memmove_standalone_memmove_move_1or2:
+	MOVB (CX), SI
+	MOVB -1(CX)(DX*1), CL
+	MOVB SI, (AX)
+	MOVB CL, -1(AX)(DX*1)
+	JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_3:
+	MOVW (CX), SI
+	MOVB 2(CX), CL
+	MOVW SI, (AX)
+	MOVB CL, 2(AX)
+	JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_4through7:
+	// Two overlapping 4-byte loads/stores cover any length in [4,7].
+	MOVL (CX), SI
+	MOVL -4(CX)(DX*1), CX
+	MOVL SI, (AX)
+	MOVL CX, -4(AX)(DX*1)
+	JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_8through16:
+	MOVQ (CX), SI
+	MOVQ -8(CX)(DX*1), CX
+	MOVQ SI, (AX)
+	MOVQ CX, -8(AX)(DX*1)
+	JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_17through32:
+	MOVOU (CX), X0
+	MOVOU -16(CX)(DX*1), X1
+	MOVOU X0, (AX)
+	MOVOU X1, -16(AX)(DX*1)
+	JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_33through64:
+	MOVOU (CX), X0
+	MOVOU 16(CX), X1
+	MOVOU -32(CX)(DX*1), X2
+	MOVOU -16(CX)(DX*1), X3
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(DX*1)
+	MOVOU X3, -16(AX)(DX*1)
+	JMP emit_literal_end_standalone
+	JMP emit_literal_end_standalone
+
+memmove_long_standalone:
+	// genMemMoveLong: 32-byte SSE loop for literals longer than 64 bytes;
+	// head/tail vectors (X0-X3) are stored last to patch the unaligned edges.
+	MOVOU (CX), X0
+	MOVOU 16(CX), X1
+	MOVOU -32(CX)(DX*1), X2
+	MOVOU -16(CX)(DX*1), X3
+	MOVQ DX, DI
+	SHRQ $0x05, DI
+	MOVQ AX, SI
+	ANDL $0x0000001f, SI
+	MOVQ $0x00000040, R8
+	SUBQ SI, R8
+	DECQ DI
+	JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32
+	LEAQ -32(CX)(R8*1), SI
+	LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_standalonelarge_big_loop_back:
+	MOVOU (SI), X4
+	MOVOU 16(SI), X5
+	MOVOA X4, (R9)
+	MOVOA X5, 16(R9)
+	ADDQ $0x20, R9
+	ADDQ $0x20, SI
+	ADDQ $0x20, R8
+	DECQ DI
+	JNA emit_lit_memmove_long_standalonelarge_big_loop_back
+
+emit_lit_memmove_long_standalonelarge_forward_sse_loop_32:
+	MOVOU -32(CX)(R8*1), X4
+	MOVOU -16(CX)(R8*1), X5
+	MOVOA X4, -32(AX)(R8*1)
+	MOVOA X5, -16(AX)(R8*1)
+	ADDQ $0x20, R8
+	CMPQ DX, R8
+	JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(DX*1)
+	MOVOU X3, -16(AX)(DX*1)
+	JMP emit_literal_end_standalone
+	JMP emit_literal_end_standalone
+
+emit_literal_end_standalone_skip:
+	// Empty literal: return 0.
+	XORQ BX, BX
+
+emit_literal_end_standalone:
+	MOVQ BX, ret+48(FP)
+	RET
+
+// func emitRepeat(dst []byte, offset int, length int) int
+//
+// emitRepeat encodes an S2 "repeat" tag (copy reusing the previous offset)
+// of the given length into dst and returns the number of bytes written.
+// Register use: AX = dst write pointer, CX = offset (only consulted to pick
+// a shorter encoding), DX = remaining length, BX = running return value.
+// Lengths too large for one tag loop back and emit multiple repeat tags.
+TEXT ·emitRepeat(SB), NOSPLIT, $0-48
+	XORQ BX, BX
+	MOVQ dst_base+0(FP), AX
+	MOVQ offset+24(FP), CX
+	MOVQ length+32(FP), DX
+
+	// emitRepeat
+emit_repeat_again_standalone:
+	MOVL DX, SI
+	LEAL -4(DX), DX
+	CMPL SI, $0x08
+	JBE repeat_two_standalone
+	CMPL SI, $0x0c
+	JAE cant_repeat_two_offset_standalone
+	CMPL CX, $0x00000800
+	JB repeat_two_offset_standalone
+
+cant_repeat_two_offset_standalone:
+	// Select encoding size by remaining (length-4).
+	CMPL DX, $0x00000104
+	JB repeat_three_standalone
+	CMPL DX, $0x00010100
+	JB repeat_four_standalone
+	CMPL DX, $0x0100ffff
+	JB repeat_five_standalone
+	// Maximum-length 5-byte repeat tag, then loop for the remainder.
+	LEAL -16842747(DX), DX
+	MOVL $0xfffb001d, (AX)
+	MOVB $0xff, 4(AX)
+	ADDQ $0x05, AX
+	ADDQ $0x05, BX
+	JMP emit_repeat_again_standalone
+
+repeat_five_standalone:
+	LEAL -65536(DX), DX
+	MOVL DX, CX
+	MOVW $0x001d, (AX)
+	MOVW DX, 2(AX)
+	SARL $0x10, CX
+	MOVB CL, 4(AX)
+	ADDQ $0x05, BX
+	ADDQ $0x05, AX
+	JMP gen_emit_repeat_end
+
+repeat_four_standalone:
+	LEAL -256(DX), DX
+	MOVW $0x0019, (AX)
+	MOVW DX, 2(AX)
+	ADDQ $0x04, BX
+	ADDQ $0x04, AX
+	JMP gen_emit_repeat_end
+
+repeat_three_standalone:
+	LEAL -4(DX), DX
+	MOVW $0x0015, (AX)
+	MOVB DL, 2(AX)
+	ADDQ $0x03, BX
+	ADDQ $0x03, AX
+	JMP gen_emit_repeat_end
+
+repeat_two_standalone:
+	// 2-byte form for lengths <= 8.
+	SHLL $0x02, DX
+	ORL $0x01, DX
+	MOVW DX, (AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+	JMP gen_emit_repeat_end
+
+repeat_two_offset_standalone:
+	// 2-byte form folding the low offset bits in (offset < 2048).
+	XORQ SI, SI
+	LEAL 1(SI)(DX*4), DX
+	MOVB CL, 1(AX)
+	SARL $0x08, CX
+	SHLL $0x05, CX
+	ORL CX, DX
+	MOVB DL, (AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+
+gen_emit_repeat_end:
+	MOVQ BX, ret+40(FP)
+	RET
+
+// func emitCopy(dst []byte, offset int, length int) int
+//
+// emitCopy encodes an S2 copy tag for (offset, length) into dst and returns
+// the number of bytes written. Long matches are split into a maximal copy
+// followed by inlined repeat tags (S2 extension over Snappy).
+// Register use: AX = dst write pointer, CX = offset, DX = remaining length,
+// BX = running return value.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+	XORQ BX, BX
+	MOVQ dst_base+0(FP), AX
+	MOVQ offset+24(FP), CX
+	MOVQ length+32(FP), DX
+
+	// emitCopy
+	CMPL CX, $0x00010000
+	JB two_byte_offset_standalone
+	// Offset needs 4 bytes: emit a 5-byte copy-4 tag first.
+	CMPL DX, $0x40
+	JBE four_bytes_remain_standalone
+	MOVB $0xff, (AX)
+	MOVL CX, 1(AX)
+	LEAL -64(DX), DX
+	ADDQ $0x05, BX
+	ADDQ $0x05, AX
+	CMPL DX, $0x04
+	JB four_bytes_remain_standalone
+
+	// emitRepeat (inlined): encode the remaining length as repeat tags.
+emit_repeat_again_standalone_emit_copy:
+	MOVL DX, SI
+	LEAL -4(DX), DX
+	CMPL SI, $0x08
+	JBE repeat_two_standalone_emit_copy
+	CMPL SI, $0x0c
+	JAE cant_repeat_two_offset_standalone_emit_copy
+	CMPL CX, $0x00000800
+	JB repeat_two_offset_standalone_emit_copy
+
+cant_repeat_two_offset_standalone_emit_copy:
+	CMPL DX, $0x00000104
+	JB repeat_three_standalone_emit_copy
+	CMPL DX, $0x00010100
+	JB repeat_four_standalone_emit_copy
+	CMPL DX, $0x0100ffff
+	JB repeat_five_standalone_emit_copy
+	LEAL -16842747(DX), DX
+	MOVL $0xfffb001d, (AX)
+	MOVB $0xff, 4(AX)
+	ADDQ $0x05, AX
+	ADDQ $0x05, BX
+	JMP emit_repeat_again_standalone_emit_copy
+
+repeat_five_standalone_emit_copy:
+	LEAL -65536(DX), DX
+	MOVL DX, CX
+	MOVW $0x001d, (AX)
+	MOVW DX, 2(AX)
+	SARL $0x10, CX
+	MOVB CL, 4(AX)
+	ADDQ $0x05, BX
+	ADDQ $0x05, AX
+	JMP gen_emit_copy_end
+
+repeat_four_standalone_emit_copy:
+	LEAL -256(DX), DX
+	MOVW $0x0019, (AX)
+	MOVW DX, 2(AX)
+	ADDQ $0x04, BX
+	ADDQ $0x04, AX
+	JMP gen_emit_copy_end
+
+repeat_three_standalone_emit_copy:
+	LEAL -4(DX), DX
+	MOVW $0x0015, (AX)
+	MOVB DL, 2(AX)
+	ADDQ $0x03, BX
+	ADDQ $0x03, AX
+	JMP gen_emit_copy_end
+
+repeat_two_standalone_emit_copy:
+	SHLL $0x02, DX
+	ORL $0x01, DX
+	MOVW DX, (AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+	JMP gen_emit_copy_end
+
+repeat_two_offset_standalone_emit_copy:
+	XORQ SI, SI
+	LEAL 1(SI)(DX*4), DX
+	MOVB CL, 1(AX)
+	SARL $0x08, CX
+	SHLL $0x05, CX
+	ORL CX, DX
+	MOVB DL, (AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+	JMP gen_emit_copy_end
+
+four_bytes_remain_standalone:
+	// Short remainder with a 4-byte offset: single 5-byte copy-4 tag.
+	TESTL DX, DX
+	JZ gen_emit_copy_end
+	XORL SI, SI
+	LEAL -1(SI)(DX*4), DX
+	MOVB DL, (AX)
+	MOVL CX, 1(AX)
+	ADDQ $0x05, BX
+	ADDQ $0x05, AX
+	JMP gen_emit_copy_end
+
+two_byte_offset_standalone:
+	// Offset fits in 2 bytes.
+	CMPL DX, $0x40
+	JBE two_byte_offset_short_standalone
+	CMPL CX, $0x00000800
+	JAE long_offset_short_standalone
+	// Small offset (<2048): 2-byte copy-1 tag for the first 8 bytes,
+	// then repeat tags for the rest.
+	MOVL $0x00000001, SI
+	LEAL 16(SI), SI
+	MOVB CL, 1(AX)
+	MOVL CX, DI
+	SHRL $0x08, DI
+	SHLL $0x05, DI
+	ORL DI, SI
+	MOVB SI, (AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+	SUBL $0x08, DX
+
+	// emitRepeat
+	LEAL -4(DX), DX
+	JMP cant_repeat_two_offset_standalone_emit_copy_short_2b
+
+emit_repeat_again_standalone_emit_copy_short_2b:
+	MOVL DX, SI
+	LEAL -4(DX), DX
+	CMPL SI, $0x08
+	JBE repeat_two_standalone_emit_copy_short_2b
+	CMPL SI, $0x0c
+	JAE cant_repeat_two_offset_standalone_emit_copy_short_2b
+	CMPL CX, $0x00000800
+	JB repeat_two_offset_standalone_emit_copy_short_2b
+
+cant_repeat_two_offset_standalone_emit_copy_short_2b:
+	CMPL DX, $0x00000104
+	JB repeat_three_standalone_emit_copy_short_2b
+	CMPL DX, $0x00010100
+	JB repeat_four_standalone_emit_copy_short_2b
+	CMPL DX, $0x0100ffff
+	JB repeat_five_standalone_emit_copy_short_2b
+	LEAL -16842747(DX), DX
+	MOVL $0xfffb001d, (AX)
+	MOVB $0xff, 4(AX)
+	ADDQ $0x05, AX
+	ADDQ $0x05, BX
+	JMP emit_repeat_again_standalone_emit_copy_short_2b
+
+repeat_five_standalone_emit_copy_short_2b:
+	LEAL -65536(DX), DX
+	MOVL DX, CX
+	MOVW $0x001d, (AX)
+	MOVW DX, 2(AX)
+	SARL $0x10, CX
+	MOVB CL, 4(AX)
+	ADDQ $0x05, BX
+	ADDQ $0x05, AX
+	JMP gen_emit_copy_end
+
+repeat_four_standalone_emit_copy_short_2b:
+	LEAL -256(DX), DX
+	MOVW $0x0019, (AX)
+	MOVW DX, 2(AX)
+	ADDQ $0x04, BX
+	ADDQ $0x04, AX
+	JMP gen_emit_copy_end
+
+repeat_three_standalone_emit_copy_short_2b:
+	LEAL -4(DX), DX
+	MOVW $0x0015, (AX)
+	MOVB DL, 2(AX)
+	ADDQ $0x03, BX
+	ADDQ $0x03, AX
+	JMP gen_emit_copy_end
+
+repeat_two_standalone_emit_copy_short_2b:
+	SHLL $0x02, DX
+	ORL $0x01, DX
+	MOVW DX, (AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+	JMP gen_emit_copy_end
+
+repeat_two_offset_standalone_emit_copy_short_2b:
+	XORQ SI, SI
+	LEAL 1(SI)(DX*4), DX
+	MOVB CL, 1(AX)
+	SARL $0x08, CX
+	SHLL $0x05, CX
+	ORL CX, DX
+	MOVB DL, (AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+	JMP gen_emit_copy_end
+
+long_offset_short_standalone:
+	// Offset >= 2048: 3-byte copy-2 tag for the first 60 bytes,
+	// then repeat tags for the rest.
+	MOVB $0xee, (AX)
+	MOVW CX, 1(AX)
+	LEAL -60(DX), DX
+	ADDQ $0x03, AX
+	ADDQ $0x03, BX
+
+	// emitRepeat
+emit_repeat_again_standalone_emit_copy_short:
+	MOVL DX, SI
+	LEAL -4(DX), DX
+	CMPL SI, $0x08
+	JBE repeat_two_standalone_emit_copy_short
+	CMPL SI, $0x0c
+	JAE cant_repeat_two_offset_standalone_emit_copy_short
+	CMPL CX, $0x00000800
+	JB repeat_two_offset_standalone_emit_copy_short
+
+cant_repeat_two_offset_standalone_emit_copy_short:
+	CMPL DX, $0x00000104
+	JB repeat_three_standalone_emit_copy_short
+	CMPL DX, $0x00010100
+	JB repeat_four_standalone_emit_copy_short
+	CMPL DX, $0x0100ffff
+	JB repeat_five_standalone_emit_copy_short
+	LEAL -16842747(DX), DX
+	MOVL $0xfffb001d, (AX)
+	MOVB $0xff, 4(AX)
+	ADDQ $0x05, AX
+	ADDQ $0x05, BX
+	JMP emit_repeat_again_standalone_emit_copy_short
+
+repeat_five_standalone_emit_copy_short:
+	LEAL -65536(DX), DX
+	MOVL DX, CX
+	MOVW $0x001d, (AX)
+	MOVW DX, 2(AX)
+	SARL $0x10, CX
+	MOVB CL, 4(AX)
+	ADDQ $0x05, BX
+	ADDQ $0x05, AX
+	JMP gen_emit_copy_end
+
+repeat_four_standalone_emit_copy_short:
+	LEAL -256(DX), DX
+	MOVW $0x0019, (AX)
+	MOVW DX, 2(AX)
+	ADDQ $0x04, BX
+	ADDQ $0x04, AX
+	JMP gen_emit_copy_end
+
+repeat_three_standalone_emit_copy_short:
+	LEAL -4(DX), DX
+	MOVW $0x0015, (AX)
+	MOVB DL, 2(AX)
+	ADDQ $0x03, BX
+	ADDQ $0x03, AX
+	JMP gen_emit_copy_end
+
+repeat_two_standalone_emit_copy_short:
+	SHLL $0x02, DX
+	ORL $0x01, DX
+	MOVW DX, (AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+	JMP gen_emit_copy_end
+
+repeat_two_offset_standalone_emit_copy_short:
+	XORQ SI, SI
+	LEAL 1(SI)(DX*4), DX
+	MOVB CL, 1(AX)
+	SARL $0x08, CX
+	SHLL $0x05, CX
+	ORL CX, DX
+	MOVB DL, (AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+	JMP gen_emit_copy_end
+
+two_byte_offset_short_standalone:
+	// Length <= 64: pick 2-byte copy-1 (len < 12 and offset < 2048)
+	// or 3-byte copy-2 tag.
+	MOVL DX, SI
+	SHLL $0x02, SI
+	CMPL DX, $0x0c
+	JAE emit_copy_three_standalone
+	CMPL CX, $0x00000800
+	JAE emit_copy_three_standalone
+	LEAL -15(SI), SI
+	MOVB CL, 1(AX)
+	SHRL $0x08, CX
+	SHLL $0x05, CX
+	ORL CX, SI
+	MOVB SI, (AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+	JMP gen_emit_copy_end
+
+emit_copy_three_standalone:
+	LEAL -2(SI), SI
+	MOVB SI, (AX)
+	MOVW CX, 1(AX)
+	ADDQ $0x03, BX
+	ADDQ $0x03, AX
+
+gen_emit_copy_end:
+	MOVQ BX, ret+40(FP)
+	RET
+
+// func emitCopyNoRepeat(dst []byte, offset int, length int) int
+//
+// emitCopyNoRepeat encodes a copy using only Snappy-compatible tags
+// (no S2 repeat extension): long matches become a chain of maximal copy
+// tags. Returns the number of bytes written.
+// Register use: AX = dst write pointer, CX = offset, DX = remaining length,
+// BX = running return value.
+TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48
+	XORQ BX, BX
+	MOVQ dst_base+0(FP), AX
+	MOVQ offset+24(FP), CX
+	MOVQ length+32(FP), DX
+
+	// emitCopy
+	CMPL CX, $0x00010000
+	JB two_byte_offset_standalone_snappy
+
+four_bytes_loop_back_standalone_snappy:
+	// 4-byte offset: emit 5-byte copy-4 tags of up to 64 bytes each.
+	CMPL DX, $0x40
+	JBE four_bytes_remain_standalone_snappy
+	MOVB $0xff, (AX)
+	MOVL CX, 1(AX)
+	LEAL -64(DX), DX
+	ADDQ $0x05, BX
+	ADDQ $0x05, AX
+	CMPL DX, $0x04
+	JB four_bytes_remain_standalone_snappy
+	JMP four_bytes_loop_back_standalone_snappy
+
+four_bytes_remain_standalone_snappy:
+	TESTL DX, DX
+	JZ gen_emit_copy_end_snappy
+	XORL SI, SI
+	LEAL -1(SI)(DX*4), DX
+	MOVB DL, (AX)
+	MOVL CX, 1(AX)
+	ADDQ $0x05, BX
+	ADDQ $0x05, AX
+	JMP gen_emit_copy_end_snappy
+
+two_byte_offset_standalone_snappy:
+	// 2-byte offset: emit 3-byte copy-2 tags of 60 bytes while length > 64.
+	CMPL DX, $0x40
+	JBE two_byte_offset_short_standalone_snappy
+	MOVB $0xee, (AX)
+	MOVW CX, 1(AX)
+	LEAL -60(DX), DX
+	ADDQ $0x03, AX
+	ADDQ $0x03, BX
+	JMP two_byte_offset_standalone_snappy
+
+two_byte_offset_short_standalone_snappy:
+	// Final tag: 2-byte copy-1 (len < 12, offset < 2048) or 3-byte copy-2.
+	MOVL DX, SI
+	SHLL $0x02, SI
+	CMPL DX, $0x0c
+	JAE emit_copy_three_standalone_snappy
+	CMPL CX, $0x00000800
+	JAE emit_copy_three_standalone_snappy
+	LEAL -15(SI), SI
+	MOVB CL, 1(AX)
+	SHRL $0x08, CX
+	SHLL $0x05, CX
+	ORL CX, SI
+	MOVB SI, (AX)
+	ADDQ $0x02, BX
+	ADDQ $0x02, AX
+	JMP gen_emit_copy_end_snappy
+
+emit_copy_three_standalone_snappy:
+	LEAL -2(SI), SI
+	MOVB SI, (AX)
+	MOVW CX, 1(AX)
+	ADDQ $0x03, BX
+	ADDQ $0x03, AX
+
+gen_emit_copy_end_snappy:
+	MOVQ BX, ret+40(FP)
+	RET
+
+// func matchLen(a []byte, b []byte) int
+// Requires: BMI
+//
+// matchLen returns the length of the common prefix of a and b, comparing
+// 8 bytes at a time and locating the first differing byte with TZCNT/BSF.
+// Only a's length bounds the scan; callers must ensure len(b) >= len(a).
+// Register use: AX = a pointer, CX = b pointer, DX = remaining bytes,
+// SI = matched length (return value).
+TEXT ·matchLen(SB), NOSPLIT, $0-56
+	MOVQ a_base+0(FP), AX
+	MOVQ b_base+24(FP), CX
+	MOVQ a_len+8(FP), DX
+
+	// matchLen
+	XORL SI, SI
+	CMPL DX, $0x08
+	JB matchlen_match4_standalone
+
+matchlen_loopback_standalone:
+	// XOR of two 8-byte words is zero iff they match entirely.
+	MOVQ (AX)(SI*1), BX
+	XORQ (CX)(SI*1), BX
+	TESTQ BX, BX
+	JZ matchlen_loop_standalone
+
+#ifdef GOAMD64_v3
+	TZCNTQ BX, BX
+
+#else
+	BSFQ BX, BX
+
+#endif
+	// Lowest set bit / 8 = index of the first differing byte.
+	SARQ $0x03, BX
+	LEAL (SI)(BX*1), SI
+	JMP gen_match_len_end
+
+matchlen_loop_standalone:
+	LEAL -8(DX), DX
+	LEAL 8(SI), SI
+	CMPL DX, $0x08
+	JAE matchlen_loopback_standalone
+
+matchlen_match4_standalone:
+	// Tail: compare 4, then 2, then 1 remaining bytes.
+	CMPL DX, $0x04
+	JB matchlen_match2_standalone
+	MOVL (AX)(SI*1), BX
+	CMPL (CX)(SI*1), BX
+	JNE matchlen_match2_standalone
+	LEAL -4(DX), DX
+	LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+	CMPL DX, $0x01
+	JE matchlen_match1_standalone
+	JB gen_match_len_end
+	MOVW (AX)(SI*1), BX
+	CMPW (CX)(SI*1), BX
+	JNE matchlen_match1_standalone
+	LEAL 2(SI), SI
+	SUBL $0x02, DX
+	JZ gen_match_len_end
+
+matchlen_match1_standalone:
+	MOVB (AX)(SI*1), BL
+	CMPB (CX)(SI*1), BL
+	JNE gen_match_len_end
+	LEAL 1(SI), SI
+
+gen_match_len_end:
+	MOVQ SI, ret+48(FP)
+	RET
+
+// func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+// Requires: SSE2
+//
+// cvtLZ4BlockAsm transcodes an LZ4 block in src directly into S2 format in
+// dst without decompressing. Returns the uncompressed size represented and
+// the number of dst bytes used. On malformed input it returns
+// uncompressed = -1; when dst is too small it returns uncompressed = -2
+// (dstUsed is left as loaded by the caller in both error paths).
+// Register use: AX = dst write pointer, CX = dst safety limit (end-10),
+// DX = src read pointer, BX = src end, SI = uncompressed total,
+// DI = previous copy offset (for S2 repeat detection).
+TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64
+	XORQ SI, SI
+	MOVQ dst_base+0(FP), AX
+	MOVQ dst_len+8(FP), CX
+	MOVQ src_base+24(FP), DX
+	MOVQ src_len+32(FP), BX
+	LEAQ (DX)(BX*1), BX
+	LEAQ -10(AX)(CX*1), CX
+	XORQ DI, DI
+
+lz4_s2_loop:
+	// One LZ4 sequence per iteration: token, literals, then match.
+	CMPQ DX, BX
+	JAE lz4_s2_corrupt
+	CMPQ AX, CX
+	JAE lz4_s2_dstfull
+	// R8 = token; R9 = literal length nibble, R10 = match length nibble.
+	MOVBQZX (DX), R8
+	MOVQ R8, R9
+	MOVQ R8, R10
+	SHRQ $0x04, R9
+	ANDQ $0x0f, R10
+	CMPQ R8, $0xf0
+	JB lz4_s2_ll_end
+
+lz4_s2_ll_loop:
+	// Literal length 15: accumulate 0xff extension bytes.
+	INCQ DX
+	CMPQ DX, BX
+	JAE lz4_s2_corrupt
+	MOVBQZX (DX), R8
+	ADDQ R8, R9
+	CMPQ R8, $0xff
+	JEQ lz4_s2_ll_loop
+
+lz4_s2_ll_end:
+	// R8 = end of literals in src; match length gets LZ4's +4 minimum.
+	LEAQ (DX)(R9*1), R8
+	ADDQ $0x04, R10
+	CMPQ R8, BX
+	JAE lz4_s2_corrupt
+	INCQ DX
+	INCQ R8
+	TESTQ R9, R9
+	JZ lz4_s2_lits_done
+	LEAQ (AX)(R9*1), R11
+	CMPQ R11, CX
+	JAE lz4_s2_dstfull
+	ADDQ R9, SI
+	// Emit S2 literal header for R9 literal bytes (R11 = length-1).
+	LEAL -1(R9), R11
+	CMPL R11, $0x3c
+	JB one_byte_lz4_s2
+	CMPL R11, $0x00000100
+	JB two_bytes_lz4_s2
+	CMPL R11, $0x00010000
+	JB three_bytes_lz4_s2
+	CMPL R11, $0x01000000
+	JB four_bytes_lz4_s2
+	MOVB $0xfc, (AX)
+	MOVL R11, 1(AX)
+	ADDQ $0x05, AX
+	JMP memmove_long_lz4_s2
+
+four_bytes_lz4_s2:
+	MOVL R11, R12
+	SHRL $0x10, R12
+	MOVB $0xf8, (AX)
+	MOVW R11, 1(AX)
+	MOVB R12, 3(AX)
+	ADDQ $0x04, AX
+	JMP memmove_long_lz4_s2
+
+three_bytes_lz4_s2:
+	MOVB $0xf4, (AX)
+	MOVW R11, 1(AX)
+	ADDQ $0x03, AX
+	JMP memmove_long_lz4_s2
+
+two_bytes_lz4_s2:
+	MOVB $0xf0, (AX)
+	MOVB R11, 1(AX)
+	ADDQ $0x02, AX
+	CMPL R11, $0x40
+	JB memmove_lz4_s2
+	JMP memmove_long_lz4_s2
+
+one_byte_lz4_s2:
+	SHLB $0x02, R11
+	MOVB R11, (AX)
+	ADDQ $0x01, AX
+
+memmove_lz4_s2:
+	LEAQ (AX)(R9*1), R11
+
+	// genMemMoveShort: copy up to 64 literal bytes from src to dst.
+	CMPQ R9, $0x08
+	JBE emit_lit_memmove_lz4_s2_memmove_move_8
+	CMPQ R9, $0x10
+	JBE emit_lit_memmove_lz4_s2_memmove_move_8through16
+	CMPQ R9, $0x20
+	JBE emit_lit_memmove_lz4_s2_memmove_move_17through32
+	JMP emit_lit_memmove_lz4_s2_memmove_move_33through64
+
+emit_lit_memmove_lz4_s2_memmove_move_8:
+	MOVQ (DX), R12
+	MOVQ R12, (AX)
+	JMP memmove_end_copy_lz4_s2
+
+emit_lit_memmove_lz4_s2_memmove_move_8through16:
+	MOVQ (DX), R12
+	MOVQ -8(DX)(R9*1), DX
+	MOVQ R12, (AX)
+	MOVQ DX, -8(AX)(R9*1)
+	JMP memmove_end_copy_lz4_s2
+
+emit_lit_memmove_lz4_s2_memmove_move_17through32:
+	MOVOU (DX), X0
+	MOVOU -16(DX)(R9*1), X1
+	MOVOU X0, (AX)
+	MOVOU X1, -16(AX)(R9*1)
+	JMP memmove_end_copy_lz4_s2
+
+emit_lit_memmove_lz4_s2_memmove_move_33through64:
+	MOVOU (DX), X0
+	MOVOU 16(DX), X1
+	MOVOU -32(DX)(R9*1), X2
+	MOVOU -16(DX)(R9*1), X3
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(R9*1)
+	MOVOU X3, -16(AX)(R9*1)
+
+memmove_end_copy_lz4_s2:
+	MOVQ R11, AX
+	JMP lz4_s2_lits_emit_done
+
+memmove_long_lz4_s2:
+	LEAQ (AX)(R9*1), R11
+
+	// genMemMoveLong: 32-byte SSE loop for literal runs over 64 bytes.
+	MOVOU (DX), X0
+	MOVOU 16(DX), X1
+	MOVOU -32(DX)(R9*1), X2
+	MOVOU -16(DX)(R9*1), X3
+	MOVQ R9, R13
+	SHRQ $0x05, R13
+	MOVQ AX, R12
+	ANDL $0x0000001f, R12
+	MOVQ $0x00000040, R14
+	SUBQ R12, R14
+	DECQ R13
+	JA emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32
+	LEAQ -32(DX)(R14*1), R12
+	LEAQ -32(AX)(R14*1), R15
+
+emit_lit_memmove_long_lz4_s2large_big_loop_back:
+	MOVOU (R12), X4
+	MOVOU 16(R12), X5
+	MOVOA X4, (R15)
+	MOVOA X5, 16(R15)
+	ADDQ $0x20, R15
+	ADDQ $0x20, R12
+	ADDQ $0x20, R14
+	DECQ R13
+	JNA emit_lit_memmove_long_lz4_s2large_big_loop_back
+
+emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32:
+	MOVOU -32(DX)(R14*1), X4
+	MOVOU -16(DX)(R14*1), X5
+	MOVOA X4, -32(AX)(R14*1)
+	MOVOA X5, -16(AX)(R14*1)
+	ADDQ $0x20, R14
+	CMPQ R9, R14
+	JAE emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(R9*1)
+	MOVOU X3, -16(AX)(R9*1)
+	MOVQ R11, AX
+
+lz4_s2_lits_emit_done:
+	MOVQ R8, DX
+
+lz4_s2_lits_done:
+	// End of input is only valid right after literals with the LZ4
+	// end-of-block match nibble (original nibble 0 -> R10 == 4).
+	CMPQ DX, BX
+	JNE lz4_s2_match
+	CMPQ R10, $0x04
+	JEQ lz4_s2_done
+	JMP lz4_s2_corrupt
+
+lz4_s2_match:
+	// Read 2-byte little-endian match offset into R9; reject offset 0
+	// or offsets pointing before the produced output.
+	LEAQ 2(DX), R8
+	CMPQ R8, BX
+	JAE lz4_s2_corrupt
+	MOVWQZX (DX), R9
+	MOVQ R8, DX
+	TESTQ R9, R9
+	JZ lz4_s2_corrupt
+	CMPQ R9, SI
+	JA lz4_s2_corrupt
+	CMPQ R10, $0x13
+	JNE lz4_s2_ml_done
+
+lz4_s2_ml_loop:
+	// Match length nibble 15 (+4 = 0x13): accumulate extension bytes.
+	MOVBQZX (DX), R8
+	INCQ DX
+	ADDQ R8, R10
+	CMPQ DX, BX
+	JAE lz4_s2_corrupt
+	CMPQ R8, $0xff
+	JEQ lz4_s2_ml_loop
+
+lz4_s2_ml_done:
+	// Same offset as last copy -> emit an S2 repeat instead of a copy.
+	ADDQ R10, SI
+	CMPQ R9, DI
+	JNE lz4_s2_docopy
+
+	// emitRepeat
+emit_repeat_again_lz4_s2:
+	MOVL R10, R8
+	LEAL -4(R10), R10
+	CMPL R8, $0x08
+	JBE repeat_two_lz4_s2
+	CMPL R8, $0x0c
+	JAE cant_repeat_two_offset_lz4_s2
+	CMPL R9, $0x00000800
+	JB repeat_two_offset_lz4_s2
+
+cant_repeat_two_offset_lz4_s2:
+	CMPL R10, $0x00000104
+	JB repeat_three_lz4_s2
+	CMPL R10, $0x00010100
+	JB repeat_four_lz4_s2
+	CMPL R10, $0x0100ffff
+	JB repeat_five_lz4_s2
+	LEAL -16842747(R10), R10
+	MOVL $0xfffb001d, (AX)
+	MOVB $0xff, 4(AX)
+	ADDQ $0x05, AX
+	JMP emit_repeat_again_lz4_s2
+
+repeat_five_lz4_s2:
+	LEAL -65536(R10), R10
+	MOVL R10, R9
+	MOVW $0x001d, (AX)
+	MOVW R10, 2(AX)
+	SARL $0x10, R9
+	MOVB R9, 4(AX)
+	ADDQ $0x05, AX
+	JMP lz4_s2_loop
+
+repeat_four_lz4_s2:
+	LEAL -256(R10), R10
+	MOVW $0x0019, (AX)
+	MOVW R10, 2(AX)
+	ADDQ $0x04, AX
+	JMP lz4_s2_loop
+
+repeat_three_lz4_s2:
+	LEAL -4(R10), R10
+	MOVW $0x0015, (AX)
+	MOVB R10, 2(AX)
+	ADDQ $0x03, AX
+	JMP lz4_s2_loop
+
+repeat_two_lz4_s2:
+	SHLL $0x02, R10
+	ORL $0x01, R10
+	MOVW R10, (AX)
+	ADDQ $0x02, AX
+	JMP lz4_s2_loop
+
+repeat_two_offset_lz4_s2:
+	XORQ R8, R8
+	LEAL 1(R8)(R10*4), R10
+	MOVB R9, 1(AX)
+	SARL $0x08, R9
+	SHLL $0x05, R9
+	ORL R9, R10
+	MOVB R10, (AX)
+	ADDQ $0x02, AX
+	JMP lz4_s2_loop
+
+lz4_s2_docopy:
+	// New offset: remember it in DI, then emit an S2 copy tag.
+	MOVQ R9, DI
+
+	// emitCopy
+	CMPL R10, $0x40
+	JBE two_byte_offset_short_lz4_s2
+	CMPL R9, $0x00000800
+	JAE long_offset_short_lz4_s2
+	MOVL $0x00000001, R8
+	LEAL 16(R8), R8
+	MOVB R9, 1(AX)
+	MOVL R9, R11
+	SHRL $0x08, R11
+	SHLL $0x05, R11
+	ORL R11, R8
+	MOVB R8, (AX)
+	ADDQ $0x02, AX
+	SUBL $0x08, R10
+
+	// emitRepeat
+	LEAL -4(R10), R10
+	JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
+
+emit_repeat_again_lz4_s2_emit_copy_short_2b:
+	MOVL R10, R8
+	LEAL -4(R10), R10
+	CMPL R8, $0x08
+	JBE repeat_two_lz4_s2_emit_copy_short_2b
+	CMPL R8, $0x0c
+	JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
+	CMPL R9, $0x00000800
+	JB repeat_two_offset_lz4_s2_emit_copy_short_2b
+
+cant_repeat_two_offset_lz4_s2_emit_copy_short_2b:
+	CMPL R10, $0x00000104
+	JB repeat_three_lz4_s2_emit_copy_short_2b
+	CMPL R10, $0x00010100
+	JB repeat_four_lz4_s2_emit_copy_short_2b
+	CMPL R10, $0x0100ffff
+	JB repeat_five_lz4_s2_emit_copy_short_2b
+	LEAL -16842747(R10), R10
+	MOVL $0xfffb001d, (AX)
+	MOVB $0xff, 4(AX)
+	ADDQ $0x05, AX
+	JMP emit_repeat_again_lz4_s2_emit_copy_short_2b
+
+repeat_five_lz4_s2_emit_copy_short_2b:
+	LEAL -65536(R10), R10
+	MOVL R10, R9
+	MOVW $0x001d, (AX)
+	MOVW R10, 2(AX)
+	SARL $0x10, R9
+	MOVB R9, 4(AX)
+	ADDQ $0x05, AX
+	JMP lz4_s2_loop
+
+repeat_four_lz4_s2_emit_copy_short_2b:
+	LEAL -256(R10), R10
+	MOVW $0x0019, (AX)
+	MOVW R10, 2(AX)
+	ADDQ $0x04, AX
+	JMP lz4_s2_loop
+
+repeat_three_lz4_s2_emit_copy_short_2b:
+	LEAL -4(R10), R10
+	MOVW $0x0015, (AX)
+	MOVB R10, 2(AX)
+	ADDQ $0x03, AX
+	JMP lz4_s2_loop
+
+repeat_two_lz4_s2_emit_copy_short_2b:
+	SHLL $0x02, R10
+	ORL $0x01, R10
+	MOVW R10, (AX)
+	ADDQ $0x02, AX
+	JMP lz4_s2_loop
+
+repeat_two_offset_lz4_s2_emit_copy_short_2b:
+	XORQ R8, R8
+	LEAL 1(R8)(R10*4), R10
+	MOVB R9, 1(AX)
+	SARL $0x08, R9
+	SHLL $0x05, R9
+	ORL R9, R10
+	MOVB R10, (AX)
+	ADDQ $0x02, AX
+	JMP lz4_s2_loop
+
+long_offset_short_lz4_s2:
+	MOVB $0xee, (AX)
+	MOVW R9, 1(AX)
+	LEAL -60(R10), R10
+	ADDQ $0x03, AX
+
+	// emitRepeat
+emit_repeat_again_lz4_s2_emit_copy_short:
+	MOVL R10, R8
+	LEAL -4(R10), R10
+	CMPL R8, $0x08
+	JBE repeat_two_lz4_s2_emit_copy_short
+	CMPL R8, $0x0c
+	JAE cant_repeat_two_offset_lz4_s2_emit_copy_short
+	CMPL R9, $0x00000800
+	JB repeat_two_offset_lz4_s2_emit_copy_short
+
+cant_repeat_two_offset_lz4_s2_emit_copy_short:
+	CMPL R10, $0x00000104
+	JB repeat_three_lz4_s2_emit_copy_short
+	CMPL R10, $0x00010100
+	JB repeat_four_lz4_s2_emit_copy_short
+	CMPL R10, $0x0100ffff
+	JB repeat_five_lz4_s2_emit_copy_short
+	LEAL -16842747(R10), R10
+	MOVL $0xfffb001d, (AX)
+	MOVB $0xff, 4(AX)
+	ADDQ $0x05, AX
+	JMP emit_repeat_again_lz4_s2_emit_copy_short
+
+repeat_five_lz4_s2_emit_copy_short:
+	LEAL -65536(R10), R10
+	MOVL R10, R9
+	MOVW $0x001d, (AX)
+	MOVW R10, 2(AX)
+	SARL $0x10, R9
+	MOVB R9, 4(AX)
+	ADDQ $0x05, AX
+	JMP lz4_s2_loop
+
+repeat_four_lz4_s2_emit_copy_short:
+	LEAL -256(R10), R10
+	MOVW $0x0019, (AX)
+	MOVW R10, 2(AX)
+	ADDQ $0x04, AX
+	JMP lz4_s2_loop
+
+repeat_three_lz4_s2_emit_copy_short:
+	LEAL -4(R10), R10
+	MOVW $0x0015, (AX)
+	MOVB R10, 2(AX)
+	ADDQ $0x03, AX
+	JMP lz4_s2_loop
+
+repeat_two_lz4_s2_emit_copy_short:
+	SHLL $0x02, R10
+	ORL $0x01, R10
+	MOVW R10, (AX)
+	ADDQ $0x02, AX
+	JMP lz4_s2_loop
+
+repeat_two_offset_lz4_s2_emit_copy_short:
+	XORQ R8, R8
+	LEAL 1(R8)(R10*4), R10
+	MOVB R9, 1(AX)
+	SARL $0x08, R9
+	SHLL $0x05, R9
+	ORL R9, R10
+	MOVB R10, (AX)
+	ADDQ $0x02, AX
+	JMP lz4_s2_loop
+
+two_byte_offset_short_lz4_s2:
+	// Short match: 2-byte copy-1 (len < 12, offset < 2048) or 3-byte copy-2.
+	MOVL R10, R8
+	SHLL $0x02, R8
+	CMPL R10, $0x0c
+	JAE emit_copy_three_lz4_s2
+	CMPL R9, $0x00000800
+	JAE emit_copy_three_lz4_s2
+	LEAL -15(R8), R8
+	MOVB R9, 1(AX)
+	SHRL $0x08, R9
+	SHLL $0x05, R9
+	ORL R9, R8
+	MOVB R8, (AX)
+	ADDQ $0x02, AX
+	JMP lz4_s2_loop
+
+emit_copy_three_lz4_s2:
+	LEAL -2(R8), R8
+	MOVB R8, (AX)
+	MOVW R9, 1(AX)
+	ADDQ $0x03, AX
+	JMP lz4_s2_loop
+
+lz4_s2_done:
+	MOVQ dst_base+0(FP), CX
+	SUBQ CX, AX
+	MOVQ SI, uncompressed+48(FP)
+	MOVQ AX, dstUsed+56(FP)
+	RET
+
+lz4_s2_corrupt:
+	// Malformed LZ4 input: uncompressed = -1.
+	XORQ AX, AX
+	LEAQ -1(AX), SI
+	MOVQ SI, uncompressed+48(FP)
+	RET
+
+lz4_s2_dstfull:
+	// dst exhausted: uncompressed = -2.
+	XORQ AX, AX
+	LEAQ -2(AX), SI
+	MOVQ SI, uncompressed+48(FP)
+	RET
+
+// func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+// Requires: SSE2
+TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64
+ XORQ SI, SI
+ MOVQ dst_base+0(FP), AX
+ MOVQ dst_len+8(FP), CX
+ MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), BX
+ LEAQ (DX)(BX*1), BX
+ LEAQ -10(AX)(CX*1), CX
+ XORQ DI, DI
+
+lz4s_s2_loop:
+ CMPQ DX, BX
+ JAE lz4s_s2_corrupt
+ CMPQ AX, CX
+ JAE lz4s_s2_dstfull
+ MOVBQZX (DX), R8
+ MOVQ R8, R9
+ MOVQ R8, R10
+ SHRQ $0x04, R9
+ ANDQ $0x0f, R10
+ CMPQ R8, $0xf0
+ JB lz4s_s2_ll_end
+
+lz4s_s2_ll_loop:
+ INCQ DX
+ CMPQ DX, BX
+ JAE lz4s_s2_corrupt
+ MOVBQZX (DX), R8
+ ADDQ R8, R9
+ CMPQ R8, $0xff
+ JEQ lz4s_s2_ll_loop
+
+lz4s_s2_ll_end:
+ LEAQ (DX)(R9*1), R8
+ ADDQ $0x03, R10
+ CMPQ R8, BX
+ JAE lz4s_s2_corrupt
+ INCQ DX
+ INCQ R8
+ TESTQ R9, R9
+ JZ lz4s_s2_lits_done
+ LEAQ (AX)(R9*1), R11
+ CMPQ R11, CX
+ JAE lz4s_s2_dstfull
+ ADDQ R9, SI
+ LEAL -1(R9), R11
+ CMPL R11, $0x3c
+ JB one_byte_lz4s_s2
+ CMPL R11, $0x00000100
+ JB two_bytes_lz4s_s2
+ CMPL R11, $0x00010000
+ JB three_bytes_lz4s_s2
+ CMPL R11, $0x01000000
+ JB four_bytes_lz4s_s2
+ MOVB $0xfc, (AX)
+ MOVL R11, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_lz4s_s2
+
+four_bytes_lz4s_s2:
+ MOVL R11, R12
+ SHRL $0x10, R12
+ MOVB $0xf8, (AX)
+ MOVW R11, 1(AX)
+ MOVB R12, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_lz4s_s2
+
+three_bytes_lz4s_s2:
+ MOVB $0xf4, (AX)
+ MOVW R11, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_lz4s_s2
+
+two_bytes_lz4s_s2:
+ MOVB $0xf0, (AX)
+ MOVB R11, 1(AX)
+ ADDQ $0x02, AX
+ CMPL R11, $0x40
+ JB memmove_lz4s_s2
+ JMP memmove_long_lz4s_s2
+
+one_byte_lz4s_s2:
+ SHLB $0x02, R11
+ MOVB R11, (AX)
+ ADDQ $0x01, AX
+
+memmove_lz4s_s2:
+ LEAQ (AX)(R9*1), R11
+
+ // genMemMoveShort
+ CMPQ R9, $0x08
+ JBE emit_lit_memmove_lz4s_s2_memmove_move_8
+ CMPQ R9, $0x10
+ JBE emit_lit_memmove_lz4s_s2_memmove_move_8through16
+ CMPQ R9, $0x20
+ JBE emit_lit_memmove_lz4s_s2_memmove_move_17through32
+ JMP emit_lit_memmove_lz4s_s2_memmove_move_33through64
+
+emit_lit_memmove_lz4s_s2_memmove_move_8:
+ MOVQ (DX), R12
+ MOVQ R12, (AX)
+ JMP memmove_end_copy_lz4s_s2
+
+emit_lit_memmove_lz4s_s2_memmove_move_8through16:
+ MOVQ (DX), R12
+ MOVQ -8(DX)(R9*1), DX
+ MOVQ R12, (AX)
+ MOVQ DX, -8(AX)(R9*1)
+ JMP memmove_end_copy_lz4s_s2
+
+emit_lit_memmove_lz4s_s2_memmove_move_17through32:
+ MOVOU (DX), X0
+ MOVOU -16(DX)(R9*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R9*1)
+ JMP memmove_end_copy_lz4s_s2
+
+emit_lit_memmove_lz4s_s2_memmove_move_33through64:
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R9*1), X2
+ MOVOU -16(DX)(R9*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R9*1)
+ MOVOU X3, -16(AX)(R9*1)
+
+memmove_end_copy_lz4s_s2:
+ MOVQ R11, AX
+ JMP lz4s_s2_lits_emit_done
+
+memmove_long_lz4s_s2:
+ LEAQ (AX)(R9*1), R11
+
+ // genMemMoveLong
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R9*1), X2
+ MOVOU -16(DX)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ AX, R12
+ ANDL $0x0000001f, R12
+ MOVQ $0x00000040, R14
+ SUBQ R12, R14
+ DECQ R13
+ JA emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32
+ LEAQ -32(DX)(R14*1), R12
+ LEAQ -32(AX)(R14*1), R15
+
+emit_lit_memmove_long_lz4s_s2large_big_loop_back:
+ MOVOU (R12), X4
+ MOVOU 16(R12), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R12
+ ADDQ $0x20, R14
+ DECQ R13
+ JNA emit_lit_memmove_long_lz4s_s2large_big_loop_back
+
+emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32:
+ MOVOU -32(DX)(R14*1), X4
+ MOVOU -16(DX)(R14*1), X5
+ MOVOA X4, -32(AX)(R14*1)
+ MOVOA X5, -16(AX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
+ JAE emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R9*1)
+ MOVOU X3, -16(AX)(R9*1)
+ MOVQ R11, AX
+
+lz4s_s2_lits_emit_done:
+ MOVQ R8, DX
+
+lz4s_s2_lits_done:
+ CMPQ DX, BX
+ JNE lz4s_s2_match
+ CMPQ R10, $0x03
+ JEQ lz4s_s2_done
+ JMP lz4s_s2_corrupt
+
+lz4s_s2_match:
+ CMPQ R10, $0x03
+ JEQ lz4s_s2_loop
+ LEAQ 2(DX), R8
+ CMPQ R8, BX
+ JAE lz4s_s2_corrupt
+ MOVWQZX (DX), R9
+ MOVQ R8, DX
+ TESTQ R9, R9
+ JZ lz4s_s2_corrupt
+ CMPQ R9, SI
+ JA lz4s_s2_corrupt
+ CMPQ R10, $0x12
+ JNE lz4s_s2_ml_done
+
+lz4s_s2_ml_loop:
+ MOVBQZX (DX), R8
+ INCQ DX
+ ADDQ R8, R10
+ CMPQ DX, BX
+ JAE lz4s_s2_corrupt
+ CMPQ R8, $0xff
+ JEQ lz4s_s2_ml_loop
+
+lz4s_s2_ml_done:
+ ADDQ R10, SI
+ CMPQ R9, DI
+ JNE lz4s_s2_docopy
+
+ // emitRepeat
+emit_repeat_again_lz4_s2:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JBE repeat_two_lz4_s2
+ CMPL R8, $0x0c
+ JAE cant_repeat_two_offset_lz4_s2
+ CMPL R9, $0x00000800
+ JB repeat_two_offset_lz4_s2
+
+cant_repeat_two_offset_lz4_s2:
+ CMPL R10, $0x00000104
+ JB repeat_three_lz4_s2
+ CMPL R10, $0x00010100
+ JB repeat_four_lz4_s2
+ CMPL R10, $0x0100ffff
+ JB repeat_five_lz4_s2
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_lz4_s2
+
+repeat_five_lz4_s2:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
+ MOVW $0x001d, (AX)
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
+ ADDQ $0x05, AX
+ JMP lz4s_s2_loop
+
+repeat_four_lz4_s2:
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP lz4s_s2_loop
+
+repeat_three_lz4_s2:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP lz4s_s2_loop
+
+repeat_two_lz4_s2:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+repeat_two_offset_lz4_s2:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+lz4s_s2_docopy:
+ MOVQ R9, DI
+
+ // emitCopy
+ CMPL R10, $0x40
+ JBE two_byte_offset_short_lz4_s2
+ CMPL R9, $0x00000800
+ JAE long_offset_short_lz4_s2
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB R9, 1(AX)
+ MOVL R9, R11
+ SHRL $0x08, R11
+ SHLL $0x05, R11
+ ORL R11, R8
+ MOVB R8, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R10
+
+ // emitRepeat
+ LEAL -4(R10), R10
+ JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
+
+emit_repeat_again_lz4_s2_emit_copy_short_2b:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JBE repeat_two_lz4_s2_emit_copy_short_2b
+ CMPL R8, $0x0c
+ JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
+ CMPL R9, $0x00000800
+ JB repeat_two_offset_lz4_s2_emit_copy_short_2b
+
+cant_repeat_two_offset_lz4_s2_emit_copy_short_2b:
+ CMPL R10, $0x00000104
+ JB repeat_three_lz4_s2_emit_copy_short_2b
+ CMPL R10, $0x00010100
+ JB repeat_four_lz4_s2_emit_copy_short_2b
+ CMPL R10, $0x0100ffff
+ JB repeat_five_lz4_s2_emit_copy_short_2b
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_lz4_s2_emit_copy_short_2b
+
+repeat_five_lz4_s2_emit_copy_short_2b:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
+ MOVW $0x001d, (AX)
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
+ ADDQ $0x05, AX
+ JMP lz4s_s2_loop
+
+repeat_four_lz4_s2_emit_copy_short_2b:
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP lz4s_s2_loop
+
+repeat_three_lz4_s2_emit_copy_short_2b:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP lz4s_s2_loop
+
+repeat_two_lz4_s2_emit_copy_short_2b:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+repeat_two_offset_lz4_s2_emit_copy_short_2b:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+long_offset_short_lz4_s2:
+ MOVB $0xee, (AX)
+ MOVW R9, 1(AX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, AX
+
+ // emitRepeat
+emit_repeat_again_lz4_s2_emit_copy_short:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JBE repeat_two_lz4_s2_emit_copy_short
+ CMPL R8, $0x0c
+ JAE cant_repeat_two_offset_lz4_s2_emit_copy_short
+ CMPL R9, $0x00000800
+ JB repeat_two_offset_lz4_s2_emit_copy_short
+
+cant_repeat_two_offset_lz4_s2_emit_copy_short:
+ CMPL R10, $0x00000104
+ JB repeat_three_lz4_s2_emit_copy_short
+ CMPL R10, $0x00010100
+ JB repeat_four_lz4_s2_emit_copy_short
+ CMPL R10, $0x0100ffff
+ JB repeat_five_lz4_s2_emit_copy_short
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_lz4_s2_emit_copy_short
+
+repeat_five_lz4_s2_emit_copy_short:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
+ MOVW $0x001d, (AX)
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
+ ADDQ $0x05, AX
+ JMP lz4s_s2_loop
+
+repeat_four_lz4_s2_emit_copy_short:
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP lz4s_s2_loop
+
+repeat_three_lz4_s2_emit_copy_short:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP lz4s_s2_loop
+
+repeat_two_lz4_s2_emit_copy_short:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+repeat_two_offset_lz4_s2_emit_copy_short:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+two_byte_offset_short_lz4_s2:
+ MOVL R10, R8
+ SHLL $0x02, R8
+ CMPL R10, $0x0c
+ JAE emit_copy_three_lz4_s2
+ CMPL R9, $0x00000800
+ JAE emit_copy_three_lz4_s2
+ LEAL -15(R8), R8
+ MOVB R9, 1(AX)
+ SHRL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R8
+ MOVB R8, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_s2_loop
+
+emit_copy_three_lz4_s2:
+ LEAL -2(R8), R8
+ MOVB R8, (AX)
+ MOVW R9, 1(AX)
+ ADDQ $0x03, AX
+ JMP lz4s_s2_loop
+
+lz4s_s2_done:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ SI, uncompressed+48(FP)
+ MOVQ AX, dstUsed+56(FP)
+ RET
+
+lz4s_s2_corrupt:
+ XORQ AX, AX
+ LEAQ -1(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+lz4s_s2_dstfull:
+ XORQ AX, AX
+ LEAQ -2(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+// func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+// Requires: SSE2
+TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64
+ XORQ SI, SI
+ MOVQ dst_base+0(FP), AX
+ MOVQ dst_len+8(FP), CX
+ MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), BX
+ LEAQ (DX)(BX*1), BX
+ LEAQ -10(AX)(CX*1), CX
+
+lz4_snappy_loop:
+ CMPQ DX, BX
+ JAE lz4_snappy_corrupt
+ CMPQ AX, CX
+ JAE lz4_snappy_dstfull
+ MOVBQZX (DX), DI
+ MOVQ DI, R8
+ MOVQ DI, R9
+ SHRQ $0x04, R8
+ ANDQ $0x0f, R9
+ CMPQ DI, $0xf0
+ JB lz4_snappy_ll_end
+
+lz4_snappy_ll_loop:
+ INCQ DX
+ CMPQ DX, BX
+ JAE lz4_snappy_corrupt
+ MOVBQZX (DX), DI
+ ADDQ DI, R8
+ CMPQ DI, $0xff
+ JEQ lz4_snappy_ll_loop
+
+lz4_snappy_ll_end:
+ LEAQ (DX)(R8*1), DI
+ ADDQ $0x04, R9
+ CMPQ DI, BX
+ JAE lz4_snappy_corrupt
+ INCQ DX
+ INCQ DI
+ TESTQ R8, R8
+ JZ lz4_snappy_lits_done
+ LEAQ (AX)(R8*1), R10
+ CMPQ R10, CX
+ JAE lz4_snappy_dstfull
+ ADDQ R8, SI
+ LEAL -1(R8), R10
+ CMPL R10, $0x3c
+ JB one_byte_lz4_snappy
+ CMPL R10, $0x00000100
+ JB two_bytes_lz4_snappy
+ CMPL R10, $0x00010000
+ JB three_bytes_lz4_snappy
+ CMPL R10, $0x01000000
+ JB four_bytes_lz4_snappy
+ MOVB $0xfc, (AX)
+ MOVL R10, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_lz4_snappy
+
+four_bytes_lz4_snappy:
+ MOVL R10, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (AX)
+ MOVW R10, 1(AX)
+ MOVB R11, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_lz4_snappy
+
+three_bytes_lz4_snappy:
+ MOVB $0xf4, (AX)
+ MOVW R10, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_lz4_snappy
+
+two_bytes_lz4_snappy:
+ MOVB $0xf0, (AX)
+ MOVB R10, 1(AX)
+ ADDQ $0x02, AX
+ CMPL R10, $0x40
+ JB memmove_lz4_snappy
+ JMP memmove_long_lz4_snappy
+
+one_byte_lz4_snappy:
+ SHLB $0x02, R10
+ MOVB R10, (AX)
+ ADDQ $0x01, AX
+
+memmove_lz4_snappy:
+ LEAQ (AX)(R8*1), R10
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_lz4_snappy_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_lz4_snappy_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_lz4_snappy_memmove_move_17through32
+ JMP emit_lit_memmove_lz4_snappy_memmove_move_33through64
+
+emit_lit_memmove_lz4_snappy_memmove_move_8:
+ MOVQ (DX), R11
+ MOVQ R11, (AX)
+ JMP memmove_end_copy_lz4_snappy
+
+emit_lit_memmove_lz4_snappy_memmove_move_8through16:
+ MOVQ (DX), R11
+ MOVQ -8(DX)(R8*1), DX
+ MOVQ R11, (AX)
+ MOVQ DX, -8(AX)(R8*1)
+ JMP memmove_end_copy_lz4_snappy
+
+emit_lit_memmove_lz4_snappy_memmove_move_17through32:
+ MOVOU (DX), X0
+ MOVOU -16(DX)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_lz4_snappy
+
+emit_lit_memmove_lz4_snappy_memmove_move_33through64:
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R8*1), X2
+ MOVOU -16(DX)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_lz4_snappy:
+ MOVQ R10, AX
+ JMP lz4_snappy_lits_emit_done
+
+memmove_long_lz4_snappy:
+ LEAQ (AX)(R8*1), R10
+
+ // genMemMoveLong
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R8*1), X2
+ MOVOU -16(DX)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
+ JA emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32
+ LEAQ -32(DX)(R13*1), R11
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_lz4_snappylarge_big_loop_back:
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_lz4_snappylarge_big_loop_back
+
+emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32:
+ MOVOU -32(DX)(R13*1), X4
+ MOVOU -16(DX)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ R10, AX
+
+lz4_snappy_lits_emit_done:
+ MOVQ DI, DX
+
+lz4_snappy_lits_done:
+ CMPQ DX, BX
+ JNE lz4_snappy_match
+ CMPQ R9, $0x04
+ JEQ lz4_snappy_done
+ JMP lz4_snappy_corrupt
+
+lz4_snappy_match:
+ LEAQ 2(DX), DI
+ CMPQ DI, BX
+ JAE lz4_snappy_corrupt
+ MOVWQZX (DX), R8
+ MOVQ DI, DX
+ TESTQ R8, R8
+ JZ lz4_snappy_corrupt
+ CMPQ R8, SI
+ JA lz4_snappy_corrupt
+ CMPQ R9, $0x13
+ JNE lz4_snappy_ml_done
+
+lz4_snappy_ml_loop:
+ MOVBQZX (DX), DI
+ INCQ DX
+ ADDQ DI, R9
+ CMPQ DX, BX
+ JAE lz4_snappy_corrupt
+ CMPQ DI, $0xff
+ JEQ lz4_snappy_ml_loop
+
+lz4_snappy_ml_done:
+ ADDQ R9, SI
+
+ // emitCopy
+two_byte_offset_lz4_s2:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_lz4_s2
+ MOVB $0xee, (AX)
+ MOVW R8, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ CMPQ AX, CX
+ JAE lz4_snappy_loop
+ JMP two_byte_offset_lz4_s2
+
+two_byte_offset_short_lz4_s2:
+ MOVL R9, DI
+ SHLL $0x02, DI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_lz4_s2
+ CMPL R8, $0x00000800
+ JAE emit_copy_three_lz4_s2
+ LEAL -15(DI), DI
+ MOVB R8, 1(AX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP lz4_snappy_loop
+
+emit_copy_three_lz4_s2:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW R8, 1(AX)
+ ADDQ $0x03, AX
+ JMP lz4_snappy_loop
+
+lz4_snappy_done:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ SI, uncompressed+48(FP)
+ MOVQ AX, dstUsed+56(FP)
+ RET
+
+lz4_snappy_corrupt:
+ XORQ AX, AX
+ LEAQ -1(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+lz4_snappy_dstfull:
+ XORQ AX, AX
+ LEAQ -2(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+// func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+// Requires: SSE2
+TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64
+ XORQ SI, SI
+ MOVQ dst_base+0(FP), AX
+ MOVQ dst_len+8(FP), CX
+ MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), BX
+ LEAQ (DX)(BX*1), BX
+ LEAQ -10(AX)(CX*1), CX
+
+lz4s_snappy_loop:
+ CMPQ DX, BX
+ JAE lz4s_snappy_corrupt
+ CMPQ AX, CX
+ JAE lz4s_snappy_dstfull
+ MOVBQZX (DX), DI
+ MOVQ DI, R8
+ MOVQ DI, R9
+ SHRQ $0x04, R8
+ ANDQ $0x0f, R9
+ CMPQ DI, $0xf0
+ JB lz4s_snappy_ll_end
+
+lz4s_snappy_ll_loop:
+ INCQ DX
+ CMPQ DX, BX
+ JAE lz4s_snappy_corrupt
+ MOVBQZX (DX), DI
+ ADDQ DI, R8
+ CMPQ DI, $0xff
+ JEQ lz4s_snappy_ll_loop
+
+lz4s_snappy_ll_end:
+ LEAQ (DX)(R8*1), DI
+ ADDQ $0x03, R9
+ CMPQ DI, BX
+ JAE lz4s_snappy_corrupt
+ INCQ DX
+ INCQ DI
+ TESTQ R8, R8
+ JZ lz4s_snappy_lits_done
+ LEAQ (AX)(R8*1), R10
+ CMPQ R10, CX
+ JAE lz4s_snappy_dstfull
+ ADDQ R8, SI
+ LEAL -1(R8), R10
+ CMPL R10, $0x3c
+ JB one_byte_lz4s_snappy
+ CMPL R10, $0x00000100
+ JB two_bytes_lz4s_snappy
+ CMPL R10, $0x00010000
+ JB three_bytes_lz4s_snappy
+ CMPL R10, $0x01000000
+ JB four_bytes_lz4s_snappy
+ MOVB $0xfc, (AX)
+ MOVL R10, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_lz4s_snappy
+
+four_bytes_lz4s_snappy:
+ MOVL R10, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (AX)
+ MOVW R10, 1(AX)
+ MOVB R11, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_lz4s_snappy
+
+three_bytes_lz4s_snappy:
+ MOVB $0xf4, (AX)
+ MOVW R10, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_lz4s_snappy
+
+two_bytes_lz4s_snappy:
+ MOVB $0xf0, (AX)
+ MOVB R10, 1(AX)
+ ADDQ $0x02, AX
+ CMPL R10, $0x40
+ JB memmove_lz4s_snappy
+ JMP memmove_long_lz4s_snappy
+
+one_byte_lz4s_snappy:
+ SHLB $0x02, R10
+ MOVB R10, (AX)
+ ADDQ $0x01, AX
+
+memmove_lz4s_snappy:
+ LEAQ (AX)(R8*1), R10
+
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JBE emit_lit_memmove_lz4s_snappy_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_lz4s_snappy_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_lz4s_snappy_memmove_move_17through32
+ JMP emit_lit_memmove_lz4s_snappy_memmove_move_33through64
+
+emit_lit_memmove_lz4s_snappy_memmove_move_8:
+ MOVQ (DX), R11
+ MOVQ R11, (AX)
+ JMP memmove_end_copy_lz4s_snappy
+
+emit_lit_memmove_lz4s_snappy_memmove_move_8through16:
+ MOVQ (DX), R11
+ MOVQ -8(DX)(R8*1), DX
+ MOVQ R11, (AX)
+ MOVQ DX, -8(AX)(R8*1)
+ JMP memmove_end_copy_lz4s_snappy
+
+emit_lit_memmove_lz4s_snappy_memmove_move_17through32:
+ MOVOU (DX), X0
+ MOVOU -16(DX)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_lz4s_snappy
+
+emit_lit_memmove_lz4s_snappy_memmove_move_33through64:
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R8*1), X2
+ MOVOU -16(DX)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_lz4s_snappy:
+ MOVQ R10, AX
+ JMP lz4s_snappy_lits_emit_done
+
+memmove_long_lz4s_snappy:
+ LEAQ (AX)(R8*1), R10
+
+ // genMemMoveLong
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R8*1), X2
+ MOVOU -16(DX)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
+ JA emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32
+ LEAQ -32(DX)(R13*1), R11
+ LEAQ -32(AX)(R13*1), R14
+
+emit_lit_memmove_long_lz4s_snappylarge_big_loop_back:
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_lz4s_snappylarge_big_loop_back
+
+emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32:
+ MOVOU -32(DX)(R13*1), X4
+ MOVOU -16(DX)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ R10, AX
+
+lz4s_snappy_lits_emit_done:
+ MOVQ DI, DX
+
+lz4s_snappy_lits_done:
+ CMPQ DX, BX
+ JNE lz4s_snappy_match
+ CMPQ R9, $0x03
+ JEQ lz4s_snappy_done
+ JMP lz4s_snappy_corrupt
+
+lz4s_snappy_match:
+ CMPQ R9, $0x03
+ JEQ lz4s_snappy_loop
+ LEAQ 2(DX), DI
+ CMPQ DI, BX
+ JAE lz4s_snappy_corrupt
+ MOVWQZX (DX), R8
+ MOVQ DI, DX
+ TESTQ R8, R8
+ JZ lz4s_snappy_corrupt
+ CMPQ R8, SI
+ JA lz4s_snappy_corrupt
+ CMPQ R9, $0x12
+ JNE lz4s_snappy_ml_done
+
+lz4s_snappy_ml_loop:
+ MOVBQZX (DX), DI
+ INCQ DX
+ ADDQ DI, R9
+ CMPQ DX, BX
+ JAE lz4s_snappy_corrupt
+ CMPQ DI, $0xff
+ JEQ lz4s_snappy_ml_loop
+
+lz4s_snappy_ml_done:
+ ADDQ R9, SI
+
+ // emitCopy
+two_byte_offset_lz4_s2:
+ CMPL R9, $0x40
+ JBE two_byte_offset_short_lz4_s2
+ MOVB $0xee, (AX)
+ MOVW R8, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ CMPQ AX, CX
+ JAE lz4s_snappy_loop
+ JMP two_byte_offset_lz4_s2
+
+two_byte_offset_short_lz4_s2:
+ MOVL R9, DI
+ SHLL $0x02, DI
+ CMPL R9, $0x0c
+ JAE emit_copy_three_lz4_s2
+ CMPL R8, $0x00000800
+ JAE emit_copy_three_lz4_s2
+ LEAL -15(DI), DI
+ MOVB R8, 1(AX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP lz4s_snappy_loop
+
+emit_copy_three_lz4_s2:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW R8, 1(AX)
+ ADDQ $0x03, AX
+ JMP lz4s_snappy_loop
+
+lz4s_snappy_done:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ SI, uncompressed+48(FP)
+ MOVQ AX, dstUsed+56(FP)
+ RET
+
+lz4s_snappy_corrupt:
+ XORQ AX, AX
+ LEAQ -1(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+lz4s_snappy_dstfull:
+ XORQ AX, AX
+ LEAQ -2(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go
new file mode 100644
index 0000000000..dd9ecfe718
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/index.go
@@ -0,0 +1,598 @@
+// Copyright (c) 2022+ Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+)
+
+const (
+ S2IndexHeader = "s2idx\x00"
+ S2IndexTrailer = "\x00xdi2s"
+ maxIndexEntries = 1 << 16
+)
+
+// Index represents an S2/Snappy index.
+type Index struct {
+ TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown.
+ TotalCompressed int64 // Total Compressed size if known. Will be -1 if unknown.
+ info []struct {
+ compressedOffset int64
+ uncompressedOffset int64
+ }
+ estBlockUncomp int64
+}
+
+func (i *Index) reset(maxBlock int) {
+ i.estBlockUncomp = int64(maxBlock)
+ i.TotalCompressed = -1
+ i.TotalUncompressed = -1
+ if len(i.info) > 0 {
+ i.info = i.info[:0]
+ }
+}
+
+// allocInfos will allocate an empty slice of infos.
+func (i *Index) allocInfos(n int) {
+ if n > maxIndexEntries {
+ panic("n > maxIndexEntries")
+ }
+ i.info = make([]struct {
+ compressedOffset int64
+ uncompressedOffset int64
+ }, 0, n)
+}
+
+// add an uncompressed and compressed pair.
+// Entries must be sent in order.
+func (i *Index) add(compressedOffset, uncompressedOffset int64) error {
+ if i == nil {
+ return nil
+ }
+ lastIdx := len(i.info) - 1
+ if lastIdx >= 0 {
+ latest := i.info[lastIdx]
+ if latest.uncompressedOffset == uncompressedOffset {
+ // Uncompressed didn't change, don't add entry,
+ // but update start index.
+ latest.compressedOffset = compressedOffset
+ i.info[lastIdx] = latest
+ return nil
+ }
+ if latest.uncompressedOffset > uncompressedOffset {
+ return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset)
+ }
+ if latest.compressedOffset > compressedOffset {
+ return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset)
+ }
+ }
+ i.info = append(i.info, struct {
+ compressedOffset int64
+ uncompressedOffset int64
+ }{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset})
+ return nil
+}
+
+// Find the offset at or before the wanted (uncompressed) offset.
+// If offset is 0 or positive it is the offset from the beginning of the file.
+// If the uncompressed size is known, the offset must be within the file.
+// If an offset outside the file is requested io.ErrUnexpectedEOF is returned.
+// If the offset is negative, it is interpreted as the distance from the end of the file,
+// where -1 represents the last byte.
+// If offset from the end of the file is requested, but size is unknown,
+// ErrUnsupported will be returned.
+func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) {
+ if i.TotalUncompressed < 0 {
+ return 0, 0, ErrCorrupt
+ }
+ if offset < 0 {
+ offset = i.TotalUncompressed + offset
+ if offset < 0 {
+ return 0, 0, io.ErrUnexpectedEOF
+ }
+ }
+ if offset > i.TotalUncompressed {
+ return 0, 0, io.ErrUnexpectedEOF
+ }
+ if len(i.info) > 200 {
+ n := sort.Search(len(i.info), func(n int) bool {
+ return i.info[n].uncompressedOffset > offset
+ })
+ if n == 0 {
+ n = 1
+ }
+ return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil
+ }
+ for _, info := range i.info {
+ if info.uncompressedOffset > offset {
+ break
+ }
+ compressedOff = info.compressedOffset
+ uncompressedOff = info.uncompressedOffset
+ }
+ return compressedOff, uncompressedOff, nil
+}
+
+// reduce to stay below maxIndexEntries
+func (i *Index) reduce() {
+ if len(i.info) < maxIndexEntries && i.estBlockUncomp >= 1<<20 {
+ return
+ }
+
+ // Algorithm, keep 1, remove removeN entries...
+ removeN := (len(i.info) + 1) / maxIndexEntries
+ src := i.info
+ j := 0
+
+ // Each block should be at least 1MB, but don't reduce below 1000 entries.
+ for i.estBlockUncomp*(int64(removeN)+1) < 1<<20 && len(i.info)/(removeN+1) > 1000 {
+ removeN++
+ }
+ for idx := 0; idx < len(src); idx++ {
+ i.info[j] = src[idx]
+ j++
+ idx += removeN
+ }
+ i.info = i.info[:j]
+ // Update maxblock estimate.
+ i.estBlockUncomp += i.estBlockUncomp * int64(removeN)
+}
+
+func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte {
+ i.reduce()
+ var tmp [binary.MaxVarintLen64]byte
+
+ initSize := len(b)
+ // We make the start a skippable header+size.
+ b = append(b, ChunkTypeIndex, 0, 0, 0)
+ b = append(b, []byte(S2IndexHeader)...)
+ // Total Uncompressed size
+ n := binary.PutVarint(tmp[:], uncompTotal)
+ b = append(b, tmp[:n]...)
+ // Total Compressed size
+ n = binary.PutVarint(tmp[:], compTotal)
+ b = append(b, tmp[:n]...)
+ // Put EstBlockUncomp size
+ n = binary.PutVarint(tmp[:], i.estBlockUncomp)
+ b = append(b, tmp[:n]...)
+ // Put length
+ n = binary.PutVarint(tmp[:], int64(len(i.info)))
+ b = append(b, tmp[:n]...)
+
+ // Check if we should add uncompressed offsets
+ var hasUncompressed byte
+ for idx, info := range i.info {
+ if idx == 0 {
+ if info.uncompressedOffset != 0 {
+ hasUncompressed = 1
+ break
+ }
+ continue
+ }
+ if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp {
+ hasUncompressed = 1
+ break
+ }
+ }
+ b = append(b, hasUncompressed)
+
+ // Add each entry
+ if hasUncompressed == 1 {
+ for idx, info := range i.info {
+ uOff := info.uncompressedOffset
+ if idx > 0 {
+ prev := i.info[idx-1]
+ uOff -= prev.uncompressedOffset + (i.estBlockUncomp)
+ }
+ n = binary.PutVarint(tmp[:], uOff)
+ b = append(b, tmp[:n]...)
+ }
+ }
+
+ // Initial compressed size estimate.
+ cPredict := i.estBlockUncomp / 2
+
+ for idx, info := range i.info {
+ cOff := info.compressedOffset
+ if idx > 0 {
+ prev := i.info[idx-1]
+ cOff -= prev.compressedOffset + cPredict
+ // Update compressed size prediction, with half the error.
+ cPredict += cOff / 2
+ }
+ n = binary.PutVarint(tmp[:], cOff)
+ b = append(b, tmp[:n]...)
+ }
+
+ // Add Total Size.
+ // Stored as fixed size for easier reading.
+ binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer)))
+ b = append(b, tmp[:4]...)
+ // Trailer
+ b = append(b, []byte(S2IndexTrailer)...)
+
+ // Update size
+ chunkLen := len(b) - initSize - skippableFrameHeader
+ b[initSize+1] = uint8(chunkLen >> 0)
+ b[initSize+2] = uint8(chunkLen >> 8)
+ b[initSize+3] = uint8(chunkLen >> 16)
+ //fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal)
+ return b
+}
+
+// Load a binary index.
+// A zero value Index can be used or a previous one can be reused.
+func (i *Index) Load(b []byte) ([]byte, error) {
+ if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) {
+ return b, io.ErrUnexpectedEOF
+ }
+ if b[0] != ChunkTypeIndex {
+ return b, ErrCorrupt
+ }
+ chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
+ b = b[4:]
+
+ // Validate we have enough...
+ if len(b) < chunkLen {
+ return b, io.ErrUnexpectedEOF
+ }
+ if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) {
+ return b, ErrUnsupported
+ }
+ b = b[len(S2IndexHeader):]
+
+ // Total Uncompressed
+ if v, n := binary.Varint(b); n <= 0 || v < 0 {
+ return b, ErrCorrupt
+ } else {
+ i.TotalUncompressed = v
+ b = b[n:]
+ }
+
+ // Total Compressed
+ if v, n := binary.Varint(b); n <= 0 {
+ return b, ErrCorrupt
+ } else {
+ i.TotalCompressed = v
+ b = b[n:]
+ }
+
+ // Read EstBlockUncomp
+ if v, n := binary.Varint(b); n <= 0 {
+ return b, ErrCorrupt
+ } else {
+ if v < 0 {
+ return b, ErrCorrupt
+ }
+ i.estBlockUncomp = v
+ b = b[n:]
+ }
+
+ var entries int
+ if v, n := binary.Varint(b); n <= 0 {
+ return b, ErrCorrupt
+ } else {
+ if v < 0 || v > maxIndexEntries {
+ return b, ErrCorrupt
+ }
+ entries = int(v)
+ b = b[n:]
+ }
+ if cap(i.info) < entries {
+ i.allocInfos(entries)
+ }
+ i.info = i.info[:entries]
+
+ if len(b) < 1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ hasUncompressed := b[0]
+ b = b[1:]
+ if hasUncompressed&1 != hasUncompressed {
+ return b, ErrCorrupt
+ }
+
+ // Add each uncompressed entry
+ for idx := range i.info {
+ var uOff int64
+ if hasUncompressed != 0 {
+ // Load delta
+ if v, n := binary.Varint(b); n <= 0 {
+ return b, ErrCorrupt
+ } else {
+ uOff = v
+ b = b[n:]
+ }
+ }
+
+ if idx > 0 {
+ prev := i.info[idx-1].uncompressedOffset
+ uOff += prev + (i.estBlockUncomp)
+ if uOff <= prev {
+ return b, ErrCorrupt
+ }
+ }
+ if uOff < 0 {
+ return b, ErrCorrupt
+ }
+ i.info[idx].uncompressedOffset = uOff
+ }
+
+ // Initial compressed size estimate.
+ cPredict := i.estBlockUncomp / 2
+
+ // Add each compressed entry
+ for idx := range i.info {
+ var cOff int64
+ if v, n := binary.Varint(b); n <= 0 {
+ return b, ErrCorrupt
+ } else {
+ cOff = v
+ b = b[n:]
+ }
+
+ if idx > 0 {
+ // Update compressed size prediction, with half the error.
+ cPredictNew := cPredict + cOff/2
+
+ prev := i.info[idx-1].compressedOffset
+ cOff += prev + cPredict
+ if cOff <= prev {
+ return b, ErrCorrupt
+ }
+ cPredict = cPredictNew
+ }
+ if cOff < 0 {
+ return b, ErrCorrupt
+ }
+ i.info[idx].compressedOffset = cOff
+ }
+ if len(b) < 4+len(S2IndexTrailer) {
+ return b, io.ErrUnexpectedEOF
+ }
+ // Skip size...
+ b = b[4:]
+
+ // Check trailer...
+ if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) {
+ return b, ErrCorrupt
+ }
+ return b[len(S2IndexTrailer):], nil
+}
+
+// LoadStream will load an index from the end of the supplied stream.
+// ErrUnsupported will be returned if the signature cannot be found.
+// ErrCorrupt will be returned if unexpected values are found.
+// io.ErrUnexpectedEOF is returned if there are too few bytes.
+// IO errors are returned as-is.
+func (i *Index) LoadStream(rs io.ReadSeeker) error {
+ // Go to end.
+ _, err := rs.Seek(-10, io.SeekEnd)
+ if err != nil {
+ return err
+ }
+ var tmp [10]byte
+ _, err = io.ReadFull(rs, tmp[:])
+ if err != nil {
+ return err
+ }
+ // Check trailer...
+ if !bytes.Equal(tmp[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) {
+ return ErrUnsupported
+ }
+ sz := binary.LittleEndian.Uint32(tmp[:4])
+ if sz > maxChunkSize+skippableFrameHeader {
+ return ErrCorrupt
+ }
+ _, err = rs.Seek(-int64(sz), io.SeekEnd)
+ if err != nil {
+ return err
+ }
+
+ // Read index.
+ buf := make([]byte, sz)
+ _, err = io.ReadFull(rs, buf)
+ if err != nil {
+ return err
+ }
+ _, err = i.Load(buf)
+ return err
+}
+
+// IndexStream will return an index for a stream.
+// The stream structure will be checked, but
+// data within blocks is not verified.
+// The returned index can either be appended to the end of the stream
+// or stored separately.
+func IndexStream(r io.Reader) ([]byte, error) {
+ var i Index
+ var buf [maxChunkSize]byte
+ var readHeader bool
+ for {
+ _, err := io.ReadFull(r, buf[:4])
+ if err != nil {
+ if err == io.EOF {
+ return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil
+ }
+ return nil, err
+ }
+ // Start of this chunk.
+ startChunk := i.TotalCompressed
+ i.TotalCompressed += 4
+
+ chunkType := buf[0]
+ if !readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ return nil, ErrCorrupt
+ }
+ readHeader = true
+ }
+ chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16
+ if chunkLen < checksumSize {
+ return nil, ErrCorrupt
+ }
+
+ i.TotalCompressed += int64(chunkLen)
+ _, err = io.ReadFull(r, buf[:chunkLen])
+ if err != nil {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ // Skip checksum.
+ dLen, err := DecodedLen(buf[checksumSize:])
+ if err != nil {
+ return nil, err
+ }
+ if dLen > maxBlockSize {
+ return nil, ErrCorrupt
+ }
+ if i.estBlockUncomp == 0 {
+ // Use first block for estimate...
+ i.estBlockUncomp = int64(dLen)
+ }
+ err = i.add(startChunk, i.TotalUncompressed)
+ if err != nil {
+ return nil, err
+ }
+ i.TotalUncompressed += int64(dLen)
+ continue
+ case chunkTypeUncompressedData:
+ n2 := chunkLen - checksumSize
+ if n2 > maxBlockSize {
+ return nil, ErrCorrupt
+ }
+ if i.estBlockUncomp == 0 {
+ // Use first block for estimate...
+ i.estBlockUncomp = int64(n2)
+ }
+ err = i.add(startChunk, i.TotalUncompressed)
+ if err != nil {
+ return nil, err
+ }
+ i.TotalUncompressed += int64(n2)
+ continue
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ return nil, ErrCorrupt
+ }
+
+ if string(buf[:len(magicBody)]) != magicBody {
+ if string(buf[:len(magicBody)]) != magicBodySnappy {
+ return nil, ErrCorrupt
+ }
+ }
+
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ return nil, ErrUnsupported
+ }
+ if chunkLen > maxChunkSize {
+ return nil, ErrUnsupported
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ }
+}
+
+// JSON returns the index as JSON text.
+func (i *Index) JSON() []byte {
+ x := struct {
+ TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. Will be -1 if unknown.
+ TotalCompressed int64 `json:"total_compressed"` // Total Compressed size if known. Will be -1 if unknown.
+ Offsets []struct {
+ CompressedOffset int64 `json:"compressed"`
+ UncompressedOffset int64 `json:"uncompressed"`
+ } `json:"offsets"`
+ EstBlockUncomp int64 `json:"est_block_uncompressed"`
+ }{
+ TotalUncompressed: i.TotalUncompressed,
+ TotalCompressed: i.TotalCompressed,
+ EstBlockUncomp: i.estBlockUncomp,
+ }
+ for _, v := range i.info {
+ x.Offsets = append(x.Offsets, struct {
+ CompressedOffset int64 `json:"compressed"`
+ UncompressedOffset int64 `json:"uncompressed"`
+ }{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset})
+ }
+ b, _ := json.MarshalIndent(x, "", " ")
+ return b
+}
+
+// RemoveIndexHeaders will trim all headers and trailers from a given index.
+// This is expected to save 20 bytes.
+// These can be restored using RestoreIndexHeaders.
+// This removes a layer of security, but is the most compact representation.
+// Returns nil if headers contains errors.
+// The returned slice references the provided slice.
+func RemoveIndexHeaders(b []byte) []byte {
+ const save = 4 + len(S2IndexHeader) + len(S2IndexTrailer) + 4
+ if len(b) <= save {
+ return nil
+ }
+ if b[0] != ChunkTypeIndex {
+ return nil
+ }
+ chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
+ b = b[4:]
+
+ // Validate we have enough...
+ if len(b) < chunkLen {
+ return nil
+ }
+ b = b[:chunkLen]
+
+ if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) {
+ return nil
+ }
+ b = b[len(S2IndexHeader):]
+ if !bytes.HasSuffix(b, []byte(S2IndexTrailer)) {
+ return nil
+ }
+ b = bytes.TrimSuffix(b, []byte(S2IndexTrailer))
+
+ if len(b) < 4 {
+ return nil
+ }
+ return b[:len(b)-4]
+}
+
+// RestoreIndexHeaders will restore index headers removed by RemoveIndexHeaders.
+// No error checking is performed on the input.
+// If a 0 length slice is sent, it is returned without modification.
+func RestoreIndexHeaders(in []byte) []byte {
+ if len(in) == 0 {
+ return in
+ }
+ b := make([]byte, 0, 4+len(S2IndexHeader)+len(in)+len(S2IndexTrailer)+4)
+ b = append(b, ChunkTypeIndex, 0, 0, 0)
+ b = append(b, []byte(S2IndexHeader)...)
+ b = append(b, in...)
+
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(S2IndexTrailer)))
+ b = append(b, tmp[:4]...)
+ // Trailer
+ b = append(b, []byte(S2IndexTrailer)...)
+
+ chunkLen := len(b) - skippableFrameHeader
+ b[1] = uint8(chunkLen >> 0)
+ b[2] = uint8(chunkLen >> 8)
+ b[3] = uint8(chunkLen >> 16)
+ return b
+}
diff --git a/vendor/github.com/klauspost/compress/s2/lz4convert.go b/vendor/github.com/klauspost/compress/s2/lz4convert.go
new file mode 100644
index 0000000000..46ed908e3c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/lz4convert.go
@@ -0,0 +1,585 @@
+// Copyright (c) 2022 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+)
+
+// LZ4Converter provides conversion from LZ4 blocks as defined here:
+// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md
+type LZ4Converter struct {
+}
+
+// ErrDstTooSmall is returned when provided destination is too small.
+var ErrDstTooSmall = errors.New("s2: destination too small")
+
+// ConvertBlock will convert an LZ4 block and append it as an S2
+// block without block length to dst.
+// The uncompressed size is returned as well.
+// dst must have capacity to contain the entire compressed block.
+func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
+ if len(src) == 0 {
+ return dst, 0, nil
+ }
+ const debug = false
+ const inline = true
+ const lz4MinMatch = 4
+
+ s, d := 0, len(dst)
+ dst = dst[:cap(dst)]
+ if !debug && hasAmd64Asm {
+ res, sz := cvtLZ4BlockAsm(dst[d:], src)
+ if res < 0 {
+ const (
+ errCorrupt = -1
+ errDstTooSmall = -2
+ )
+ switch res {
+ case errCorrupt:
+ return nil, 0, ErrCorrupt
+ case errDstTooSmall:
+ return nil, 0, ErrDstTooSmall
+ default:
+ return nil, 0, fmt.Errorf("unexpected result: %d", res)
+ }
+ }
+ if d+sz > len(dst) {
+ return nil, 0, ErrDstTooSmall
+ }
+ return dst[:d+sz], res, nil
+ }
+
+ dLimit := len(dst) - 10
+ var lastOffset uint16
+ var uncompressed int
+ if debug {
+ fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
+ }
+
+ for {
+ if s >= len(src) {
+ return dst[:d], 0, ErrCorrupt
+ }
+ // Read literal info
+ token := src[s]
+ ll := int(token >> 4)
+ ml := int(lz4MinMatch + (token & 0xf))
+
+ // If upper nibble is 15, literal length is extended
+ if token >= 0xf0 {
+ for {
+ s++
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return dst[:d], 0, ErrCorrupt
+ }
+ val := src[s]
+ ll += int(val)
+ if val != 255 {
+ break
+ }
+ }
+ }
+ // Skip past token
+ if s+ll >= len(src) {
+ if debug {
+ fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ s++
+ if ll > 0 {
+ if d+ll > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ if debug {
+ fmt.Printf("emit %d literals\n", ll)
+ }
+ d += emitLiteralGo(dst[d:], src[s:s+ll])
+ s += ll
+ uncompressed += ll
+ }
+
+ // Check if we are done...
+ if s == len(src) && ml == lz4MinMatch {
+ break
+ }
+ // 2 byte offset
+ if s >= len(src)-2 {
+ if debug {
+ fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ offset := binary.LittleEndian.Uint16(src[s:])
+ s += 2
+ if offset == 0 {
+ if debug {
+ fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ if int(offset) > uncompressed {
+ if debug {
+ fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
+ }
+ return nil, 0, ErrCorrupt
+ }
+
+ if ml == lz4MinMatch+15 {
+ for {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ s++
+ ml += int(val)
+ if val != 255 {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ break
+ }
+ }
+ }
+ if offset == lastOffset {
+ if debug {
+ fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
+ }
+ if !inline {
+ d += emitRepeat16(dst[d:], offset, ml)
+ } else {
+ length := ml
+ dst := dst[d:]
+ for len(dst) > 5 {
+ // Repeat offset, make length cheaper
+ length -= 4
+ if length <= 4 {
+ dst[0] = uint8(length)<<2 | tagCopy1
+ dst[1] = 0
+ d += 2
+ break
+ }
+ if length < 8 && offset < 2048 {
+ // Encode WITH offset
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ if length < (1<<8)+4 {
+ length -= 4
+ dst[2] = uint8(length)
+ dst[1] = 0
+ dst[0] = 5<<2 | tagCopy1
+ d += 3
+ break
+ }
+ if length < (1<<16)+(1<<8) {
+ length -= 1 << 8
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 6<<2 | tagCopy1
+ d += 4
+ break
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= 1 << 16
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ length = maxRepeat - 4
+ }
+ dst[4] = uint8(length >> 16)
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 7<<2 | tagCopy1
+ if left > 0 {
+ d += 5 + emitRepeat16(dst[5:], offset, left)
+ break
+ }
+ d += 5
+ break
+ }
+ }
+ } else {
+ if debug {
+ fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
+ }
+ if !inline {
+ d += emitCopy16(dst[d:], offset, ml)
+ } else {
+ length := ml
+ dst := dst[d:]
+ for len(dst) > 5 {
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ off := 3
+ if offset < 2048 {
+ // emit 8 bytes as tagCopy1, rest as repeats.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
+ length -= 8
+ off = 2
+ } else {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ d += off + emitRepeat16(dst[off:], offset, length)
+ break
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ d += 3
+ break
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ }
+ lastOffset = offset
+ }
+ uncompressed += ml
+ if d > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ }
+
+ return dst[:d], uncompressed, nil
+}
+
+// ConvertBlockSnappy will convert an LZ4 block and append it
+// as a Snappy block without block length to dst.
+// The uncompressed size is returned as well.
+// dst must have capacity to contain the entire compressed block.
+func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
+ if len(src) == 0 {
+ return dst, 0, nil
+ }
+ const debug = false
+ const lz4MinMatch = 4
+
+ s, d := 0, len(dst)
+ dst = dst[:cap(dst)]
+ // Use assembly when possible
+ if !debug && hasAmd64Asm {
+ res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src)
+ if res < 0 {
+ const (
+ errCorrupt = -1
+ errDstTooSmall = -2
+ )
+ switch res {
+ case errCorrupt:
+ return nil, 0, ErrCorrupt
+ case errDstTooSmall:
+ return nil, 0, ErrDstTooSmall
+ default:
+ return nil, 0, fmt.Errorf("unexpected result: %d", res)
+ }
+ }
+ if d+sz > len(dst) {
+ return nil, 0, ErrDstTooSmall
+ }
+ return dst[:d+sz], res, nil
+ }
+
+ dLimit := len(dst) - 10
+ var uncompressed int
+ if debug {
+ fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
+ }
+
+ for {
+ if s >= len(src) {
+ return nil, 0, ErrCorrupt
+ }
+ // Read literal info
+ token := src[s]
+ ll := int(token >> 4)
+ ml := int(lz4MinMatch + (token & 0xf))
+
+ // If upper nibble is 15, literal length is extended
+ if token >= 0xf0 {
+ for {
+ s++
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ ll += int(val)
+ if val != 255 {
+ break
+ }
+ }
+ }
+ // Skip past token
+ if s+ll >= len(src) {
+ if debug {
+ fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ s++
+ if ll > 0 {
+ if d+ll > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ if debug {
+ fmt.Printf("emit %d literals\n", ll)
+ }
+ d += emitLiteralGo(dst[d:], src[s:s+ll])
+ s += ll
+ uncompressed += ll
+ }
+
+ // Check if we are done...
+ if s == len(src) && ml == lz4MinMatch {
+ break
+ }
+ // 2 byte offset
+ if s >= len(src)-2 {
+ if debug {
+ fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ offset := binary.LittleEndian.Uint16(src[s:])
+ s += 2
+ if offset == 0 {
+ if debug {
+ fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ if int(offset) > uncompressed {
+ if debug {
+ fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
+ }
+ return nil, 0, ErrCorrupt
+ }
+
+ if ml == lz4MinMatch+15 {
+ for {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ s++
+ ml += int(val)
+ if val != 255 {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ break
+ }
+ }
+ }
+ if debug {
+ fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
+ }
+ length := ml
+ // d += emitCopyNoRepeat(dst[d:], int(offset), ml)
+ for length > 0 {
+ if d >= dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[d+2] = uint8(offset >> 8)
+ dst[d+1] = uint8(offset)
+ dst[d+0] = 63<<2 | tagCopy2
+ length -= 64
+ d += 3
+ continue
+ }
+ if length >= 12 || offset >= 2048 || length < 4 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[d+2] = uint8(offset >> 8)
+ dst[d+1] = uint8(offset)
+ dst[d+0] = uint8(length-1)<<2 | tagCopy2
+ d += 3
+ break
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[d+1] = uint8(offset)
+ dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ uncompressed += ml
+ if d > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ }
+
+ return dst[:d], uncompressed, nil
+}
+
+// emitRepeat16 writes a repeat chunk and returns the number of bytes written.
+// Length must be at least 4 and < 1<<24
+func emitRepeat16(dst []byte, offset uint16, length int) int {
+ // Repeat offset, make length cheaper
+ length -= 4
+ if length <= 4 {
+ dst[0] = uint8(length)<<2 | tagCopy1
+ dst[1] = 0
+ return 2
+ }
+ if length < 8 && offset < 2048 {
+ // Encode WITH offset
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
+ return 2
+ }
+ if length < (1<<8)+4 {
+ length -= 4
+ dst[2] = uint8(length)
+ dst[1] = 0
+ dst[0] = 5<<2 | tagCopy1
+ return 3
+ }
+ if length < (1<<16)+(1<<8) {
+ length -= 1 << 8
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 6<<2 | tagCopy1
+ return 4
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= 1 << 16
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ length = maxRepeat - 4
+ }
+ dst[4] = uint8(length >> 16)
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 7<<2 | tagCopy1
+ if left > 0 {
+ return 5 + emitRepeat16(dst[5:], offset, left)
+ }
+ return 5
+}
+
+// emitCopy16 writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint16
+// 4 <= length && length <= math.MaxUint32
+func emitCopy16(dst []byte, offset uint16, length int) int {
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ off := 3
+ if offset < 2048 {
+ // emit 8 bytes as tagCopy1, rest as repeats.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
+ length -= 8
+ off = 2
+ } else {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ return off + emitRepeat16(dst[off:], offset, length)
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ return 2
+}
+
+// emitLiteralGo writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 0 <= len(lit) && len(lit) <= math.MaxUint32
+func emitLiteralGo(dst, lit []byte) int {
+ if len(lit) == 0 {
+ return 0
+ }
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[1] = uint8(n)
+ dst[0] = 60<<2 | tagLiteral
+ i = 2
+ case n < 1<<16:
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 61<<2 | tagLiteral
+ i = 3
+ case n < 1<<24:
+ dst[3] = uint8(n >> 16)
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 62<<2 | tagLiteral
+ i = 4
+ default:
+ dst[4] = uint8(n >> 24)
+ dst[3] = uint8(n >> 16)
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 63<<2 | tagLiteral
+ i = 5
+ }
+ return i + copy(dst[i:], lit)
+}
diff --git a/vendor/github.com/klauspost/compress/s2/lz4sconvert.go b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go
new file mode 100644
index 0000000000..000f39719c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go
@@ -0,0 +1,467 @@
+// Copyright (c) 2022 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+// LZ4sConverter provides conversion from LZ4s.
+// (Intel modified LZ4 Blocks)
+// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf
+// LZ4s is a variant of LZ4 block format. LZ4s should be considered as an intermediate compressed block format.
+// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData.
+// The LZ4s block returned by the Intel® QAT hardware can be used by an external
+// software post-processing to generate other compressed data formats.
+// The following table lists the differences between LZ4 and LZ4s block format. LZ4s block format uses
+// the same high-level formatting as LZ4 block format with the following encoding changes:
+// For Min Match of 4 bytes, Copy length value 1-15 means length 4-18 with 18 bytes adding an extra byte.
+// ONLY "Min match of 4 bytes" is supported.
+type LZ4sConverter struct {
+}
+
+// ConvertBlock will convert an LZ4s block and append it as an S2
+// block without block length to dst.
+// The uncompressed size is returned as well.
+// dst must have capacity to contain the entire compressed block.
+func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
+ if len(src) == 0 {
+ return dst, 0, nil
+ }
+ const debug = false
+ const inline = true
+ const lz4MinMatch = 3
+
+ s, d := 0, len(dst)
+ dst = dst[:cap(dst)]
+ if !debug && hasAmd64Asm {
+ res, sz := cvtLZ4sBlockAsm(dst[d:], src)
+ if res < 0 {
+ const (
+ errCorrupt = -1
+ errDstTooSmall = -2
+ )
+ switch res {
+ case errCorrupt:
+ return nil, 0, ErrCorrupt
+ case errDstTooSmall:
+ return nil, 0, ErrDstTooSmall
+ default:
+ return nil, 0, fmt.Errorf("unexpected result: %d", res)
+ }
+ }
+ if d+sz > len(dst) {
+ return nil, 0, ErrDstTooSmall
+ }
+ return dst[:d+sz], res, nil
+ }
+
+ dLimit := len(dst) - 10
+ var lastOffset uint16
+ var uncompressed int
+ if debug {
+ fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
+ }
+
+ for {
+ if s >= len(src) {
+ return dst[:d], 0, ErrCorrupt
+ }
+ // Read literal info
+ token := src[s]
+ ll := int(token >> 4)
+ ml := int(lz4MinMatch + (token & 0xf))
+
+ // If upper nibble is 15, literal length is extended
+ if token >= 0xf0 {
+ for {
+ s++
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return dst[:d], 0, ErrCorrupt
+ }
+ val := src[s]
+ ll += int(val)
+ if val != 255 {
+ break
+ }
+ }
+ }
+ // Skip past token
+ if s+ll >= len(src) {
+ if debug {
+ fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ s++
+ if ll > 0 {
+ if d+ll > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ if debug {
+ fmt.Printf("emit %d literals\n", ll)
+ }
+ d += emitLiteralGo(dst[d:], src[s:s+ll])
+ s += ll
+ uncompressed += ll
+ }
+
+ // Check if we are done...
+ if ml == lz4MinMatch {
+ if s == len(src) {
+ break
+ }
+ // 0 bytes.
+ continue
+ }
+ // 2 byte offset
+ if s >= len(src)-2 {
+ if debug {
+ fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ offset := binary.LittleEndian.Uint16(src[s:])
+ s += 2
+ if offset == 0 {
+ if debug {
+ fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ if int(offset) > uncompressed {
+ if debug {
+ fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
+ }
+ return nil, 0, ErrCorrupt
+ }
+
+ if ml == lz4MinMatch+15 {
+ for {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ s++
+ ml += int(val)
+ if val != 255 {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ break
+ }
+ }
+ }
+ if offset == lastOffset {
+ if debug {
+ fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
+ }
+ if !inline {
+ d += emitRepeat16(dst[d:], offset, ml)
+ } else {
+ length := ml
+ dst := dst[d:]
+ for len(dst) > 5 {
+ // Repeat offset, make length cheaper
+ length -= 4
+ if length <= 4 {
+ dst[0] = uint8(length)<<2 | tagCopy1
+ dst[1] = 0
+ d += 2
+ break
+ }
+ if length < 8 && offset < 2048 {
+ // Encode WITH offset
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ if length < (1<<8)+4 {
+ length -= 4
+ dst[2] = uint8(length)
+ dst[1] = 0
+ dst[0] = 5<<2 | tagCopy1
+ d += 3
+ break
+ }
+ if length < (1<<16)+(1<<8) {
+ length -= 1 << 8
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 6<<2 | tagCopy1
+ d += 4
+ break
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= 1 << 16
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ length = maxRepeat - 4
+ }
+ dst[4] = uint8(length >> 16)
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 7<<2 | tagCopy1
+ if left > 0 {
+ d += 5 + emitRepeat16(dst[5:], offset, left)
+ break
+ }
+ d += 5
+ break
+ }
+ }
+ } else {
+ if debug {
+ fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
+ }
+ if !inline {
+ d += emitCopy16(dst[d:], offset, ml)
+ } else {
+ length := ml
+ dst := dst[d:]
+ for len(dst) > 5 {
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ off := 3
+ if offset < 2048 {
+ // emit 8 bytes as tagCopy1, rest as repeats.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
+ length -= 8
+ off = 2
+ } else {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ d += off + emitRepeat16(dst[off:], offset, length)
+ break
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ d += 3
+ break
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ }
+ lastOffset = offset
+ }
+ uncompressed += ml
+ if d > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ }
+
+ return dst[:d], uncompressed, nil
+}
+
+// ConvertBlockSnappy will convert an LZ4s block and append it
+// as a Snappy block without block length to dst.
+// The uncompressed size is returned as well.
+// dst must have capacity to contain the entire compressed block.
+func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
+ if len(src) == 0 {
+ return dst, 0, nil
+ }
+ const debug = false
+ const lz4MinMatch = 3
+
+ s, d := 0, len(dst)
+ dst = dst[:cap(dst)]
+ // Use assembly when possible
+ if !debug && hasAmd64Asm {
+ res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src)
+ if res < 0 {
+ const (
+ errCorrupt = -1
+ errDstTooSmall = -2
+ )
+ switch res {
+ case errCorrupt:
+ return nil, 0, ErrCorrupt
+ case errDstTooSmall:
+ return nil, 0, ErrDstTooSmall
+ default:
+ return nil, 0, fmt.Errorf("unexpected result: %d", res)
+ }
+ }
+ if d+sz > len(dst) {
+ return nil, 0, ErrDstTooSmall
+ }
+ return dst[:d+sz], res, nil
+ }
+
+ dLimit := len(dst) - 10
+ var uncompressed int
+ if debug {
+ fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
+ }
+
+ for {
+ if s >= len(src) {
+ return nil, 0, ErrCorrupt
+ }
+ // Read literal info
+ token := src[s]
+ ll := int(token >> 4)
+ ml := int(lz4MinMatch + (token & 0xf))
+
+ // If upper nibble is 15, literal length is extended
+ if token >= 0xf0 {
+ for {
+ s++
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ ll += int(val)
+ if val != 255 {
+ break
+ }
+ }
+ }
+ // Skip past token
+ if s+ll >= len(src) {
+ if debug {
+ fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ s++
+ if ll > 0 {
+ if d+ll > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ if debug {
+ fmt.Printf("emit %d literals\n", ll)
+ }
+ d += emitLiteralGo(dst[d:], src[s:s+ll])
+ s += ll
+ uncompressed += ll
+ }
+
+ // Check if we are done...
+ if ml == lz4MinMatch {
+ if s == len(src) {
+ break
+ }
+ // 0 bytes.
+ continue
+ }
+ // 2 byte offset
+ if s >= len(src)-2 {
+ if debug {
+ fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ offset := binary.LittleEndian.Uint16(src[s:])
+ s += 2
+ if offset == 0 {
+ if debug {
+ fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ if int(offset) > uncompressed {
+ if debug {
+ fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
+ }
+ return nil, 0, ErrCorrupt
+ }
+
+ if ml == lz4MinMatch+15 {
+ for {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ s++
+ ml += int(val)
+ if val != 255 {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ break
+ }
+ }
+ }
+ if debug {
+ fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
+ }
+ length := ml
+ // d += emitCopyNoRepeat(dst[d:], int(offset), ml)
+ for length > 0 {
+ if d >= dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[d+2] = uint8(offset >> 8)
+ dst[d+1] = uint8(offset)
+ dst[d+0] = 63<<2 | tagCopy2
+ length -= 64
+ d += 3
+ continue
+ }
+ if length >= 12 || offset >= 2048 || length < 4 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[d+2] = uint8(offset >> 8)
+ dst[d+1] = uint8(offset)
+ dst[d+0] = uint8(length-1)<<2 | tagCopy2
+ d += 3
+ break
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[d+1] = uint8(offset)
+ dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ uncompressed += ml
+ if d > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ }
+
+ return dst[:d], uncompressed, nil
+}
diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go
new file mode 100644
index 0000000000..2f01a3987f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/reader.go
@@ -0,0 +1,1062 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019+ Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "runtime"
+ "sync"
+)
+
+// ErrCantSeek is returned if the stream cannot be seeked.
+type ErrCantSeek struct {
+ Reason string
+}
+
+// Error returns the error as string.
+func (e ErrCantSeek) Error() string {
+ return fmt.Sprintf("s2: Can't seek because %s", e.Reason)
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes.
+func NewReader(r io.Reader, opts ...ReaderOption) *Reader {
+ nr := Reader{
+ r: r,
+ maxBlock: maxBlockSize,
+ }
+ for _, opt := range opts {
+ if err := opt(&nr); err != nil {
+ nr.err = err
+ return &nr
+ }
+ }
+ nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize
+ if nr.lazyBuf > 0 {
+ nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize)
+ } else {
+ nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize)
+ }
+ nr.readHeader = nr.ignoreStreamID
+ nr.paramsOK = true
+ return &nr
+}
+
+// ReaderOption is an option for creating a decoder.
+type ReaderOption func(*Reader) error
+
+// ReaderMaxBlockSize allows to control allocations if the stream
+// has been compressed with a smaller WriterBlockSize, or with the default 1MB.
+// Blocks must be this size or smaller to decompress,
+// otherwise the decoder will return ErrUnsupported.
+//
+// For streams compressed with Snappy this can safely be set to 64KB (64 << 10).
+//
+// Default is the maximum limit of 4MB.
+func ReaderMaxBlockSize(blockSize int) ReaderOption {
+ return func(r *Reader) error {
+ if blockSize > maxBlockSize || blockSize <= 0 {
+ return errors.New("s2: block size too large. Must be <= 4MB and > 0")
+ }
+ if r.lazyBuf == 0 && blockSize < defaultBlockSize {
+ r.lazyBuf = blockSize
+ }
+ r.maxBlock = blockSize
+ return nil
+ }
+}
+
+// ReaderAllocBlock allows to control upfront stream allocations
+// and not allocate for frames bigger than this initially.
+// If frames bigger than this are seen, a bigger buffer will be allocated.
+//
+// Default is 1MB, which is default output size.
+func ReaderAllocBlock(blockSize int) ReaderOption {
+ return func(r *Reader) error {
+ if blockSize > maxBlockSize || blockSize < 1024 {
+ return errors.New("s2: invalid ReaderAllocBlock. Must be <= 4MB and >= 1024")
+ }
+ r.lazyBuf = blockSize
+ return nil
+ }
+}
+
+// ReaderIgnoreStreamIdentifier will make the reader skip the expected
+// stream identifier at the beginning of the stream.
+// This can be used when serving a stream that has been forwarded to a specific point.
+func ReaderIgnoreStreamIdentifier() ReaderOption {
+ return func(r *Reader) error {
+ r.ignoreStreamID = true
+ return nil
+ }
+}
+
+// ReaderSkippableCB will register a callback for chunks with the specified ID.
+// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive).
+// For each chunk with the ID, the callback is called with the content.
+// Any returned non-nil error will abort decompression.
+// Only one callback per ID is supported, latest sent will be used.
+func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption {
+ return func(r *Reader) error {
+ if id < 0x80 || id > 0xfd {
+ return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)")
+ }
+ r.skippableCB[id] = fn
+ return nil
+ }
+}
+
+// ReaderIgnoreCRC will make the reader skip CRC calculation and checks.
+func ReaderIgnoreCRC() ReaderOption {
+ return func(r *Reader) error {
+ r.ignoreCRC = true
+ return nil
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ skippableCB [0x80]func(r io.Reader) error
+ blockStart int64 // Uncompressed offset at start of current.
+ index *Index
+
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ // maximum block size allowed.
+ maxBlock int
+ // maximum expected buffer size.
+ maxBufSize int
+ // alloc a buffer this size if > 0.
+ lazyBuf int
+ readHeader bool
+ paramsOK bool
+ snappyFrame bool
+ ignoreStreamID bool
+ ignoreCRC bool
+}
+
+// GetBufferCapacity returns the capacity of the internal buffer.
+// This might be useful to know when reusing the same reader in combination
+// with the lazy buffer option.
+func (r *Reader) GetBufferCapacity() int {
+ return cap(r.buf)
+}
+
+// ensureBufferSize will ensure that the buffer can take at least n bytes.
+// If false is returned the buffer exceeds maximum allowed size.
+func (r *Reader) ensureBufferSize(n int) bool {
+ if n > r.maxBufSize {
+ r.err = ErrCorrupt
+ return false
+ }
+ if cap(r.buf) >= n {
+ return true
+ }
+ // Realloc buffer.
+ r.buf = make([]byte, n)
+ return true
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ if !r.paramsOK {
+ return
+ }
+ r.index = nil
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.blockStart = 0
+ r.readHeader = r.ignoreStreamID
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// skippable will skip n bytes.
+// If the supplied reader supports seeking that is used.
+// tmp is used as a temporary buffer for reading.
+// The supplied slice does not need to be the size of the read.
+func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) {
+ if id < 0x80 {
+ r.err = fmt.Errorf("interbal error: skippable id < 0x80")
+ return false
+ }
+ if fn := r.skippableCB[id-0x80]; fn != nil {
+ rd := io.LimitReader(r.r, int64(n))
+ r.err = fn(rd)
+ if r.err != nil {
+ return false
+ }
+ _, r.err = io.CopyBuffer(ioutil.Discard, rd, tmp)
+ return r.err == nil
+ }
+ if rs, ok := r.r.(io.ReadSeeker); ok {
+ _, err := rs.Seek(int64(n), io.SeekCurrent)
+ if err == nil {
+ return true
+ }
+ if err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ return false
+ }
+ }
+ for n > 0 {
+ if n < len(tmp) {
+ tmp = tmp[:n]
+ }
+ if _, r.err = io.ReadFull(r.r, tmp); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ n -= len(tmp)
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4], true) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ r.blockStart += int64(r.j)
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.ensureBufferSize(chunkLen) {
+ if r.err == nil {
+ r.err = ErrUnsupported
+ }
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if r.snappyFrame && n > maxSnappyBlockSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+
+ if n > len(r.decoded) {
+ if n > r.maxBlock {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.decoded = make([]byte, n)
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
+ r.err = ErrCRC
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ r.blockStart += int64(r.j)
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.ensureBufferSize(chunkLen) {
+ if r.err == nil {
+ r.err = ErrUnsupported
+ }
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if r.snappyFrame && n > maxSnappyBlockSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ if n > r.maxBlock {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.decoded = make([]byte, n)
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return 0, r.err
+ }
+ if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
+ r.err = ErrCRC
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ if string(r.buf[:len(magicBody)]) != magicBody {
+ if string(r.buf[:len(magicBody)]) != magicBodySnappy {
+ r.err = ErrCorrupt
+ return 0, r.err
+ } else {
+ r.snappyFrame = true
+ }
+ } else {
+ r.snappyFrame = false
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ // fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if chunkLen > maxChunkSize {
+ // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
+ if !r.skippable(r.buf, chunkLen, false, chunkType) {
+ return 0, r.err
+ }
+ }
+}
+
+// DecodeConcurrent will decode the full stream to w.
+// This function should not be combined with reading, seeking or other operations.
+// Up to 'concurrent' goroutines will be used.
+// If <= 0, runtime.NumCPU will be used.
+// On success the number of decompressed bytes and a nil error is returned.
+// This is mainly intended for bigger streams.
+func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) {
+ if r.i > 0 || r.j > 0 || r.blockStart > 0 {
+ return 0, errors.New("DecodeConcurrent called after ")
+ }
+ if concurrent <= 0 {
+ concurrent = runtime.NumCPU()
+ }
+
+ // Write to output
+ var errMu sync.Mutex
+ var aErr error
+ setErr := func(e error) (ok bool) {
+ errMu.Lock()
+ defer errMu.Unlock()
+ if e == nil {
+ return aErr == nil
+ }
+ if aErr == nil {
+ aErr = e
+ }
+ return false
+ }
+ hasErr := func() (ok bool) {
+ errMu.Lock()
+ v := aErr != nil
+ errMu.Unlock()
+ return v
+ }
+
+ var aWritten int64
+ toRead := make(chan []byte, concurrent)
+ writtenBlocks := make(chan []byte, concurrent)
+ queue := make(chan chan []byte, concurrent)
+ reUse := make(chan chan []byte, concurrent)
+ for i := 0; i < concurrent; i++ {
+ toRead <- make([]byte, 0, r.maxBufSize)
+ writtenBlocks <- make([]byte, 0, r.maxBufSize)
+ reUse <- make(chan []byte, 1)
+ }
+ // Writer
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for toWrite := range queue {
+ entry := <-toWrite
+ reUse <- toWrite
+ if hasErr() {
+ writtenBlocks <- entry
+ continue
+ }
+ n, err := w.Write(entry)
+ want := len(entry)
+ writtenBlocks <- entry
+ if err != nil {
+ setErr(err)
+ continue
+ }
+ if n != want {
+ setErr(io.ErrShortWrite)
+ continue
+ }
+ aWritten += int64(n)
+ }
+ }()
+
+ // Reader
+ defer func() {
+ close(queue)
+ if r.err != nil {
+ err = r.err
+ setErr(r.err)
+ }
+ wg.Wait()
+ if err == nil {
+ err = aErr
+ }
+ written = aWritten
+ }()
+
+ for !hasErr() {
+ if !r.readFull(r.buf[:4], true) {
+ if r.err == io.EOF {
+ r.err = nil
+ }
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ r.blockStart += int64(r.j)
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if chunkLen > r.maxBufSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ orgBuf := <-toRead
+ buf := orgBuf[:chunkLen]
+
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if r.snappyFrame && n > maxSnappyBlockSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+
+ if n > r.maxBlock {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ wg.Add(1)
+
+ decoded := <-writtenBlocks
+ entry := <-reUse
+ queue <- entry
+ go func() {
+ defer wg.Done()
+ decoded = decoded[:n]
+ _, err := Decode(decoded, buf)
+ toRead <- orgBuf
+ if err != nil {
+ writtenBlocks <- decoded
+ setErr(err)
+ return
+ }
+ if !r.ignoreCRC && crc(decoded) != checksum {
+ writtenBlocks <- decoded
+ setErr(ErrCRC)
+ return
+ }
+ entry <- decoded
+ }()
+ continue
+
+ case chunkTypeUncompressedData:
+
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if chunkLen > r.maxBufSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ // Grab write buffer
+ orgBuf := <-writtenBlocks
+ buf := orgBuf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read content.
+ n := chunkLen - checksumSize
+
+ if r.snappyFrame && n > maxSnappyBlockSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if n > r.maxBlock {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ // Read uncompressed
+ buf = orgBuf[:n]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+
+ if !r.ignoreCRC && crc(buf) != checksum {
+ r.err = ErrCRC
+ return 0, r.err
+ }
+ entry := <-reUse
+ queue <- entry
+ entry <- buf
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ if string(r.buf[:len(magicBody)]) != magicBody {
+ if string(r.buf[:len(magicBody)]) != magicBodySnappy {
+ r.err = ErrCorrupt
+ return 0, r.err
+ } else {
+ r.snappyFrame = true
+ }
+ } else {
+ r.snappyFrame = false
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ // fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if chunkLen > maxChunkSize {
+ // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
+ if !r.skippable(r.buf, chunkLen, false, chunkType) {
+ return 0, r.err
+ }
+ }
+ return 0, r.err
+}
+
+// Skip will skip n bytes forward in the decompressed output.
+// For larger skips this consumes less CPU and is faster than reading output and discarding it.
+// CRC is not checked on skipped blocks.
+// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped.
+// If a decoding error is encountered subsequent calls to Read will also fail.
+func (r *Reader) Skip(n int64) error {
+ if n < 0 {
+ return errors.New("attempted negative skip")
+ }
+ if r.err != nil {
+ return r.err
+ }
+
+ for n > 0 {
+ if r.i < r.j {
+ // Skip in buffer.
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ left := int64(r.j - r.i)
+ if left >= n {
+ tmp := int64(r.i) + n
+ if tmp > math.MaxInt32 {
+ return errors.New("s2: internal overflow in skip")
+ }
+ r.i = int(tmp)
+ return nil
+ }
+ n -= int64(r.j - r.i)
+ r.i = r.j
+ }
+
+ // Buffer empty; read blocks until we have content.
+ if !r.readFull(r.buf[:4], true) {
+ if r.err == io.EOF {
+ r.err = io.ErrUnexpectedEOF
+ }
+ return r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ r.blockStart += int64(r.j)
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ if !r.ensureBufferSize(chunkLen) {
+ if r.err == nil {
+ r.err = ErrUnsupported
+ }
+ return r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ dLen, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return r.err
+ }
+ if dLen > r.maxBlock {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ // Check if destination is within this block
+ if int64(dLen) > n {
+ if len(r.decoded) < dLen {
+ r.decoded = make([]byte, dLen)
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return r.err
+ }
+ if crc(r.decoded[:dLen]) != checksum {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ } else {
+ // Skip block completely
+ n -= int64(dLen)
+ r.blockStart += int64(dLen)
+ dLen = 0
+ }
+ r.i, r.j = 0, dLen
+ continue
+ case chunkTypeUncompressedData:
+ r.blockStart += int64(r.j)
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ if !r.ensureBufferSize(chunkLen) {
+ if r.err != nil {
+ r.err = ErrUnsupported
+ }
+ return r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n2 := chunkLen - checksumSize
+ if n2 > len(r.decoded) {
+ if n2 > r.maxBlock {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ r.decoded = make([]byte, n2)
+ }
+ if !r.readFull(r.decoded[:n2], false) {
+ return r.err
+ }
+ if int64(n2) < n {
+ if crc(r.decoded[:n2]) != checksum {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ }
+ r.i, r.j = 0, n2
+ continue
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return r.err
+ }
+ if string(r.buf[:len(magicBody)]) != magicBody {
+ if string(r.buf[:len(magicBody)]) != magicBodySnappy {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ }
+
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return r.err
+ }
+ if chunkLen > maxChunkSize {
+ r.err = ErrUnsupported
+ return r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.skippable(r.buf, chunkLen, false, chunkType) {
+ return r.err
+ }
+ }
+ return nil
+}
+
+// ReadSeeker provides random or forward seeking in compressed content.
+// See Reader.ReadSeeker
+type ReadSeeker struct {
+ *Reader
+ readAtMu sync.Mutex
+}
+
+// ReadSeeker will return an io.ReadSeeker and io.ReaderAt
+// compatible version of the reader.
+// If 'random' is specified the returned io.Seeker can be used for
+// random seeking, otherwise only forward seeking is supported.
+// Enabling random seeking requires the original input to support
+// the io.Seeker interface.
+// A custom index can be specified which will be used if supplied.
+// When using a custom index, it will not be read from the input stream.
+// The ReadAt position will affect regular reads and the current position of Seek.
+// So using Read after ReadAt will continue from where the ReadAt stopped.
+// No functions should be used concurrently.
+// The returned ReadSeeker contains a shallow reference to the existing Reader,
+// meaning changes performed to one is reflected in the other.
+func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
+ // Read index if provided.
+ if len(index) != 0 {
+ if r.index == nil {
+ r.index = &Index{}
+ }
+ if _, err := r.index.Load(index); err != nil {
+ return nil, ErrCantSeek{Reason: "loading index returned: " + err.Error()}
+ }
+ }
+
+ // Check if input is seekable
+ rs, ok := r.r.(io.ReadSeeker)
+ if !ok {
+ if !random {
+ return &ReadSeeker{Reader: r}, nil
+ }
+ return nil, ErrCantSeek{Reason: "input stream isn't seekable"}
+ }
+
+ if r.index != nil {
+ // Seekable and index, ok...
+ return &ReadSeeker{Reader: r}, nil
+ }
+
+ // Load from stream.
+ r.index = &Index{}
+
+ // Read current position.
+ pos, err := rs.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()}
+ }
+ err = r.index.LoadStream(rs)
+ if err != nil {
+ if err == ErrUnsupported {
+ // If we don't require random seeking, reset input and return.
+ if !random {
+ _, err = rs.Seek(pos, io.SeekStart)
+ if err != nil {
+ return nil, ErrCantSeek{Reason: "resetting stream returned: " + err.Error()}
+ }
+ r.index = nil
+ return &ReadSeeker{Reader: r}, nil
+ }
+ return nil, ErrCantSeek{Reason: "input stream does not contain an index"}
+ }
+ return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()}
+ }
+
+ // reset position.
+ _, err = rs.Seek(pos, io.SeekStart)
+ if err != nil {
+ return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()}
+ }
+ return &ReadSeeker{Reader: r}, nil
+}
+
+// Seek allows seeking in compressed data.
+func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ if r.err != nil {
+ if !errors.Is(r.err, io.EOF) {
+ return 0, r.err
+ }
+ // Reset on EOF
+ r.err = nil
+ }
+
+ // Calculate absolute offset.
+ absOffset := offset
+
+ switch whence {
+ case io.SeekStart:
+ case io.SeekCurrent:
+ absOffset = r.blockStart + int64(r.i) + offset
+ case io.SeekEnd:
+ if r.index == nil {
+ return 0, ErrUnsupported
+ }
+ absOffset = r.index.TotalUncompressed + offset
+ default:
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ if absOffset < 0 {
+ return 0, errors.New("seek before start of file")
+ }
+
+ if !r.readHeader {
+ // Make sure we read the header.
+ _, r.err = r.Read([]byte{})
+ if r.err != nil {
+ return 0, r.err
+ }
+ }
+
+ // If we are inside current block no need to seek.
+ // This includes no offset changes.
+ if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) {
+ r.i = int(absOffset - r.blockStart)
+ return r.blockStart + int64(r.i), nil
+ }
+
+ rs, ok := r.r.(io.ReadSeeker)
+ if r.index == nil || !ok {
+ currOffset := r.blockStart + int64(r.i)
+ if absOffset >= currOffset {
+ err := r.Skip(absOffset - currOffset)
+ return r.blockStart + int64(r.i), err
+ }
+ return 0, ErrUnsupported
+ }
+
+ // We can seek and we have an index.
+ c, u, err := r.index.Find(absOffset)
+ if err != nil {
+ return r.blockStart + int64(r.i), err
+ }
+
+ // Seek to next block
+ _, err = rs.Seek(c, io.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+
+ r.i = r.j // Remove rest of current block.
+ r.blockStart = u - int64(r.j) // Adjust current block start for accounting.
+ if u < absOffset {
+ // Forward inside block
+ return absOffset, r.Skip(absOffset - u)
+ }
+ if u > absOffset {
+ return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset)
+ }
+ return absOffset, nil
+}
+
+// ReadAt reads len(p) bytes into p starting at offset off in the
+// underlying input source. It returns the number of bytes
+// read (0 <= n <= len(p)) and any error encountered.
+//
+// When ReadAt returns n < len(p), it returns a non-nil error
+// explaining why more bytes were not returned. In this respect,
+// ReadAt is stricter than Read.
+//
+// Even if ReadAt returns n < len(p), it may use all of p as scratch
+// space during the call. If some data is available but not len(p) bytes,
+// ReadAt blocks until either all the data is available or an error occurs.
+// In this respect ReadAt is different from Read.
+//
+// If the n = len(p) bytes returned by ReadAt are at the end of the
+// input source, ReadAt may return either err == EOF or err == nil.
+//
+// If ReadAt is reading from an input source with a seek offset,
+// ReadAt should not affect nor be affected by the underlying
+// seek offset.
+//
+// Clients of ReadAt can execute parallel ReadAt calls on the
+// same input source. This is however not recommended.
+func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) {
+ r.readAtMu.Lock()
+ defer r.readAtMu.Unlock()
+ _, err := r.Seek(offset, io.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+ n := 0
+ for n < len(p) {
+ n2, err := r.Read(p[n:])
+ if err != nil {
+ // This will include io.EOF
+ return n + n2, err
+ }
+ n += n2
+ }
+ return n, nil
+}
+
+// ReadByte satisfies the io.ByteReader interface.
+func (r *Reader) ReadByte() (byte, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ if r.i < r.j {
+ c := r.decoded[r.i]
+ r.i++
+ return c, nil
+ }
+ var tmp [1]byte
+ for i := 0; i < 10; i++ {
+ n, err := r.Read(tmp[:])
+ if err != nil {
+ return 0, err
+ }
+ if n == 1 {
+ return tmp[0], nil
+ }
+ }
+ return 0, io.ErrNoProgress
+}
+
+// SkippableCB will register a callback for chunks with the specified ID.
+// ID must be a Reserved skippable chunks ID, 0x80-0xfe (inclusive).
+// For each chunk with the ID, the callback is called with the content.
+// Any returned non-nil error will abort decompression.
+// Only one callback per ID is supported, latest sent will be used.
+// Sending a nil function will disable previous callbacks.
+func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error {
+ if id < 0x80 || id > chunkTypePadding {
+ return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)")
+ }
+ r.skippableCB[id] = fn
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go
new file mode 100644
index 0000000000..dae3f731fa
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/s2.go
@@ -0,0 +1,143 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2 implements the S2 compression format.
+//
+// S2 is an extension of Snappy. Similar to Snappy S2 is aimed for high throughput,
+// which is why it features concurrent compression for bigger payloads.
+//
+// Decoding is compatible with Snappy compressed content,
+// but content compressed with S2 cannot be decompressed by Snappy.
+//
+// For more information on Snappy/S2 differences see README in: https://github.com/klauspost/compress/tree/master/s2
+//
+// There are actually two S2 formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a S2 stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// A "better" compression option is available. This will trade some compression
+// speed
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// Blocks do not offer much data protection, so it is up to you to
+// add data validation of decompressed blocks.
+//
+// Streams perform CRC validation of the decompressed data.
+// Stream compression will also be performed on multiple CPU cores concurrently
+// significantly improving throughput.
+package s2
+
+import (
+ "bytes"
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy
+ magicBodySnappy = "sNaPpY"
+ magicBody = "S2sTwO"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock.
+ //
+ // For the framing format (Writer type instead of Encode function),
+ // this is the maximum uncompressed size of a block.
+ maxBlockSize = 4 << 20
+
+ // minBlockSize is the minimum size of block setting when creating a writer.
+ minBlockSize = 4 << 10
+
+ skippableFrameHeader = 4
+ maxChunkSize = 1<<24 - 1 // 16777215
+
+ // Default block size
+ defaultBlockSize = 1 << 20
+
+ // maxSnappyBlockSize is the maximum snappy block size.
+ maxSnappyBlockSize = 1 << 16
+
+ obufHeaderLen = checksumSize + chunkHeaderSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ ChunkTypeIndex = 0x99
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return c>>15 | c<<17 + 0xa282ead8
+}
+
+// literalExtraSize returns the extra size of encoding n literals.
+// n should be >= 0 and <= math.MaxUint32.
+func literalExtraSize(n int64) int64 {
+ if n == 0 {
+ return 0
+ }
+ switch {
+ case n < 60:
+ return 1
+ case n < 1<<8:
+ return 2
+ case n < 1<<16:
+ return 3
+ case n < 1<<24:
+ return 4
+ default:
+ return 5
+ }
+}
+
+type byter interface {
+ Bytes() []byte
+}
+
+var _ byter = &bytes.Buffer{}
diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go
new file mode 100644
index 0000000000..089cd36d8c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/writer.go
@@ -0,0 +1,1020 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019+ Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "crypto/rand"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "sync"
+)
+
const (
	// Compression levels in increasing order of effort.
	// levelUncompressed stores every block verbatim.
	levelUncompressed = iota + 1
	levelFast
	levelBetter
	levelBest
)
+
+// NewWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// Users must call Close to guarantee all data has been forwarded to
+// the underlying io.Writer and that resources are released.
+// They may also call Flush zero or more times before calling Close.
+func NewWriter(w io.Writer, opts ...WriterOption) *Writer {
+ w2 := Writer{
+ blockSize: defaultBlockSize,
+ concurrency: runtime.GOMAXPROCS(0),
+ randSrc: rand.Reader,
+ level: levelFast,
+ }
+ for _, opt := range opts {
+ if err := opt(&w2); err != nil {
+ w2.errState = err
+ return &w2
+ }
+ }
+ w2.obufLen = obufHeaderLen + MaxEncodedLen(w2.blockSize)
+ w2.paramsOK = true
+ w2.ibuf = make([]byte, 0, w2.blockSize)
+ w2.buffers.New = func() interface{} {
+ return make([]byte, w2.obufLen)
+ }
+ w2.Reset(w)
+ return &w2
+}
+
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
	// errMu guards errState. The first error recorded is sticky and is
	// returned by all subsequent operations until Reset.
	errMu sync.Mutex
	errState error

	// ibuf is a buffer for the incoming (uncompressed) bytes.
	ibuf []byte

	blockSize int // target uncompressed block size
	obufLen int // size of buffers handed out by 'buffers'
	concurrency int // max concurrent block encoders; 1 = fully synchronous
	written int64 // compressed bytes written to the underlying writer
	uncompWritten int64 // Bytes sent to compression
	output chan chan result // ordered queue feeding the writer goroutine
	buffers sync.Pool // recycled scratch buffers of obufLen bytes
	pad int // pad output to a multiple of this; <= 1 disables padding

	writer io.Writer
	randSrc io.Reader // source of padding bytes (crypto/rand by default)
	writerWg sync.WaitGroup // waits for the writer goroutine on Reset/close
	index Index // seek index accumulated as blocks are written
	customEnc func(dst, src []byte) int // optional user-supplied block encoder

	// wroteStreamHeader is whether we have written the stream header.
	wroteStreamHeader bool
	paramsOK bool // options validated; zero-value Writer is unusable
	snappy bool // emit Snappy-compatible frames instead of S2
	flushOnWrite bool // compress on every Write call instead of buffering
	appendIndex bool // append the index to the stream on Close
	level uint8 // one of the level* constants
}
+
// result is one unit of (possibly still being compressed) output,
// delivered to the writer goroutine through a per-block channel so
// stream order is preserved.
type result struct {
	b []byte
	// Uncompressed start offset
	startOffset int64
}
+
+// err returns the previously set error.
+// If no error has been set it is set to err if not nil.
+func (w *Writer) err(err error) error {
+ w.errMu.Lock()
+ errSet := w.errState
+ if errSet == nil && err != nil {
+ w.errState = err
+ errSet = err
+ }
+ w.errMu.Unlock()
+ return errSet
+}
+
// Reset discards the writer's state and switches the Snappy writer to write to w.
// This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
	if !w.paramsOK {
		return
	}
	// Close previous writer, if any: closing the queue stops the writer
	// goroutine, and Wait ensures it has fully drained before we reuse state.
	if w.output != nil {
		close(w.output)
		w.writerWg.Wait()
		w.output = nil
	}
	w.errState = nil
	w.ibuf = w.ibuf[:0]
	w.wroteStreamHeader = false
	w.written = 0
	w.writer = writer
	w.uncompWritten = 0
	w.index.reset(w.blockSize)

	// If we didn't get a writer, stop here.
	if writer == nil {
		return
	}
	// If no concurrency requested, don't spin up writer goroutine.
	if w.concurrency == 1 {
		return
	}

	toWrite := make(chan chan result, w.concurrency)
	w.output = toWrite
	w.writerWg.Add(1)

	// Start a writer goroutine that will write all output in order.
	go func() {
		defer w.writerWg.Done()

		// Get a queued write. Blocks are queued on toWrite in stream
		// order, so receiving each block's channel in turn keeps output ordered
		// even though compression itself runs concurrently.
		for write := range toWrite {
			// Wait for the data to be available.
			input := <-write
			in := input.b
			if len(in) > 0 {
				if w.err(nil) == nil {
					// Don't expose data from previous buffers.
					toWrite := in[:len(in):len(in)]
					// Write to output.
					n, err := writer.Write(toWrite)
					if err == nil && n != len(toWrite) {
						err = io.ErrShortBuffer
					}
					_ = w.err(err)
					w.err(w.index.add(w.written, input.startOffset))
					w.written += int64(n)
				}
			}
			// Only recycle full-size buffers; smaller ones (e.g. magic
			// headers) would poison the pool.
			if cap(in) >= w.obufLen {
				w.buffers.Put(in)
			}
			// close the incoming write request.
			// This can be used for synchronizing flushes.
			close(write)
		}
	}()
}
+
// Write satisfies the io.Writer interface.
// Input is buffered in w.ibuf until a full block is available.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	if w.flushOnWrite {
		// Bypass buffering and compress this write immediately.
		return w.write(p)
	}
	// If we exceed the input buffer size, start writing
	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err(nil) == nil {
		var n int
		if len(w.ibuf) == 0 {
			// Large write, empty buffer.
			// Write directly from p to avoid copy.
			n, _ = w.write(p)
		} else {
			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
			w.ibuf = w.ibuf[:len(w.ibuf)+n]
			// Errors are recorded in the sticky error state and picked up
			// by the loop condition and the check below.
			w.write(w.ibuf)
			w.ibuf = w.ibuf[:0]
		}
		nRet += n
		p = p[n:]
	}
	if err := w.err(nil); err != nil {
		return nRet, err
	}
	// p should always be able to fit into w.ibuf now.
	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
	w.ibuf = w.ibuf[:len(w.ibuf)+n]
	nRet += n
	return nRet, nil
}
+
// ReadFrom implements the io.ReaderFrom interface.
// Using this is typically more efficient since it avoids a memory copy.
// ReadFrom reads data from r until EOF or error.
// The return value n is the number of bytes read.
// Any error except io.EOF encountered during the read is also returned.
func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	// Flush any buffered data first so blocks stay aligned.
	if len(w.ibuf) > 0 {
		err := w.Flush()
		if err != nil {
			return 0, err
		}
	}
	// Fast path: in-memory sources can be encoded without copying.
	if br, ok := r.(byter); ok {
		buf := br.Bytes()
		if err := w.EncodeBuffer(buf); err != nil {
			return 0, err
		}
		return int64(len(buf)), w.Flush()
	}
	for {
		// Read a full block into a pooled buffer, leaving room for the
		// chunk header at the front, then hand it to writeFull.
		inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen]
		n2, err := io.ReadFull(r, inbuf[obufHeaderLen:])
		if err != nil {
			if err == io.ErrUnexpectedEOF {
				// A short final block is fine; treat as EOF.
				err = io.EOF
			}
			if err != io.EOF {
				return n, w.err(err)
			}
		}
		if n2 == 0 {
			break
		}
		n += int64(n2)
		err2 := w.writeFull(inbuf[:n2+obufHeaderLen])
		if w.err(err2) != nil {
			break
		}

		if err != nil {
			// We got EOF and wrote everything
			break
		}
	}

	return n, w.err(nil)
}
+
+// AddSkippableBlock will add a skippable block to the stream.
+// The ID must be 0x80-0xfe (inclusive).
+// Length of the skippable block must be <= 16777215 bytes.
+func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
+ if err := w.err(nil); err != nil {
+ return err
+ }
+ if len(data) == 0 {
+ return nil
+ }
+ if id < 0x80 || id > chunkTypePadding {
+ return fmt.Errorf("invalid skippable block id %x", id)
+ }
+ if len(data) > maxChunkSize {
+ return fmt.Errorf("skippable block excessed maximum size")
+ }
+ var header [4]byte
+ chunkLen := 4 + len(data)
+ header[0] = id
+ header[1] = uint8(chunkLen >> 0)
+ header[2] = uint8(chunkLen >> 8)
+ header[3] = uint8(chunkLen >> 16)
+ if w.concurrency == 1 {
+ write := func(b []byte) error {
+ n, err := w.writer.Write(b)
+ if err = w.err(err); err != nil {
+ return err
+ }
+ if n != len(data) {
+ return w.err(io.ErrShortWrite)
+ }
+ w.written += int64(n)
+ return w.err(nil)
+ }
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ if w.snappy {
+ if err := write([]byte(magicChunkSnappy)); err != nil {
+ return err
+ }
+ } else {
+ if err := write([]byte(magicChunk)); err != nil {
+ return err
+ }
+ }
+ }
+ if err := write(header[:]); err != nil {
+ return err
+ }
+ if err := write(data); err != nil {
+ return err
+ }
+ }
+
+ // Create output...
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ hWriter := make(chan result)
+ w.output <- hWriter
+ if w.snappy {
+ hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)}
+ } else {
+ hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)}
+ }
+ }
+
+ // Copy input.
+ inbuf := w.buffers.Get().([]byte)[:4]
+ copy(inbuf, header[:])
+ inbuf = append(inbuf, data...)
+
+ output := make(chan result, 1)
+ // Queue output.
+ w.output <- output
+ output <- result{startOffset: w.uncompWritten, b: inbuf}
+
+ return nil
+}
+
// EncodeBuffer will add a buffer to the stream.
// This is the fastest way to encode a stream,
// but the input buffer cannot be written to by the caller
// until Flush or Close has been called when concurrency != 1.
//
// If you cannot control that, use the regular Write function.
//
// Note that input is not buffered.
// This means that each write will result in discrete blocks being created.
// For buffered writes, use the regular Write function.
func (w *Writer) EncodeBuffer(buf []byte) (err error) {
	if err := w.err(nil); err != nil {
		return err
	}

	if w.flushOnWrite {
		_, err := w.write(buf)
		return err
	}
	// Flush queued data first.
	if len(w.ibuf) > 0 {
		err := w.Flush()
		if err != nil {
			return err
		}
	}
	if w.concurrency == 1 {
		_, err := w.writeSync(buf)
		return err
	}

	// Spawn goroutine and write block to output channel.
	if !w.wroteStreamHeader {
		w.wroteStreamHeader = true
		hWriter := make(chan result)
		w.output <- hWriter
		if w.snappy {
			hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)}
		} else {
			hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)}
		}
	}

	for len(buf) > 0 {
		// Cut input. Note that 'uncompressed' aliases the caller's buf —
		// this is why the caller must not mutate it until Flush/Close.
		uncompressed := buf
		if len(uncompressed) > w.blockSize {
			uncompressed = uncompressed[:w.blockSize]
		}
		buf = buf[len(uncompressed):]
		// Get an output buffer.
		obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen]
		output := make(chan result)
		// Queue output now, so we keep order.
		w.output <- output
		res := result{
			startOffset: w.uncompWritten,
		}
		w.uncompWritten += int64(len(uncompressed))
		go func() {
			checksum := crc(uncompressed)

			// Set to uncompressed.
			chunkType := uint8(chunkTypeUncompressedData)
			// Chunk length counts the 4-byte checksum plus the body.
			chunkLen := 4 + len(uncompressed)

			// Attempt compressing.
			n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
			n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

			// Check if we should use this, or store as uncompressed instead.
			if n2 > 0 {
				chunkType = uint8(chunkTypeCompressedData)
				chunkLen = 4 + n + n2
				obuf = obuf[:obufHeaderLen+n+n2]
			} else {
				// copy uncompressed
				copy(obuf[obufHeaderLen:], uncompressed)
			}

			// Fill in the per-chunk header that comes before the body.
			obuf[0] = chunkType
			obuf[1] = uint8(chunkLen >> 0)
			obuf[2] = uint8(chunkLen >> 8)
			obuf[3] = uint8(chunkLen >> 16)
			obuf[4] = uint8(checksum >> 0)
			obuf[5] = uint8(checksum >> 8)
			obuf[6] = uint8(checksum >> 16)
			obuf[7] = uint8(checksum >> 24)

			// Queue final output.
			res.b = obuf
			output <- res
		}()
	}
	return nil
}
+
// encodeBlock compresses 'uncompressed' into obuf and returns the number of
// bytes written. A return of 0 means the block should be stored verbatim
// (not compressible, or level is levelUncompressed).
func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
	// A custom encoder takes precedence; a negative return falls through
	// to the built-in encoders.
	if w.customEnc != nil {
		if ret := w.customEnc(obuf, uncompressed); ret >= 0 {
			return ret
		}
	}
	if w.snappy {
		switch w.level {
		case levelFast:
			return encodeBlockSnappy(obuf, uncompressed)
		case levelBetter:
			return encodeBlockBetterSnappy(obuf, uncompressed)
		case levelBest:
			return encodeBlockBestSnappy(obuf, uncompressed)
		}
		return 0
	}
	switch w.level {
	case levelFast:
		return encodeBlock(obuf, uncompressed)
	case levelBetter:
		return encodeBlockBetter(obuf, uncompressed)
	case levelBest:
		return encodeBlockBest(obuf, uncompressed, nil)
	}
	return 0
}
+
// write splits p into blocks and queues them for concurrent compression.
// Unlike EncodeBuffer, the input is copied, so the caller may reuse p.
func (w *Writer) write(p []byte) (nRet int, errRet error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	if w.concurrency == 1 {
		return w.writeSync(p)
	}

	// Spawn goroutine and write block to output channel.
	for len(p) > 0 {
		if !w.wroteStreamHeader {
			w.wroteStreamHeader = true
			hWriter := make(chan result)
			w.output <- hWriter
			if w.snappy {
				hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)}
			} else {
				hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)}
			}
		}

		var uncompressed []byte
		if len(p) > w.blockSize {
			uncompressed, p = p[:w.blockSize], p[w.blockSize:]
		} else {
			uncompressed, p = p, nil
		}

		// Copy input.
		// If the block is incompressible, this is used for the result.
		inbuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen]
		obuf := w.buffers.Get().([]byte)[:w.obufLen]
		copy(inbuf[obufHeaderLen:], uncompressed)
		uncompressed = inbuf[obufHeaderLen:]

		output := make(chan result)
		// Queue output now, so we keep order.
		w.output <- output
		res := result{
			startOffset: w.uncompWritten,
		}
		w.uncompWritten += int64(len(uncompressed))

		go func() {
			checksum := crc(uncompressed)

			// Set to uncompressed.
			chunkType := uint8(chunkTypeUncompressedData)
			chunkLen := 4 + len(uncompressed)

			// Attempt compressing.
			n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
			n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

			// Check if we should use this, or store as uncompressed instead.
			if n2 > 0 {
				chunkType = uint8(chunkTypeCompressedData)
				chunkLen = 4 + n + n2
				obuf = obuf[:obufHeaderLen+n+n2]
			} else {
				// Use input as output: inbuf already holds the raw bytes
				// with header space reserved, so swap the buffers.
				obuf, inbuf = inbuf, obuf
			}

			// Fill in the per-chunk header that comes before the body.
			obuf[0] = chunkType
			obuf[1] = uint8(chunkLen >> 0)
			obuf[2] = uint8(chunkLen >> 8)
			obuf[3] = uint8(chunkLen >> 16)
			obuf[4] = uint8(checksum >> 0)
			obuf[5] = uint8(checksum >> 8)
			obuf[6] = uint8(checksum >> 16)
			obuf[7] = uint8(checksum >> 24)

			// Queue final output.
			res.b = obuf
			output <- res

			// Put unused buffer back in pool.
			w.buffers.Put(inbuf)
		}()
		nRet += len(uncompressed)
	}
	return nRet, nil
}
+
// writeFull is a special version of write that will always write the full buffer.
// Data to be compressed should start at offset obufHeaderLen and fill the remainder of the buffer.
// The data will be written as a single block.
// The caller is not allowed to use inbuf after this function has been called.
func (w *Writer) writeFull(inbuf []byte) (errRet error) {
	if err := w.err(nil); err != nil {
		return err
	}

	if w.concurrency == 1 {
		_, err := w.writeSync(inbuf[obufHeaderLen:])
		return err
	}

	// Spawn goroutine and write block to output channel.
	if !w.wroteStreamHeader {
		w.wroteStreamHeader = true
		hWriter := make(chan result)
		w.output <- hWriter
		if w.snappy {
			hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)}
		} else {
			hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)}
		}
	}

	// Get an output buffer.
	obuf := w.buffers.Get().([]byte)[:w.obufLen]
	uncompressed := inbuf[obufHeaderLen:]

	output := make(chan result)
	// Queue output now, so we keep order.
	w.output <- output
	res := result{
		startOffset: w.uncompWritten,
	}
	w.uncompWritten += int64(len(uncompressed))

	go func() {
		checksum := crc(uncompressed)

		// Set to uncompressed.
		chunkType := uint8(chunkTypeUncompressedData)
		chunkLen := 4 + len(uncompressed)

		// Attempt compressing.
		n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
		n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

		// Check if we should use this, or store as uncompressed instead.
		if n2 > 0 {
			chunkType = uint8(chunkTypeCompressedData)
			chunkLen = 4 + n + n2
			obuf = obuf[:obufHeaderLen+n+n2]
		} else {
			// Use input as output: inbuf already has header space
			// reserved, so swap buffers instead of copying.
			obuf, inbuf = inbuf, obuf
		}

		// Fill in the per-chunk header that comes before the body.
		obuf[0] = chunkType
		obuf[1] = uint8(chunkLen >> 0)
		obuf[2] = uint8(chunkLen >> 8)
		obuf[3] = uint8(chunkLen >> 16)
		obuf[4] = uint8(checksum >> 0)
		obuf[5] = uint8(checksum >> 8)
		obuf[6] = uint8(checksum >> 16)
		obuf[7] = uint8(checksum >> 24)

		// Queue final output.
		res.b = obuf
		output <- res

		// Put unused buffer back in pool.
		w.buffers.Put(inbuf)
	}()
	return nil
}
+
// writeSync is the synchronous write path (concurrency == 1): blocks are
// compressed and written inline, with no writer goroutine involved.
func (w *Writer) writeSync(p []byte) (nRet int, errRet error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	if !w.wroteStreamHeader {
		w.wroteStreamHeader = true
		var n int
		var err error
		if w.snappy {
			n, err = w.writer.Write([]byte(magicChunkSnappy))
		} else {
			n, err = w.writer.Write([]byte(magicChunk))
		}
		if err != nil {
			return 0, w.err(err)
		}
		// magicChunk and magicChunkSnappy have the same length, so this
		// check is valid for both.
		if n != len(magicChunk) {
			return 0, w.err(io.ErrShortWrite)
		}
		w.written += int64(n)
	}

	for len(p) > 0 {
		var uncompressed []byte
		if len(p) > w.blockSize {
			uncompressed, p = p[:w.blockSize], p[w.blockSize:]
		} else {
			uncompressed, p = p, nil
		}

		obuf := w.buffers.Get().([]byte)[:w.obufLen]
		checksum := crc(uncompressed)

		// Set to uncompressed.
		chunkType := uint8(chunkTypeUncompressedData)
		chunkLen := 4 + len(uncompressed)

		// Attempt compressing.
		n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
		n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

		if n2 > 0 {
			chunkType = uint8(chunkTypeCompressedData)
			chunkLen = 4 + n + n2
			obuf = obuf[:obufHeaderLen+n+n2]
		} else {
			// Incompressible: only the 8-byte header goes in obuf; the raw
			// bytes are written directly from 'uncompressed' below.
			obuf = obuf[:8]
		}

		// Fill in the per-chunk header that comes before the body.
		obuf[0] = chunkType
		obuf[1] = uint8(chunkLen >> 0)
		obuf[2] = uint8(chunkLen >> 8)
		obuf[3] = uint8(chunkLen >> 16)
		obuf[4] = uint8(checksum >> 0)
		obuf[5] = uint8(checksum >> 8)
		obuf[6] = uint8(checksum >> 16)
		obuf[7] = uint8(checksum >> 24)

		n, err := w.writer.Write(obuf)
		if err != nil {
			return 0, w.err(err)
		}
		if n != len(obuf) {
			return 0, w.err(io.ErrShortWrite)
		}
		w.err(w.index.add(w.written, w.uncompWritten))
		w.written += int64(n)
		w.uncompWritten += int64(len(uncompressed))

		if chunkType == chunkTypeUncompressedData {
			// Write uncompressed data.
			n, err := w.writer.Write(uncompressed)
			if err != nil {
				return 0, w.err(err)
			}
			if n != len(uncompressed) {
				return 0, w.err(io.ErrShortWrite)
			}
			w.written += int64(n)
		}
		w.buffers.Put(obuf)
		// Queue final output.
		nRet += len(uncompressed)
	}
	return nRet, nil
}
+
// Flush flushes the Writer to its underlying io.Writer.
// This does not apply padding.
func (w *Writer) Flush() error {
	if err := w.err(nil); err != nil {
		return err
	}

	// Queue any data still in input buffer.
	if len(w.ibuf) != 0 {
		if !w.wroteStreamHeader {
			// Nothing written yet: take the synchronous path so the
			// stream header and this data go out immediately.
			_, err := w.writeSync(w.ibuf)
			w.ibuf = w.ibuf[:0]
			return w.err(err)
		} else {
			_, err := w.write(w.ibuf)
			w.ibuf = w.ibuf[:0]
			err = w.err(err)
			if err != nil {
				return err
			}
		}
	}
	if w.output == nil {
		return w.err(nil)
	}

	// Send empty buffer: it passes through the ordered queue, so all
	// previously queued blocks are on the wire once it completes.
	res := make(chan result)
	w.output <- res
	// Block until this has been picked up.
	res <- result{b: nil, startOffset: w.uncompWritten}
	// When it is closed, we have flushed.
	<-res
	return w.err(nil)
}
+
// Close calls Flush and then closes the Writer.
// Calling Close multiple times is ok,
// but calling CloseIndex after this will make it not return the index.
// The index is appended to the stream only if WriterAddIndex was set.
func (w *Writer) Close() error {
	_, err := w.closeIndex(w.appendIndex)
	return err
}
+
// CloseIndex calls Close and returns an index on first call.
// This is not required if you are only adding index to a stream.
// Later calls return nil since closeIndex only builds the index once.
func (w *Writer) CloseIndex() ([]byte, error) {
	return w.closeIndex(true)
}
+
// closeIndex flushes and shuts down the writer, optionally building the
// seek index (idx) and, if appendIndex is set, writing it to the stream
// after any requested padding. Returns the index bytes on the first close.
func (w *Writer) closeIndex(idx bool) ([]byte, error) {
	err := w.Flush()
	// Stop the writer goroutine, if running, and wait for it to drain.
	if w.output != nil {
		close(w.output)
		w.writerWg.Wait()
		w.output = nil
	}

	var index []byte
	if w.err(err) == nil && w.writer != nil {
		// Create index.
		if idx {
			compSize := int64(-1)
			if w.pad <= 1 {
				compSize = w.written
			}
			index = w.index.appendTo(w.ibuf[:0], w.uncompWritten, compSize)
			// Count as written for padding.
			if w.appendIndex {
				w.written += int64(len(index))
			}
		}

		if w.pad > 1 {
			tmp := w.ibuf[:0]
			if len(index) > 0 {
				// Allocate another buffer: ibuf's storage is backing the
				// index slice built above.
				tmp = w.buffers.Get().([]byte)[:0]
				defer w.buffers.Put(tmp)
			}
			add := calcSkippableFrame(w.written, int64(w.pad))
			frame, err := skippableFrame(tmp, add, w.randSrc)
			if err = w.err(err); err != nil {
				return nil, err
			}
			n, err2 := w.writer.Write(frame)
			if err2 == nil && n != len(frame) {
				err2 = io.ErrShortWrite
			}
			_ = w.err(err2)
		}
		if len(index) > 0 && w.appendIndex {
			n, err2 := w.writer.Write(index)
			if err2 == nil && n != len(index) {
				err2 = io.ErrShortWrite
			}
			_ = w.err(err2)
		}
	}
	// Record errClosed so later operations fail; if it is already the
	// sticky error this was a repeat close, which is allowed.
	err = w.err(errClosed)
	if err == errClosed {
		return index, nil
	}
	return nil, err
}
+
+// calcSkippableFrame will return a total size to be added for written
+// to be divisible by multiple.
+// The value will always be > skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+ if wantMultiple <= 0 {
+ panic("wantMultiple <= 0")
+ }
+ if written < 0 {
+ panic("written < 0")
+ }
+ leftOver := written % wantMultiple
+ if leftOver == 0 {
+ return 0
+ }
+ toAdd := wantMultiple - leftOver
+ for toAdd < skippableFrameHeader {
+ toAdd += wantMultiple
+ }
+ return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of bytes.
+// total should be >= skippableFrameHeader and < maxBlockSize + skippableFrameHeader
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+ if total == 0 {
+ return dst, nil
+ }
+ if total < skippableFrameHeader {
+ return dst, fmt.Errorf("s2: requested skippable frame (%d) < 4", total)
+ }
+ if int64(total) >= maxBlockSize+skippableFrameHeader {
+ return dst, fmt.Errorf("s2: requested skippable frame (%d) >= max 1<<24", total)
+ }
+ // Chunk type 0xfe "Section 4.4 Padding (chunk type 0xfe)"
+ dst = append(dst, chunkTypePadding)
+ f := uint32(total - skippableFrameHeader)
+ // Add chunk length.
+ dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16))
+ // Add data
+ start := len(dst)
+ dst = append(dst, make([]byte, f)...)
+ _, err := io.ReadFull(r, dst[start:])
+ return dst, err
+}
+
+var errClosed = errors.New("s2: Writer is closed")
+
// WriterOption is an option for creating an encoder.
// Options are applied by NewWriter; a non-nil error aborts construction.
type WriterOption func(*Writer) error
+
+// WriterConcurrency will set the concurrency,
+// meaning the maximum number of decoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WriterConcurrency(n int) WriterOption {
+ return func(w *Writer) error {
+ if n <= 0 {
+ return errors.New("concurrency must be at least 1")
+ }
+ w.concurrency = n
+ return nil
+ }
+}
+
// WriterAddIndex will append an index to the end of a stream
// when it is closed.
// The index allows random-access decompression of the stream.
func WriterAddIndex() WriterOption {
	return func(w *Writer) error {
		w.appendIndex = true
		return nil
	}
}
+
// WriterBetterCompression will enable better compression.
// EncodeBetter compresses better than Encode but typically with a
// 10-40% speed decrease on both compression and decompression.
func WriterBetterCompression() WriterOption {
	return func(w *Writer) error {
		w.level = levelBetter
		return nil
	}
}
+
// WriterBestCompression will enable the strongest compression level.
// It compresses better than WriterBetterCompression but typically with a
// big speed decrease on compression.
func WriterBestCompression() WriterOption {
	return func(w *Writer) error {
		w.level = levelBest
		return nil
	}
}
+
// WriterUncompressed will bypass compression.
// The stream will be written as uncompressed blocks only.
// If concurrency is > 1 CRC and output will still be done async.
func WriterUncompressed() WriterOption {
	return func(w *Writer) error {
		w.level = levelUncompressed
		return nil
	}
}
+
+// WriterBlockSize allows to override the default block size.
+// Blocks will be this size or smaller.
+// Minimum size is 4KB and and maximum size is 4MB.
+//
+// Bigger blocks may give bigger throughput on systems with many cores,
+// and will increase compression slightly, but it will limit the possible
+// concurrency for smaller payloads for both encoding and decoding.
+// Default block size is 1MB.
+//
+// When writing Snappy compatible output using WriterSnappyCompat,
+// the maximum block size is 64KB.
+func WriterBlockSize(n int) WriterOption {
+ return func(w *Writer) error {
+ if w.snappy && n > maxSnappyBlockSize || n < minBlockSize {
+ return errors.New("s2: block size too large. Must be <= 64K and >=4KB on for snappy compatible output")
+ }
+ if n > maxBlockSize || n < minBlockSize {
+ return errors.New("s2: block size too large. Must be <= 4MB and >=4KB")
+ }
+ w.blockSize = n
+ return nil
+ }
+}
+
+// WriterPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible by the decoder.
+// n must be > 0 and <= 4MB.
+// The padded area will be filled with data from crypto/rand.Reader.
+// The padding will be applied whenever Close is called on the writer.
+func WriterPadding(n int) WriterOption {
+ return func(w *Writer) error {
+ if n <= 0 {
+ return fmt.Errorf("s2: padding must be at least 1")
+ }
+ // No need to waste our time.
+ if n == 1 {
+ w.pad = 0
+ }
+ if n > maxBlockSize {
+ return fmt.Errorf("s2: padding must less than 4MB")
+ }
+ w.pad = n
+ return nil
+ }
+}
+
// WriterPaddingSrc will get random data for padding from the supplied source.
// By default crypto/rand is used.
// The reader must supply as many bytes as requested by io.ReadFull.
func WriterPaddingSrc(reader io.Reader) WriterOption {
	return func(w *Writer) error {
		w.randSrc = reader
		return nil
	}
}
+
// WriterSnappyCompat will write snappy compatible output.
// The output can be decompressed using either snappy or s2.
// If block size is more than 64KB it is set to that.
func WriterSnappyCompat() WriterOption {
	return func(w *Writer) error {
		w.snappy = true
		if w.blockSize > 64<<10 {
			// We choose 8 bytes less than 64K, since that will make literal emits slightly more effective.
			// And allows us to skip some size checks.
			w.blockSize = (64 << 10) - 8
		}
		return nil
	}
}
+
// WriterFlushOnWrite will compress blocks on each call to the Write function.
//
// This is quite inefficient as blocks size will depend on the write size.
//
// Use WriterConcurrency(1) to also make sure that output is flushed.
// When Write calls return, otherwise they will be written when compression is done.
func WriterFlushOnWrite() WriterOption {
	return func(w *Writer) error {
		w.flushOnWrite = true
		return nil
	}
}
+
// WriterCustomEncoder allows to override the encoder for blocks on the stream.
// The function must compress 'src' into 'dst' and return the bytes used in dst as an integer.
// Block size (initial varint) should not be added by the encoder.
// Returning value 0 indicates the block could not be compressed.
// Returning a negative value indicates that compression should be attempted
// with the built-in encoders instead.
// The function should expect to be called concurrently.
func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
	return func(w *Writer) error {
		w.customEnc = fn
		return nil
	}
}
diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore
new file mode 100644
index 0000000000..042091d9b3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS
new file mode 100644
index 0000000000..52ccb5a934
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS
@@ -0,0 +1,18 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Amazon.com, Inc
+Damian Gryski
+Eric Buth
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Klaus Post
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
new file mode 100644
index 0000000000..ea6524ddd0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
@@ -0,0 +1,41 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name
+
+# Please keep the list sorted.
+
+Alex Legg
+Damian Gryski
+Eric Buth
+Jan Mercl <0xjnml@gmail.com>
+Jonathan Swinney
+Kai Backman
+Klaus Post
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE
new file mode 100644
index 0000000000..6050c10f4c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/snappy/README.md b/vendor/github.com/klauspost/compress/snappy/README.md
new file mode 100644
index 0000000000..8271bbd090
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/README.md
@@ -0,0 +1,17 @@
+# snappy
+
+The Snappy compression format in the Go programming language.
+
+This is a drop-in replacement for `github.com/golang/snappy`.
+
+It provides a full, compatible replacement of the Snappy package by simply changing imports.
+
+See [Snappy Compatibility](https://github.com/klauspost/compress/tree/master/s2#snappy-compatibility) in the S2 documentation.
+
+"Better" compression mode is used. For buffered streams concurrent compression is used.
+
+For more options use the [s2 package](https://pkg.go.dev/github.com/klauspost/compress/s2).
+
+# usage
+
+Replace imports `github.com/golang/snappy` with `github.com/klauspost/compress/snappy`.
diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go
new file mode 100644
index 0000000000..89f1fa2344
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/decode.go
@@ -0,0 +1,60 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "io"
+
+ "github.com/klauspost/compress/s2"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = s2.ErrCorrupt
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = s2.ErrTooLarge
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = s2.ErrUnsupported
+)
+
+const (
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ return s2.DecodedLen(src)
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Decode handles the Snappy block format, not the Snappy stream format.
+func Decode(dst, src []byte) ([]byte, error) {
+ return s2.Decode(dst, src)
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return s2.NewReader(r, s2.ReaderMaxBlockSize(maxBlockSize))
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+//
+// Reader handles the Snappy stream format, not the Snappy block format.
+type Reader = s2.Reader
diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go
new file mode 100644
index 0000000000..e8bd72c186
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/encode.go
@@ -0,0 +1,59 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "io"
+
+ "github.com/klauspost/compress/s2"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Encode handles the Snappy block format, not the Snappy stream format.
+func Encode(dst, src []byte) []byte {
+ return s2.EncodeSnappyBetter(dst, src)
+}
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ return s2.MaxEncodedLen(srcLen)
+}
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression(), s2.WriterFlushOnWrite(), s2.WriterConcurrency(1))
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression())
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+//
+// Writer handles the Snappy stream format, not the Snappy block format.
+type Writer = s2.Writer
diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go
new file mode 100644
index 0000000000..398cdc95a0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/snappy.go
@@ -0,0 +1,46 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 94c5737858..c8ad1bf383 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -36,14 +36,14 @@ github.com/CortexFoundation/inference/synapse
# github.com/CortexFoundation/merkletree v0.0.0-20230724124840-b6e80265a137
## explicit; go 1.19
github.com/CortexFoundation/merkletree
-# github.com/CortexFoundation/robot v1.0.7-0.20230924205950-05c3925242ed
+# github.com/CortexFoundation/robot v1.0.7-0.20230925095445-ed66eab85b41
## explicit; go 1.20
github.com/CortexFoundation/robot
github.com/CortexFoundation/robot/backend
# github.com/CortexFoundation/statik v0.0.0-20210315012922-8bb8a7b5dc66
## explicit; go 1.16
github.com/CortexFoundation/statik
-# github.com/CortexFoundation/torrentfs v1.0.55-0.20230925132950-bf8f925efaea
+# github.com/CortexFoundation/torrentfs v1.0.55-0.20230928134616-cf4f35fd8ad8
## explicit; go 1.21
github.com/CortexFoundation/torrentfs
github.com/CortexFoundation/torrentfs/backend
@@ -137,7 +137,7 @@ github.com/anacrolix/stm/stmutil
# github.com/anacrolix/sync v0.4.0
## explicit; go 1.13
github.com/anacrolix/sync
-# github.com/anacrolix/torrent v1.52.6-0.20230916034836-b84b19cc4c45
+# github.com/anacrolix/torrent v1.52.6-0.20230926122046-f009e1d583cb
## explicit; go 1.20
github.com/anacrolix/torrent
github.com/anacrolix/torrent/analysis
@@ -341,7 +341,7 @@ github.com/cockroachdb/errors/withstack
# github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b
## explicit; go 1.16
github.com/cockroachdb/logtags
-# github.com/cockroachdb/pebble v0.0.0-20230922144958-86593692e09f
+# github.com/cockroachdb/pebble v0.0.0-20230927205513-725ebe297867
## explicit; go 1.19
github.com/cockroachdb/pebble
github.com/cockroachdb/pebble/bloom
@@ -446,7 +446,7 @@ github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa
## explicit; go 1.14
github.com/deepmap/oapi-codegen/pkg/runtime
github.com/deepmap/oapi-codegen/pkg/types
-# github.com/dgraph-io/badger/v4 v4.2.1-0.20230909123407-5f004c4ef084
+# github.com/dgraph-io/badger/v4 v4.2.1-0.20230927164310-2aea1ca26005
## explicit; go 1.19
github.com/dgraph-io/badger/v4
github.com/dgraph-io/badger/v4/fb
@@ -468,7 +468,7 @@ github.com/dlclark/regexp2/syntax
# github.com/docker/docker v24.0.6+incompatible
## explicit
github.com/docker/docker/pkg/reexec
-# github.com/dop251/goja v0.0.0-20230828202809-3dbe69dd2b8e
+# github.com/dop251/goja v0.0.0-20230919151941-fc55792775de
## explicit; go 1.16
github.com/dop251/goja
github.com/dop251/goja/ast
@@ -675,6 +675,8 @@ github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/internal/cpuinfo
github.com/klauspost/compress/internal/snapref
+github.com/klauspost/compress/s2
+github.com/klauspost/compress/snappy
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
# github.com/klauspost/cpuid/v2 v2.2.5
@@ -970,7 +972,7 @@ github.com/ucwong/filecache
# github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb
## explicit; go 1.19
github.com/ucwong/go-ttlmap
-# github.com/ucwong/golang-kv v1.0.23-0.20230922195406-1f1883da3532
+# github.com/ucwong/golang-kv v1.0.23-0.20230928100657-72a7bea7f86f
## explicit; go 1.21
github.com/ucwong/golang-kv
github.com/ucwong/golang-kv/badger