diff --git a/README.md b/README.md
index 05c7359e48..62c448e06e 100644
--- a/README.md
+++ b/README.md
@@ -81,7 +81,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 	* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
 	* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
 	* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
-	* gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+	* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799

 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
 	* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
@@ -136,7 +136,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 	* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
 	* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
 	* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
-	* zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
+	* zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
 	* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
 	* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
 	* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
@@ -339,7 +339,7 @@ While the release has been extensively tested, it is recommended to testing when
 	* s2: Fix binaries.

 * Feb 25, 2021 (v1.11.8)
-	* s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
+	* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
 	* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
 	* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
 	* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
@@ -518,7 +518,7 @@ While the release has been extensively tested, it is recommended to testing when
 * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
 * Feb 19, 2016: Handle small payloads faster in level 1-3.
 * Feb 19, 2016: Added faster level 2 + 3 compression modes.
-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
 * Feb 14, 2016: Snappy: Merge upstream changes.
 * Feb 14, 2016: Snappy: Fix aggressive skipping.
 * Feb 14, 2016: Snappy: Update benchmark.
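The changelog entry above links `zstd.WithDecodeAllCapLimit`. A minimal, illustrative sketch of how that option is typically used (assuming its documented v1.15.10+ behavior, where `DecodeAll` will not grow the result past the destination slice's capacity; not part of the patch itself):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Compress a small payload with default settings.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll([]byte("hello hello hello"), nil)
	enc.Close()

	// With the cap limit enabled, DecodeAll treats cap(dst) as the output ceiling.
	dec, err := zstd.NewReader(nil, zstd.WithDecodeAllCapLimit(true))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, make([]byte, 0, 1<<20))
	fmt.Println(len(out), err)
}
```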
diff --git a/compressible_test.go b/compressible_test.go
index 759c649bc6..d42d4ab5a2 100644
--- a/compressible_test.go
+++ b/compressible_test.go
@@ -8,7 +8,7 @@ import (
 func BenchmarkEstimate(b *testing.B) {
 	b.ReportAllocs()

-	// (predictable, low entropy distibution)
+	// (predictable, low entropy distribution)
 	b.Run("zeroes-5k", func(b *testing.B) {
 		var testData = make([]byte, 5000)
 		b.SetBytes(int64(len(testData)))
@@ -19,7 +19,7 @@ func BenchmarkEstimate(b *testing.B) {
 		b.Log(Estimate(testData))
 	})

-	// (predictable, high entropy distibution)
+	// (predictable, high entropy distribution)
 	b.Run("predictable-5k", func(b *testing.B) {
 		var testData = make([]byte, 5000)
 		for i := range testData {
@@ -33,7 +33,7 @@ func BenchmarkEstimate(b *testing.B) {
 		b.Log(Estimate(testData))
 	})

-	// (not predictable, high entropy distibution)
+	// (not predictable, high entropy distribution)
 	b.Run("random-500b", func(b *testing.B) {
 		var testData = make([]byte, 500)
 		rand.Read(testData)
@@ -45,7 +45,7 @@ func BenchmarkEstimate(b *testing.B) {
 		b.Log(Estimate(testData))
 	})

-	// (not predictable, high entropy distibution)
+	// (not predictable, high entropy distribution)
 	b.Run("random-5k", func(b *testing.B) {
 		var testData = make([]byte, 5000)
 		rand.Read(testData)
@@ -57,7 +57,7 @@ func BenchmarkEstimate(b *testing.B) {
 		b.Log(Estimate(testData))
 	})

-	// (not predictable, high entropy distibution)
+	// (not predictable, high entropy distribution)
 	b.Run("random-50k", func(b *testing.B) {
 		var testData = make([]byte, 50000)
 		rand.Read(testData)
@@ -69,7 +69,7 @@ func BenchmarkEstimate(b *testing.B) {
 		b.Log(Estimate(testData))
 	})

-	// (not predictable, high entropy distibution)
+	// (not predictable, high entropy distribution)
 	b.Run("random-500k", func(b *testing.B) {
 		var testData = make([]byte, 500000)
 		rand.Read(testData)
@@ -81,7 +81,7 @@ func BenchmarkEstimate(b *testing.B) {
 		b.Log(Estimate(testData))
 	})

-	// (not predictable, medium entropy distibution)
+	// (not predictable, medium entropy distribution)
 	b.Run("base-32-5k", func(b *testing.B) {
 		var testData = make([]byte, 5000)
 		rand.Read(testData)
@@ -95,7 +95,7 @@ func BenchmarkEstimate(b *testing.B) {
 		}
 		b.Log(Estimate(testData))
 	})
-	// (medium predictable, medium entropy distibution)
+	// (medium predictable, medium entropy distribution)
 	b.Run("text", func(b *testing.B) {
 		var testData = []byte(`If compression is done per-chunk, care should be taken that it doesn't leave restic backups open to watermarking/fingerprinting attacks.
 This is essentially the same problem we discussed related to fingerprinting the CDC deduplication process:
@@ -122,7 +122,7 @@ Thoughts?`)
 func BenchmarkSnannonEntropyBits(b *testing.B) {
 	b.ReportAllocs()

-	// (predictable, low entropy distibution)
+	// (predictable, low entropy distribution)
 	b.Run("zeroes-5k", func(b *testing.B) {
 		var testData = make([]byte, 5000)
 		b.SetBytes(int64(len(testData)))
@@ -133,7 +133,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
 		b.Log(ShannonEntropyBits(testData))
 	})

-	// (predictable, high entropy distibution)
+	// (predictable, high entropy distribution)
 	b.Run("predictable-5k", func(b *testing.B) {
 		var testData = make([]byte, 5000)
 		for i := range testData {
@@ -147,7 +147,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
 		b.Log(ShannonEntropyBits(testData))
 	})

-	// (not predictable, high entropy distibution)
+	// (not predictable, high entropy distribution)
 	b.Run("random-500b", func(b *testing.B) {
 		var testData = make([]byte, 500)
 		rand.Read(testData)
@@ -159,7 +159,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
 		b.Log(ShannonEntropyBits(testData))
 	})

-	// (not predictable, high entropy distibution)
+	// (not predictable, high entropy distribution)
 	b.Run("random-5k", func(b *testing.B) {
 		var testData = make([]byte, 5000)
 		rand.Read(testData)
@@ -171,7 +171,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
 		b.Log(ShannonEntropyBits(testData))
 	})

-	// (not predictable, high entropy distibution)
+	// (not predictable, high entropy distribution)
 	b.Run("random-50k", func(b *testing.B) {
 		var testData = make([]byte, 50000)
 		rand.Read(testData)
@@ -183,7 +183,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
 		b.Log(ShannonEntropyBits(testData))
 	})

-	// (not predictable, high entropy distibution)
+	// (not predictable, high entropy distribution)
 	b.Run("random-500k", func(b *testing.B) {
 		var testData = make([]byte, 500000)
 		rand.Read(testData)
@@ -195,7 +195,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
 		b.Log(ShannonEntropyBits(testData))
 	})

-	// (not predictable, medium entropy distibution)
+	// (not predictable, medium entropy distribution)
 	b.Run("base-32-5k", func(b *testing.B) {
 		var testData = make([]byte, 5000)
 		rand.Read(testData)
@@ -209,7 +209,7 @@ func BenchmarkSnannonEntropyBits(b *testing.B) {
 		}
 		b.Log(ShannonEntropyBits(testData))
 	})
-	// (medium predictable, medium entropy distibution)
+	// (medium predictable, medium entropy distribution)
 	b.Run("text", func(b *testing.B) {
 		var testData = []byte(`If compression is done per-chunk, care should be taken that it doesn't leave restic backups open to watermarking/fingerprinting attacks.
 This is essentially the same problem we discussed related to fingerprinting the CDC deduplication process:
diff --git a/flate/deflate.go b/flate/deflate.go
index 66d1657d2c..af53fb860c 100644
--- a/flate/deflate.go
+++ b/flate/deflate.go
@@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) {
 	}
 	switch d.compressionLevel.chain {
 	case 0:
-		// level was NoCompression or ConstantCompresssion.
+		// level was NoCompression or ConstantCompression.
 		d.windowEnd = 0
 	default:
 		s := d.state
diff --git a/fse/decompress.go b/fse/decompress.go
index cc05d0f7ea..0c7dd4ffef 100644
--- a/fse/decompress.go
+++ b/fse/decompress.go
@@ -15,7 +15,7 @@ const (
 // It is possible, but by no way guaranteed that corrupt data will
 // return an error.
 // It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scrach to set maximum acceptable output size.
+// Use a predefined Scratch to set maximum acceptable output size.
 func Decompress(b []byte, s *Scratch) ([]byte, error) {
 	s, err := s.prepare(b)
 	if err != nil {
diff --git a/gzhttp/transport_test.go b/gzhttp/transport_test.go
index 72cb80c10b..a5c8b9ee99 100644
--- a/gzhttp/transport_test.go
+++ b/gzhttp/transport_test.go
@@ -127,7 +127,7 @@ func TestTransportInvalid(t *testing.T) {
 	server := httptest.NewServer(newTestHandler(bin))
 	c := http.Client{Transport: Transport(http.DefaultTransport)}

-	// Serves json as gzippped...
+	// Serves json as gzipped...
 	resp, err := c.Get(server.URL + "/gzipped")
 	if err != nil {
 		t.Fatal(err)
diff --git a/huff0/decompress.go b/huff0/decompress.go
index 54bd08b25c..0f56b02d74 100644
--- a/huff0/decompress.go
+++ b/huff0/decompress.go
@@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
 			errs++
 		}
 		if errs > 0 {
-			fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
 			continue
 		}
 		// Ensure that all combinations are covered.
@@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
 				errs++
 			}
 			if errs > 20 {
-				fmt.Fprintf(w, "%d errros, stopping\n", errs)
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
 				break
 			}
 		}
diff --git a/s2/_generate/gen.go b/s2/_generate/gen.go
index d5ff78930b..05199b66be 100644
--- a/s2/_generate/gen.go
+++ b/s2/_generate/gen.go
@@ -1920,7 +1920,7 @@ func (o options) emitLiteral(name string, litLen, retval, dstBase, litBase reg.G
 	return
 }

-// genEmitRepeat generates a standlone emitRepeat.
+// genEmitRepeat generates a standalone emitRepeat.
 func (o options) genEmitRepeat() {
 	TEXT("emitRepeat", NOSPLIT, "func(dst []byte, offset, length int) int")
 	Doc("emitRepeat writes a repeat chunk and returns the number of bytes written.",
@@ -2088,7 +2088,7 @@ func (o options) emitRepeat(name string, length reg.GPVirtual, offset reg.GPVirt
 //	1 <= offset && offset <= math.MaxUint32
 //	4 <= length && length <= 1 << 24

-// genEmitCopy generates a standlone emitCopy
+// genEmitCopy generates a standalone emitCopy
 func (o options) genEmitCopy() {
 	TEXT("emitCopy", NOSPLIT, "func(dst []byte, offset, length int) int")
 	Doc("emitCopy writes a copy chunk and returns the number of bytes written.", "",
@@ -2118,7 +2118,7 @@ func (o options) genEmitCopy() {
 //	1 <= offset && offset <= math.MaxUint32
 //	4 <= length && length <= 1 << 24

-// genEmitCopy generates a standlone emitCopy
+// genEmitCopy generates a standalone emitCopy
 func (o options) genEmitCopyNoRepeat() {
 	TEXT("emitCopyNoRepeat", NOSPLIT, "func(dst []byte, offset, length int) int")
 	Doc("emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.", "",
diff --git a/s2/cmd/internal/readahead/reader.go b/s2/cmd/internal/readahead/reader.go
index 42026e38c3..fe8a738f50 100644
--- a/s2/cmd/internal/readahead/reader.go
+++ b/s2/cmd/internal/readahead/reader.go
@@ -411,7 +411,7 @@ func (a *seekable) Seek(offset int64, whence int) (res int64, err error) {
 	}
 	//Seek the actual Seeker
 	if res, err = seeker.Seek(offset, whence); err == nil {
-		//If the seek was successful, reinitalize ourselves (with the new position).
+		//If the seek was successful, reinitialize ourselves (with the new position).
 		a.initBuffers(a.in, a.bufs, a.size)
 	}
 	return
diff --git a/s2/cmd/s2c/main.go b/s2/cmd/s2c/main.go
index cd3ab5ef66..89f96aecf1 100644
--- a/s2/cmd/s2c/main.go
+++ b/s2/cmd/s2c/main.go
@@ -247,7 +247,7 @@ Options:`)
 					fmt.Printf(" %d -> %d [%.02f%%]; %v, %.01fMB/s", len(compressed), len(decomp), pct, ms, mbpersec)
 				}
 				if !bytes.Equal(decomp, b) {
-					exitErr(fmt.Errorf("decompresed data mismatch"))
+					exitErr(fmt.Errorf("decompressed data mismatch"))
 				}
 				if !*quiet {
 					fmt.Print("... Verified ok.")
diff --git a/s2/decode_test.go b/s2/decode_test.go
index 86150b9ddf..956b878205 100644
--- a/s2/decode_test.go
+++ b/s2/decode_test.go
@@ -124,7 +124,7 @@ func TestDecoderMaxBlockSize(t *testing.T) {
 				return
 			}
 			if enc.pad > 0 && buf.Len()%enc.pad != 0 {
-				t.Error(fmt.Errorf("wanted size to be mutiple of %d, got size %d with remainder %d", enc.pad, buf.Len(), buf.Len()%enc.pad))
+				t.Error(fmt.Errorf("wanted size to be multiple of %d, got size %d with remainder %d", enc.pad, buf.Len(), buf.Len()%enc.pad))
 				return
 			}
 			encoded := buf.Bytes()
diff --git a/s2/writer_test.go b/s2/writer_test.go
index a8b7585a19..0188502f76 100644
--- a/s2/writer_test.go
+++ b/s2/writer_test.go
@@ -165,7 +165,7 @@ func TestEncoderRegression(t *testing.T) {
 			}
 			comp := buf.Bytes()
 			if enc.pad > 0 && len(comp)%enc.pad != 0 {
-				t.Error(fmt.Errorf("wanted size to be mutiple of %d, got size %d with remainder %d", enc.pad, len(comp), len(comp)%enc.pad))
+				t.Error(fmt.Errorf("wanted size to be multiple of %d, got size %d with remainder %d", enc.pad, len(comp), len(comp)%enc.pad))
 				return
 			}
 			var got []byte
@@ -203,7 +203,7 @@ func TestEncoderRegression(t *testing.T) {
 				return
 			}
 			if enc.pad > 0 && buf.Len()%enc.pad != 0 {
-				t.Error(fmt.Errorf("wanted size to be mutiple of %d, got size %d with remainder %d", enc.pad, buf.Len(), buf.Len()%enc.pad))
+				t.Error(fmt.Errorf("wanted size to be multiple of %d, got size %d with remainder %d", enc.pad, buf.Len(), buf.Len()%enc.pad))
 				return
 			}
 			if !strings.Contains(name, "-snappy") {
@@ -433,7 +433,7 @@ func TestWriterPadding(t *testing.T) {
 	}

 	if dst.Len()%padding != 0 {
-		t.Fatalf("wanted size to be mutiple of %d, got size %d with remainder %d", padding, dst.Len(), dst.Len()%padding)
+		t.Fatalf("wanted size to be multiple of %d, got size %d with remainder %d", padding, dst.Len(), dst.Len()%padding)
 	}
 	var got bytes.Buffer
 	d.Reset(&dst)
@@ -457,7 +457,7 @@ func TestWriterPadding(t *testing.T) {
 		t.Fatal(err)
 	}
 	if dst.Len()%padding != 0 {
-		t.Fatalf("wanted size to be mutiple of %d, got size %d with remainder %d", padding, dst.Len(), dst.Len()%padding)
+		t.Fatalf("wanted size to be multiple of %d, got size %d with remainder %d", padding, dst.Len(), dst.Len()%padding)
 	}

 	got.Reset()
diff --git a/zstd/_generate/gen.go b/zstd/_generate/gen.go
index 03a3595d4a..4aa9ffdde7 100644
--- a/zstd/_generate/gen.go
+++ b/zstd/_generate/gen.go
@@ -220,7 +220,7 @@ func (o options) generateBody(name string, executeSingleTriple func(ctx *execute
 		ADDQ(tmp, ec.histBasePtr) // Note: we always copy from &hist[len(hist) - v]
 	}

-	Comment("Calculate poiter to s.out[cap(s.out)] (a past-end pointer)")
+	Comment("Calculate pointer to s.out[cap(s.out)] (a past-end pointer)")
 	ADDQ(ec.outBase, ec.outEndPtr)

 	Comment("outBase += outPosition")
diff --git a/zstd/enc_better.go b/zstd/enc_better.go
index a4f5bf91fc..84a79fde76 100644
--- a/zstd/enc_better.go
+++ b/zstd/enc_better.go
@@ -179,9 +179,9 @@ encodeLoop:
 		if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 			// Consider history as well.
 			var seq seq
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			seq.matchLen = uint32(length - zstdMinMatch)

 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -210,12 +210,12 @@ encodeLoop:

 			// Index match start+1 (long) -> s - 1
 			index0 := s + repOff
-			s += lenght + repOff
+			s += length + repOff

 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)

 				}
 				break encodeLoop
@@ -241,9 +241,9 @@ encodeLoop:
 		if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
 			// Consider history as well.
 			var seq seq
-			lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+			length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)

-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			seq.matchLen = uint32(length - zstdMinMatch)

 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -270,11 +270,11 @@ encodeLoop:
 			}
 			blk.sequences = append(blk.sequences, seq)

-			s += lenght + repOff2
+			s += length + repOff2
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)

 				}
 				break encodeLoop
@@ -708,9 +708,9 @@ encodeLoop:
 		if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 			// Consider history as well.
 			var seq seq
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			seq.matchLen = uint32(length - zstdMinMatch)

 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -738,12 +738,12 @@ encodeLoop:
 			blk.sequences = append(blk.sequences, seq)

 			// Index match start+1 (long) -> s - 1
-			s += lenght + repOff
+			s += length + repOff

 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)

 				}
 				break encodeLoop
@@ -772,9 +772,9 @@ encodeLoop:
 		if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
 			// Consider history as well.
 			var seq seq
-			lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+			length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)

-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			seq.matchLen = uint32(length - zstdMinMatch)

 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -801,11 +801,11 @@ encodeLoop:
 			}
 			blk.sequences = append(blk.sequences, seq)

-			s += lenght + repOff2
+			s += length + repOff2
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)

 				}
 				break encodeLoop
diff --git a/zstd/enc_dfast.go b/zstd/enc_dfast.go
index a154c18f74..d36be7bd8c 100644
--- a/zstd/enc_dfast.go
+++ b/zstd/enc_dfast.go
@@ -138,9 +138,9 @@ encodeLoop:
 		if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 			// Consider history as well.
 			var seq seq
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			seq.matchLen = uint32(length - zstdMinMatch)

 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -166,11 +166,11 @@ encodeLoop:
 				println("repeat sequence", seq, "next s:", s)
 			}
 			blk.sequences = append(blk.sequences, seq)
-			s += lenght + repOff
+			s += length + repOff
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)

 				}
 				break encodeLoop
@@ -798,9 +798,9 @@ encodeLoop:
 		if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 			// Consider history as well.
 			var seq seq
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			seq.matchLen = uint32(length - zstdMinMatch)

 			// We might be able to match backwards.
 			// Extend as long as we can.
@@ -826,11 +826,11 @@ encodeLoop:
 				println("repeat sequence", seq, "next s:", s)
 			}
 			blk.sequences = append(blk.sequences, seq)
-			s += lenght + repOff
+			s += length + repOff
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)

 				}
 				break encodeLoop
diff --git a/zstd/encoder_test.go b/zstd/encoder_test.go
index 1b2569f1db..4a39474448 100644
--- a/zstd/encoder_test.go
+++ b/zstd/encoder_test.go
@@ -462,7 +462,7 @@ func TestWithEncoderPadding(t *testing.T) {
 		// Test the added padding is invisible.
 		dst := e.EncodeAll(src, nil)
 		if len(dst)%padding != 0 {
-			t.Fatalf("wanted size to be mutiple of %d, got size %d with remainder %d", padding, len(dst), len(dst)%padding)
+			t.Fatalf("wanted size to be multiple of %d, got size %d with remainder %d", padding, len(dst), len(dst)%padding)
 		}
 		got, err := d.DecodeAll(dst, nil)
 		if err != nil {
@@ -474,7 +474,7 @@ func TestWithEncoderPadding(t *testing.T) {
 		// Test when we supply data as well.
 		dst = e.EncodeAll(src, make([]byte, rng.Int()&255))
 		if len(dst)%padding != 0 {
-			t.Fatalf("wanted size to be mutiple of %d, got size %d with remainder %d", padding, len(dst), len(dst)%padding)
+			t.Fatalf("wanted size to be multiple of %d, got size %d with remainder %d", padding, len(dst), len(dst)%padding)
 		}

 		// Test using the writer.
@@ -490,7 +490,7 @@ func TestWithEncoderPadding(t *testing.T) {
 		}
 		dst = buf.Bytes()
 		if len(dst)%padding != 0 {
-			t.Fatalf("wanted size to be mutiple of %d, got size %d with remainder %d", padding, len(dst), len(dst)%padding)
+			t.Fatalf("wanted size to be multiple of %d, got size %d with remainder %d", padding, len(dst), len(dst)%padding)
 		}
 		// Test the added padding is invisible.
 		got, err = d.DecodeAll(dst, nil)
@@ -513,7 +513,7 @@ func TestWithEncoderPadding(t *testing.T) {
 		}
 		dst = buf.Bytes()
 		if len(dst)%padding != 0 {
-			t.Fatalf("wanted size to be mutiple of %d, got size %d with remainder %d", padding, len(dst), len(dst)%padding)
+			t.Fatalf("wanted size to be multiple of %d, got size %d with remainder %d", padding, len(dst), len(dst)%padding)
 		}
 		// Test the added padding is invisible.
 		got, err = d.DecodeAll(dst, nil)
diff --git a/zstd/seqdec_amd64.go b/zstd/seqdec_amd64.go
index 8adabd8287..c59f17e07a 100644
--- a/zstd/seqdec_amd64.go
+++ b/zstd/seqdec_amd64.go
@@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 			return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)

 		default:
-			return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+			return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
 		}

 		s.seqSize += ctx.litRemain
@@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 			return io.ErrUnexpectedEOF
 		}

-		return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
 	}

 	if ctx.litRemain < 0 {
diff --git a/zstd/seqdec_amd64.s b/zstd/seqdec_amd64.s
index 5b06174b89..f5591fa1e8 100644
--- a/zstd/seqdec_amd64.s
+++ b/zstd/seqdec_amd64.s
@@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
 	MOVQ 40(SP), AX
 	ADDQ AX, 48(SP)

-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R10, 32(SP)

 	// outBase += outPosition
@@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
 	MOVQ 40(SP), CX
 	ADDQ CX, 48(SP)

-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R9, 32(SP)

 	// outBase += outPosition
@@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
 	MOVQ 40(SP), AX
 	ADDQ AX, 48(SP)

-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R10, 32(SP)

 	// outBase += outPosition
@@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
 	MOVQ 40(SP), CX
 	ADDQ CX, 48(SP)

-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R9, 32(SP)

 	// outBase += outPosition
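The `TestWithEncoderPadding` hunks above assert that padded output length is a multiple of the configured padding and that the padding is invisible after decoding. A minimal sketch of that invariant using `zstd.WithEncoderPadding` (illustrative only, not part of the patch):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	const padding = 512
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderPadding(padding))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := []byte("some payload that should survive the padded round trip")
	dst := enc.EncodeAll(src, nil)
	// Same check as the tests: output is padded to a multiple of `padding`.
	fmt.Println(len(dst)%padding == 0)

	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	got, err := dec.DecodeAll(dst, nil)
	// The padding is invisible to the decoder: the round trip returns src unchanged.
	fmt.Println(err == nil && bytes.Equal(got, src))
}
```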