Merge pull request #132 from DataDog/viq111/bulk-fix-highlycompressed-payloads

[bulk] Fix being able to Decompress large payloads
Viq111 authored Aug 24, 2023
2 parents ea68dca + bf7b920 commit 869dae0
Showing 2 changed files with 37 additions and 4 deletions.
6 changes: 3 additions & 3 deletions zstd_bulk.go
@@ -112,20 +112,20 @@ func (p *BulkProcessor) Decompress(dst, src []byte) ([]byte, error) {

	contentSize := decompressSizeHint(src)
	if cap(dst) >= contentSize {
-		dst = dst[0:contentSize]
+		dst = dst[0:cap(dst)]
	} else {
		dst = make([]byte, contentSize)
	}

-	if contentSize == 0 {
+	if len(dst) == 0 {
		return dst, nil
	}

	dctx := C.ZSTD_createDCtx()
	cWritten := C.ZSTD_decompress_usingDDict(
		dctx,
		unsafe.Pointer(&dst[0]),
-		C.size_t(contentSize),
+		C.size_t(len(dst)),
		unsafe.Pointer(&src[0]),
		C.size_t(len(src)),
		p.dDict,
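For context, a minimal sketch of the caller-side pattern this change enables: when a payload decompresses to far more than the conservative size hint, the caller can pass a pre-sized dst and Decompress now uses its full capacity instead of truncating it to the hint. The dictionary bytes and payload size below are illustrative assumptions, not values taken from this repository.

package main

import (
	"fmt"

	"github.com/DataDog/zstd"
)

func main() {
	// Illustrative dictionary bytes; a real application would use a trained zstd dictionary.
	dictionary := []byte("example dictionary contents")
	p, err := zstd.NewBulkProcessor(dictionary, zstd.BestSpeed)
	if err != nil {
		panic(err)
	}

	payload := make([]byte, 10*1000*1000) // 10 MB of zeros compresses extremely well
	compressed, err := p.Compress(nil, payload)
	if err != nil {
		panic(err)
	}

	// Provide a destination at least as large as the expected output so the
	// hint-based allocation (the zipbomb guard) does not cap the result.
	dst := make([]byte, len(payload))
	decompressed, err := p.Decompress(dst, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(decompressed)) // expected to print 10000000
}

Before this change, dst was re-sliced to the hint (dst[0:contentSize]), so a call like the one above could fail with a destination-too-small error even though the caller had supplied enough room.
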
35 changes: 34 additions & 1 deletion zstd_bullk_test.go
@@ -98,7 +98,7 @@ func TestBulkEmptyOrNilDictionary(t *testing.T) {
	}
}

-func TestBulkCompressEmptyOrNilContent(t *testing.T) {
+func TestBulkCompressDecompressEmptyOrNilContent(t *testing.T) {
	p := newBulkProcessor(t, dict, BestSpeed)
	compressed, err := p.Compress(nil, nil)
	if err != nil {
@@ -115,6 +115,14 @@ func TestBulkCompressEmptyOrNilContent(t *testing.T) {
	if len(compressed) < 4 {
		t.Error("magic number doesn't exist")
	}
+
+	decompressed, err := p.Decompress(nil, compressed)
+	if err != nil {
+		t.Error("failed to decompress")
+	}
+	if len(decompressed) != 0 {
+		t.Error("content was not decompressed correctly")
+	}
}

func TestBulkCompressIntoGivenDestination(t *testing.T) {
@@ -216,6 +224,31 @@ func TestBulkCompressAndDecompressInReverseOrder(t *testing.T) {
	}
}

+func TestBulkDecompressHighlyCompressable(t *testing.T) {
+	p := newBulkProcessor(t, dict, BestSpeed)
+
+	// Generate a big payload
+	msgSize := 10 * 1000 * 1000 // 10 MB
+	msg := make([]byte, msgSize)
+	compressed, err := Compress(nil, msg)
+	if err != nil {
+		t.Error("failed to compress")
+	}
+
+	// Regular decompression would trigger zipbomb prevention
+	_, err = p.Decompress(nil, compressed)
+	if !IsDstSizeTooSmallError(err) {
+		t.Error("expected too small error")
+	}
+
+	// Passing a large enough output buffer should let the decompression succeed
+	dst := make([]byte, 10*msgSize)
+	_, err = p.Decompress(dst, compressed)
+	if err != nil {
+		t.Errorf("failed to decompress: %s", err)
+	}
+}
+
// BenchmarkBulkCompress-8 780148 1505 ns/op 61.14 MB/s 208 B/op 5 allocs/op
func BenchmarkBulkCompress(b *testing.B) {
	p := newBulkProcessor(b, dict, BestSpeed)
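The new test relies on IsDstSizeTooSmallError to detect the zipbomb guard rejecting the hint-sized buffer. Below is a hedged sketch of a caller-side retry built on the same check; the package name, the helper name decompressUpTo, the doubling strategy, and the maxSize parameter are assumptions for illustration, not part of this commit or of the library API.

// Package zstdutil is a hypothetical consumer of github.com/DataDog/zstd.
package zstdutil

import "github.com/DataDog/zstd"

// decompressUpTo first lets Decompress size the buffer from its own hint, then
// retries with a caller-provided buffer that doubles each round, up to maxSize
// bytes, so highly compressible payloads can still be decoded.
func decompressUpTo(p *zstd.BulkProcessor, compressed []byte, maxSize int) ([]byte, error) {
	out, err := p.Decompress(nil, compressed)
	size := 2 * len(compressed)
	for err != nil && zstd.IsDstSizeTooSmallError(err) && size <= maxSize {
		dst := make([]byte, size)
		out, err = p.Decompress(dst, compressed)
		size *= 2
	}
	return out, err
}
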
