Skip to content

Commit

Permalink
Consolidate io_sizes, loosen expectations to what we care about
Browse files Browse the repository at this point in the history
  • Loading branch information
WillChilds-Klein committed Sep 11, 2024
1 parent 35e3e82 commit f4627f0
Show file tree
Hide file tree
Showing 2 changed files with 101 additions and 90 deletions.
6 changes: 4 additions & 2 deletions crypto/pkcs7/pkcs7_internal_bio_cipher.c
Original file line number Diff line number Diff line change
Expand Up @@ -143,10 +143,10 @@ static int enc_read(BIO *b, char *out, int outl) {
ctx->read_end = ctx->read_start = &(ctx->buf[BUF_OFFSET]);
i = BIO_read(next, ctx->read_start, ENC_BLOCK_SIZE);
if (i > 0) {
/*printf("READ %d BYTES!!\n", i);*/
/*printf("READ %d BYTES!!\n", i);*/
ctx->read_end += i;
} else {
/*printf("FAILED!!\n");*/
/*printf("FAILED!!\n");*/
}
} else {
i = ctx->read_end - ctx->read_start;
Expand Down Expand Up @@ -222,6 +222,8 @@ static int enc_read(BIO *b, char *out, int outl) {

BIO_clear_retry_flags(b);
BIO_copy_next_retry(b);
// TODO [childw] note that ossl was incorrect here
// return ((ret == 0) ? ctx->cont : ret);
return ret;
}

Expand Down
185 changes: 97 additions & 88 deletions crypto/pkcs7/pkcs7_internal_bio_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,25 @@ TEST(PKCS7Test, CipherBIO) {
bssl::UniquePtr<BIO> bio_cipher;
bssl::UniquePtr<BIO> bio_mem;

int io_sizes[] = {1,
3,
7,
8,
9,
64,
31,
923,
2 * ENC_BLOCK_SIZE,
16,
32,
512,
ENC_MIN_CHUNK_SIZE - 1,
ENC_MIN_CHUNK_SIZE,
ENC_MIN_CHUNK_SIZE + 1,
ENC_BLOCK_SIZE - 1,
ENC_BLOCK_SIZE,
ENC_BLOCK_SIZE + 1};

ASSERT_TRUE(RAND_bytes(pt, sizeof(pt)));
ASSERT_TRUE(RAND_bytes(key, sizeof(key)));
ASSERT_TRUE(RAND_bytes(iv, sizeof(iv)));
Expand Down Expand Up @@ -127,10 +146,9 @@ TEST(PKCS7Test, CipherBIO) {
ASSERT_TRUE(bio_mem);
ASSERT_TRUE(BIO_push(bio_cipher.get(), bio_mem.get()));
std::vector<uint8_t> pt_vec, ct_vec, decrypted_pt_vec;
uint8_t buff[1024 * 1024];
uint8_t buff[2 * ENC_BLOCK_SIZE]; // TODO [childw]: previously this buffer was 1 MiB
ASSERT_TRUE(RAND_bytes(buff, sizeof(buff)));
for (size_t wsize :
(size_t[]){1, 3, 7, 8, 64, 7, 0, 923, sizeof(buff), 1, 8}) {
for (size_t wsize : io_sizes) {
pt_vec.insert(pt_vec.end(), buff, buff + wsize);
EXPECT_TRUE(BIO_write(bio_cipher.get(), buff, wsize) || wsize == 0);
}
Expand All @@ -147,8 +165,7 @@ TEST(PKCS7Test, CipherBIO) {
EXPECT_TRUE(BIO_get_cipher_ctx(bio_cipher.get(), &ctx));
ASSERT_TRUE(
EVP_CipherInit_ex(ctx, EVP_aes_128_gcm(), NULL, key, iv, /*enc*/ 0));
for (size_t rsize :
(size_t[]){1, 3, 7, 8, 64, 7, 0, 923, sizeof(buff), 1, 8}) {
for (size_t rsize : io_sizes) {
EXPECT_TRUE(BIO_read(bio_cipher.get(), buff, rsize) || rsize == 0);
decrypted_pt_vec.insert(decrypted_pt_vec.end(), buff, buff + rsize);
}
Expand All @@ -157,88 +174,80 @@ TEST(PKCS7Test, CipherBIO) {
EXPECT_EQ(Bytes(pt_vec.data(), pt_vec.size()),
Bytes(decrypted_pt_vec.data(), decrypted_pt_vec.size()));

// TODO [childw]: explain how the write failure is induced below
pt_vec.clear();
ct_vec.clear();
decrypted_pt_vec.clear();
bio_cipher.reset(BIO_new(BIO_f_cipher()));
ASSERT_TRUE(bio_cipher);
EXPECT_TRUE(BIO_get_cipher_ctx(bio_cipher.get(), &ctx));
ASSERT_TRUE(
EVP_CipherInit_ex(ctx, EVP_aes_128_gcm(), NULL, key, iv, /*enc*/ 1));
bio_mem.reset(BIO_new(BIO_s_mem()));
ASSERT_TRUE(bio_mem);
ASSERT_TRUE(BIO_push(bio_cipher.get(), bio_mem.get()));
const int io_size = ENC_BLOCK_SIZE;
pt_vec.insert(pt_vec.end(), buff, buff + io_size);
EXPECT_EQ(io_size, BIO_write(bio_cipher.get(), buff, io_size));
// |bio_mem| is writeable, so shouldn't have any buffered data
EXPECT_EQ(0UL, BIO_wpending(bio_cipher.get()));
// Set underlying BIO to r/o to induce buffering in |bio_cipher|
BIO_set_flags(bio_mem.get(), BIO_FLAGS_MEM_RDONLY);
pt_vec.insert(pt_vec.end(), buff, buff + io_size);
// Write to |bio_cipher| should still succeed in writing up to ENC_BLOCK_SIZE
// bytes by buffering them
int wsize = io_size > ENC_BLOCK_SIZE ? ENC_BLOCK_SIZE : io_size;
EXPECT_EQ(wsize, BIO_write(bio_cipher.get(), buff, io_size));
BIO_clear_flags(bio_mem.get(), BIO_FLAGS_MEM_RDONLY);
// Now that there's buffered data, |BIO_wpending| should match
EXPECT_EQ((size_t)wsize, BIO_wpending(bio_cipher.get()));
const int remaining = io_size - ENC_BLOCK_SIZE;
if (remaining > 0) {
EXPECT_EQ(remaining,
BIO_write(bio_cipher.get(), buff + ENC_BLOCK_SIZE, remaining));
for (int io_size : io_sizes) {
// TODO [childw]: explain how the write failure is induced below
pt_vec.clear();
ct_vec.clear();
decrypted_pt_vec.clear();
bio_cipher.reset(BIO_new(BIO_f_cipher()));
ASSERT_TRUE(bio_cipher);
EXPECT_TRUE(BIO_get_cipher_ctx(bio_cipher.get(), &ctx));
ASSERT_TRUE(
EVP_CipherInit_ex(ctx, EVP_aes_128_gcm(), NULL, key, iv, /*enc*/ 1));
bio_mem.reset(BIO_new(BIO_s_mem()));
ASSERT_TRUE(bio_mem);
ASSERT_TRUE(BIO_push(bio_cipher.get(), bio_mem.get()));
pt_vec.insert(pt_vec.end(), buff, buff + io_size);
EXPECT_EQ(io_size, BIO_write(bio_cipher.get(), buff, io_size));
// |bio_mem| is writeable, so shouldn't have any buffered data
EXPECT_EQ(0UL, BIO_wpending(bio_cipher.get()));
// Set underlying BIO to r/o to induce buffering in |bio_cipher|
BIO_set_flags(bio_mem.get(), BIO_FLAGS_MEM_RDONLY);
pt_vec.insert(pt_vec.end(), buff, buff + io_size);
// Write to |bio_cipher| should still succeed in writing up to
// ENC_BLOCK_SIZE bytes by buffering them
int wsize = io_size > ENC_BLOCK_SIZE ? ENC_BLOCK_SIZE : io_size;
EXPECT_EQ(wsize, BIO_write(bio_cipher.get(), buff, io_size));
BIO_clear_flags(bio_mem.get(), BIO_FLAGS_MEM_RDONLY);
// Now that there's buffered data, |BIO_wpending| should match
EXPECT_EQ((size_t)wsize, BIO_wpending(bio_cipher.get()));
const int remaining = io_size - ENC_BLOCK_SIZE;
if (remaining > 0) {
EXPECT_EQ(remaining,
BIO_write(bio_cipher.get(), buff + ENC_BLOCK_SIZE, remaining));
}
// Flush should empty the buffered data
EXPECT_TRUE(BIO_flush(bio_cipher.get()));
EXPECT_EQ(0UL, BIO_wpending(bio_cipher.get()));
EXPECT_TRUE(BIO_get_cipher_status(bio_cipher.get()));
EXPECT_TRUE(BIO_get_cipher_ctx(bio_cipher.get(), &ctx));
// Reset BIOs, hydrate ciphertext for decryption
while (!BIO_eof(bio_mem.get())) {
size_t bytes_read = BIO_read(bio_mem.get(), buff, sizeof(buff));
ct_vec.insert(ct_vec.end(), buff, buff + bytes_read);
}
EXPECT_TRUE(BIO_reset(bio_cipher.get())); // also resets owned |bio_mem|
EXPECT_TRUE(
BIO_write(bio_mem.get(), ct_vec.data(), ct_vec.size())); // replace ct
ASSERT_TRUE(
EVP_CipherInit_ex(ctx, EVP_aes_128_gcm(), NULL, key, iv, /*enc*/ 0));
decrypted_pt_vec.resize(pt_vec.size());
EXPECT_EQ(decrypted_pt_vec.size(), BIO_pending(bio_cipher.get()));
// First read should fully succeed
EXPECT_EQ(io_size,
BIO_read(bio_cipher.get(), decrypted_pt_vec.data(), io_size));
// Disable reads from underlying BIO
auto disable_reads = [](BIO *bio, int oper, const char *argp, size_t len,
int argi, long argl, int bio_ret,
size_t *processed) -> long {
return (oper & BIO_CB_RETURN) || !(oper & BIO_CB_READ);
};
BIO_set_callback_ex(bio_mem.get(), disable_reads);
// Set retry flags so |bio_cipher| doesn't give up when the read fails
BIO_set_retry_read(bio_mem.get());
int rsize =
BIO_read(bio_cipher.get(), decrypted_pt_vec.data() + io_size, io_size);
EXPECT_EQ(0UL, BIO_pending(bio_cipher.get()));
// Re-enable reads from underlying BIO
BIO_set_callback_ex(bio_mem.get(), nullptr);
BIO_clear_retry_flags(bio_mem.get());
rsize = BIO_read(bio_cipher.get(),
decrypted_pt_vec.data() + io_size + rsize, io_size);
EXPECT_EQ(0UL, BIO_pending(bio_cipher.get()));
EXPECT_TRUE(BIO_get_cipher_status(bio_cipher.get()));
EXPECT_EQ(pt_vec.size(), decrypted_pt_vec.size());
EXPECT_EQ(Bytes(pt_vec.data(), pt_vec.size()),
Bytes(decrypted_pt_vec.data(), decrypted_pt_vec.size()));
bio_mem.release(); // |bio_cipher| took ownership
}
// Flush should empty the buffered data
EXPECT_TRUE(BIO_flush(bio_cipher.get()));
EXPECT_EQ(0UL, BIO_wpending(bio_cipher.get()));
EXPECT_TRUE(BIO_get_cipher_status(bio_cipher.get()));
EXPECT_TRUE(BIO_get_cipher_ctx(bio_cipher.get(), &ctx));
// Reset BIOs, hydrate ciphertext for decryption
while (!BIO_eof(bio_mem.get())) {
size_t bytes_read = BIO_read(bio_mem.get(), buff, sizeof(buff));
ct_vec.insert(ct_vec.end(), buff, buff + bytes_read);
}
EXPECT_TRUE(BIO_reset(bio_cipher.get())); // also resets owned |bio_mem|
EXPECT_TRUE(
BIO_write(bio_mem.get(), ct_vec.data(), ct_vec.size())); // replace ct
ASSERT_TRUE(
EVP_CipherInit_ex(ctx, EVP_aes_128_gcm(), NULL, key, iv, /*enc*/ 0));
decrypted_pt_vec.resize(pt_vec.size());
EXPECT_EQ(decrypted_pt_vec.size(), BIO_pending(bio_cipher.get()));
// First read should succeed
EXPECT_EQ(io_size,
BIO_read(bio_cipher.get(), decrypted_pt_vec.data(), io_size));
size_t buffered_chunk = BIO_pending(bio_cipher.get());
// Check whether we have anything buffered in |bio_cipher|. If not,
// |bio_cipher|'s |BIO_pending| falls through to the underlying BIO.
if (buffered_chunk == BIO_pending(bio_mem.get())) {
buffered_chunk = 0;
EXPECT_EQ(decrypted_pt_vec.size() - io_size, BIO_pending(bio_cipher.get()));
}
// Disable reads from underlying BIO
auto disable_reads = [](BIO *bio, int oper, const char *argp, size_t len,
int argi, long argl, int bio_ret,
size_t *processed) -> long {
return (oper & BIO_CB_RETURN) || !(oper & BIO_CB_READ);
};
BIO_set_callback_ex(bio_mem.get(), disable_reads);
// Set retry flags so |bio_cipher| doesn't give up when the read fails
BIO_set_retry_read(bio_mem.get());
int rsize =
BIO_read(bio_cipher.get(), decrypted_pt_vec.data() + io_size, io_size);
EXPECT_EQ((int) buffered_chunk, rsize);
EXPECT_EQ(0UL, BIO_pending(bio_cipher.get()));
// Re-enable reads from underlying BIO
BIO_set_callback_ex(bio_mem.get(), nullptr);
BIO_clear_retry_flags(bio_mem.get());
rsize =
BIO_read(bio_cipher.get(), decrypted_pt_vec.data() + io_size + buffered_chunk, io_size);
EXPECT_EQ(io_size - (int) buffered_chunk, rsize < 0 ? 0 : rsize);
EXPECT_EQ(0UL, BIO_pending(bio_cipher.get()));
EXPECT_TRUE(BIO_get_cipher_status(bio_cipher.get()));
EXPECT_EQ(pt_vec.size(), decrypted_pt_vec.size());
EXPECT_EQ(Bytes(pt_vec.data(), pt_vec.size()),
Bytes(decrypted_pt_vec.data(), decrypted_pt_vec.size()));
bio_mem.release(); // |bio_cipher| took ownership
}

0 comments on commit f4627f0

Please sign in to comment.