From bcb67a3be8a704b586b35099e3e88c1fdb1c0af6 Mon Sep 17 00:00:00 2001
From: Benjamin Goering <171782+gobengo@users.noreply.github.com>
Date: Mon, 1 Apr 2024 11:19:25 -0700
Subject: [PATCH] feat: uploadCarWithStat avoids copying stat.carBytes via
 Blob construction (#2543)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

… and instead wraps the stat.carBytes Uint8Array as a BlobLike.

Context:

* part of a set of changes meant to help these code paths avoid the
  'out of memory' errors others saw when testing on staging

Motivation:

* removing this `new Blob` avoids a memory allocation (a copy of
  stat.carBytes) that could be contributing to the 'out of memory' errors
* by passing a BlobLike to `uploadCAR` here rather than a whole Blob,
  there should be no downstream copies of the underlying bytes: once
  passed to `uploadCAR`, the BlobLike is handed to a `CAR.BlockStream`
  that streams out blocks without re-buffering the whole CAR in memory
  https://github.com/web3-storage/w3up/blob/main/packages/upload-client/src/index.js#L107
---
 packages/api/src/routes/nfts-upload.js | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/packages/api/src/routes/nfts-upload.js b/packages/api/src/routes/nfts-upload.js
index 86632f0d7c..26dcda63da 100644
--- a/packages/api/src/routes/nfts-upload.js
+++ b/packages/api/src/routes/nfts-upload.js
@@ -178,7 +178,16 @@ export async function uploadCarWithStat(
   // should only be 1 - shard size in w3up is > max upload size in CF
   /** @type {import('@web3-storage/w3up-client/types').CARLink[]} */
   const shards = []
-  await w3up.uploadCAR(new Blob([stat.carBytes]), {
+  const carBytesBlobLike = {
+    stream: () =>
+      new ReadableStream({
+        start(c) {
+          c.enqueue(stat.carBytes)
+          c.close()
+        },
+      }),
+  }
+  await w3up.uploadCAR(carBytesBlobLike, {
     onShardStored: ({ cid }) => {
       shards.push(cid)
     },
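
For reference, a minimal standalone sketch of the idea above, not part of the
patch. It assumes a runtime with WHATWG Blob and ReadableStream (e.g. Node.js
18+ or Cloudflare Workers); the variable names are illustrative only.

    // Pretend this is a large CAR already buffered in memory.
    const carBytes = new Uint8Array(64 * 1024 * 1024)

    // The Blob constructor copies its BufferSource parts into the Blob's own
    // storage, so the bytes exist twice while both the source array and the
    // Blob are alive.
    const asBlob = new Blob([carBytes])

    // A BlobLike only needs a stream() method. Enqueueing the existing
    // Uint8Array hands the consumer a reference to the same buffer; nothing
    // is copied up front, and the consumer can read it chunk by chunk.
    const asBlobLike = {
      stream: () =>
        new ReadableStream({
          start(controller) {
            controller.enqueue(carBytes)
            controller.close()
          },
        }),
    }

    // Either value satisfies an API that only calls .stream(), such as
    // w3up's uploadCAR, which feeds the stream to CAR.BlockStream.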