feat: add blockReadConcurrency option to exporter #361

Merged · 5 commits · Jan 19, 2024
1 change: 1 addition & 0 deletions packages/ipfs-unixfs-exporter/package.json
@@ -79,6 +79,7 @@
"iso-random-stream": "^2.0.2",
"it-all": "^3.0.2",
"it-buffer-stream": "^3.0.0",
"it-drain": "^3.0.5",
"it-first": "^3.0.2",
"it-to-buffer": "^4.0.2",
"merge-options": "^3.0.4",
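The `it-drain` dependency added above is used by the new tests to consume a directory's content purely for its side effects (triggering block reads). A minimal sketch of what it does, with an illustrative generator standing in for `directory.content()`:

```typescript
import drain from 'it-drain'

// an example async iterable - stands in for directory.content() in the tests
async function * entries (): AsyncGenerator<number> {
  yield * [1, 2, 3]
}

// iterate the source to completion, discarding every value - only the side
// effects of iteration (block reads in the tests) matter
await drain(entries())
```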
36 changes: 35 additions & 1 deletion packages/ipfs-unixfs-exporter/src/index.ts
@@ -94,9 +94,40 @@ export type ExporterProgressEvents =
ProgressEvent<'unixfs:exporter:walk:raw', ExportWalk>

export interface ExporterOptions extends ProgressOptions<ExporterProgressEvents> {
/**
* An optional offset to start reading at.
*
* If the CID resolves to a file this will be a byte offset within that file,
* otherwise if it's a directory it will be a directory entry offset within
* the directory listing. (default: undefined)
*/
offset?: number

/**
* An optional length to read.
*
* If the CID resolves to a file this will be the number of bytes read from
* the file, otherwise if it's a directory it will be the number of directory
* entries read from the directory listing. (default: undefined)
*/
length?: number

/**
* This signal can be used to abort any long-lived operations such as fetching
* blocks from the network. (default: undefined)
*/
signal?: AbortSignal

/**
* When a DAG layer is encountered, all child nodes are loaded in parallel but
* processed as they arrive. This allows us to load sibling nodes in advance
* of yielding their bytes. Pass a value here to control the number of blocks
* loaded in parallel. If a strict depth-first traversal is required, this
* value should be set to `1`, otherwise the traversal order will tend to
* resemble a breadth-first fan-out and may not have a stable ordering.
* (default: undefined)
*/
blockReadConcurrency?: number
}

export interface Exportable<T> {
@@ -143,6 +174,8 @@ export interface Exportable<T> {
size: bigint

/**
* @example File content
*
* When `entry` is a file or a `raw` node, `offset` and/or `length` arguments can be passed to `entry.content()` to return slices of data:
*
* ```javascript
@@ -162,6 +195,8 @@
* return data
* ```
*
* @example Directory content
*
* If `entry` is a directory, passing `offset` and/or `length` to `entry.content()` will limit the number of files returned from the directory.
*
* ```javascript
@@ -176,7 +211,6 @@
*
* // `entries` contains the first 5 files/directories in the directory
* ```
*
*/
content(options?: ExporterOptions): AsyncGenerator<T, void, unknown>
}
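For context, a minimal sketch of how the new option could be used by a consumer, assuming a `cid` that resolves to a UnixFS file and any `Blockstore` implementation; the helper name is hypothetical:

```typescript
import { exporter } from 'ipfs-unixfs-exporter'
import type { Blockstore } from 'interface-blockstore'
import type { CID } from 'multiformats/cid'

// read a file's bytes while forcing a strict depth-first block traversal
async function readDepthFirst (cid: CID, blockstore: Blockstore): Promise<Uint8Array[]> {
  const entry = await exporter(cid, blockstore)

  if (entry.type !== 'file') {
    throw new Error('expected a UnixFS file')
  }

  const chunks: Uint8Array[] = []

  // blockReadConcurrency: 1 loads one block at a time, so sibling blocks are
  // not fetched ahead of the bytes being yielded - omit the option to keep the
  // default behaviour of loading a whole layer of siblings in parallel
  for await (const chunk of entry.content({ blockReadConcurrency: 1 })) {
    chunks.push(chunk)
  }

  return chunks
}
```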
packages/ipfs-unixfs-exporter/src/resolvers/unixfs-v1/content/directory.ts
@@ -25,7 +25,10 @@ const directoryContent: UnixfsV1Resolver = (cid, node, unixfs, path, resolve, de
return result.entry
}
}),
source => parallel(source, { ordered: true }),
source => parallel(source, {
ordered: true,
concurrency: options.blockReadConcurrency
}),
source => filter(source, entry => entry != null)
)
}
packages/ipfs-unixfs-exporter/src/resolvers/unixfs-v1/content/file.ts
@@ -84,7 +84,8 @@ async function walkDAG (blockstore: ReadableStorage, node: dagPb.PBNode | Uint8A
}
}),
(source) => parallel(source, {
ordered: true
ordered: true,
concurrency: options.blockReadConcurrency
}),
async (source) => {
for await (const { link, block, blockStart } of source) {
packages/ipfs-unixfs-exporter/src/resolvers/unixfs-v1/content/hamt-sharded-directory.ts
@@ -62,7 +62,10 @@ async function * listDirectory (node: PBNode, path: string, resolve: Resolve, de
}
}
}),
source => parallel(source, { ordered: true })
source => parallel(source, {
ordered: true,
concurrency: options.blockReadConcurrency
})
)

for await (const { entries } of results) {
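All three resolver changes above follow the same shape: each child link is mapped to a zero-argument async function that fetches the block, and `it-parallel` runs those functions with `ordered: true` so results are yielded in link order while reads overlap up to `concurrency`. A standalone sketch of that pattern, with a fake `fetchBlock` standing in for the blockstore:

```typescript
import parallel from 'it-parallel'
import map from 'it-map'
import all from 'it-all'

// stand-in for a blockstore read with variable latency
async function fetchBlock (id: number): Promise<string> {
  await new Promise(resolve => setTimeout(resolve, Math.random() * 10))
  return `block-${id}`
}

const links = [0, 1, 2, 3, 4, 5]

const blocks = await all(parallel(
  // each link becomes a function so it-parallel controls when the read starts
  map(links, link => async () => fetchBlock(link)),
  {
    // yield results in input order even if they settle out of order
    ordered: true,
    // mirrors options.blockReadConcurrency - set to 1 for a strict serial read
    concurrency: 2
  }
))

console.log(blocks) // [ 'block-0', 'block-1', ..., 'block-5' ]
```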
226 changes: 226 additions & 0 deletions packages/ipfs-unixfs-exporter/test/exporter.spec.ts
@@ -12,6 +12,7 @@
import { balanced, type FileLayout, flat, trickle } from 'ipfs-unixfs-importer/layout'
import all from 'it-all'
import randomBytes from 'it-buffer-stream'
import drain from 'it-drain'
import first from 'it-first'
import last from 'it-last'
import toBuffer from 'it-to-buffer'
@@ -20,6 +21,7 @@
import { identity } from 'multiformats/hashes/identity'
import { sha256 } from 'multiformats/hashes/sha2'
import { Readable } from 'readable-stream'
import Sinon from 'sinon'
import { concat as uint8ArrayConcat } from 'uint8arrays/concat'
import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string'
import { toString as uint8ArrayToString } from 'uint8arrays/to-string'
@@ -1343,4 +1345,228 @@
dataSizeInBytes *= 10
}
})

it('should allow control of block read concurrency for files', async () => {
// create a multi-layered DAG of a manageable size
const imported = await first(importer([{
path: '1.2MiB.txt',
content: asAsyncIterable(smallFile)
}], block, {
rawLeaves: true,
chunker: fixedSize({ chunkSize: 50 }),
layout: balanced({ maxChildrenPerNode: 2 })
}))

if (imported == null) {
throw new Error('Nothing imported')
}

const node = dagPb.decode(await block.get(imported.cid))
expect(node.Links).to.have.lengthOf(2, 'imported node had too many children')

const child1 = dagPb.decode(await block.get(node.Links[0].Hash))
expect(child1.Links).to.have.lengthOf(2, 'layer 1 node had too many children')

const child2 = dagPb.decode(await block.get(node.Links[1].Hash))
expect(child2.Links).to.have.lengthOf(2, 'layer 1 node had too many children')

// should be raw nodes
expect(child1.Links[0].Hash.code).to.equal(raw.code, 'layer 2 node had wrong codec')
expect(child1.Links[1].Hash.code).to.equal(raw.code, 'layer 2 node had wrong codec')
expect(child2.Links[0].Hash.code).to.equal(raw.code, 'layer 2 node had wrong codec')
expect(child2.Links[1].Hash.code).to.equal(raw.code, 'layer 2 node had wrong codec')

// export file
const file = await exporter(imported.cid, block)

// export file data with default settings
const blockReadSpy = Sinon.spy(block, 'get')
const contentWithDefaultBlockConcurrency = await toBuffer(file.content())

// blocks should be loaded in default order - a whole level of sibling nodes at a time
expect(blockReadSpy.getCalls().map(call => call.args[0].toString())).to.deep.equal([
node.Links[0].Hash.toString(),
node.Links[1].Hash.toString(),
child1.Links[0].Hash.toString(),
child1.Links[1].Hash.toString(),
child2.Links[0].Hash.toString(),
child2.Links[1].Hash.toString()
])

// export file data overriding read concurrency
blockReadSpy.resetHistory()
const contentWithSmallBlockConcurrency = await toBuffer(file.content({
blockReadConcurrency: 1
}))

// blocks should be loaded in traversal order
expect(blockReadSpy.getCalls().map(call => call.args[0].toString())).to.deep.equal([
node.Links[0].Hash.toString(),
child1.Links[0].Hash.toString(),
child1.Links[1].Hash.toString(),
node.Links[1].Hash.toString(),
child2.Links[0].Hash.toString(),
child2.Links[1].Hash.toString()
])

// ensure exported bytes are the same
expect(contentWithDefaultBlockConcurrency).to.equalBytes(contentWithSmallBlockConcurrency)
})

it('should allow control of block read concurrency for directories', async () => {
const entries = 1024

// create a largeish directory
const imported = await last(importer((async function * () {
for (let i = 0; i < entries; i++) {
yield {
path: `file-${i}.txt`,
content: Uint8Array.from([i])
}
}
})(), block, {
wrapWithDirectory: true
}))

if (imported == null) {
throw new Error('Nothing imported')
}

const node = dagPb.decode(await block.get(imported.cid))
expect(node.Links).to.have.lengthOf(entries, 'imported node had too many children')

for (const link of node.Links) {
// should be raw nodes
expect(link.Hash.code).to.equal(raw.code, 'child node had wrong codec')
}

// export directory
const directory = await exporter(imported.cid, block)

// wrap block.get so we can detect whether block reads are serial or overlapping
const originalGet = block.get.bind(block)

const expectedInvocations: string[] = []

for (const link of node.Links) {
expectedInvocations.push(`${link.Hash.toString()}-start`)
expectedInvocations.push(`${link.Hash.toString()}-end`)
}

const actualInvocations: string[] = []

block.get = async (cid) => {
actualInvocations.push(`${cid.toString()}-start`)

// introduce a small delay - if running in parallel actualInvocations will
// be:
// `foo-start`, `bar-start`, `baz-start`, `foo-end`, `bar-end`, `baz-end`
// if in series it will be:
// `foo-start`, `foo-end`, `bar-start`, `bar-end`, `baz-start`, `baz-end`
await delay(1)

actualInvocations.push(`${cid.toString()}-end`)

return originalGet(cid)
}

const blockReadSpy = Sinon.spy(block, 'get')
await drain(directory.content({
blockReadConcurrency: 1
}))

// blocks should be loaded in link order, one at a time
expect(blockReadSpy.getCalls().map(call => call.args[0].toString())).to.deep.equal(
node.Links.map(link => link.Hash.toString())
)

expect(actualInvocations).to.deep.equal(expectedInvocations)
})

it('should allow control of block read concurrency for HAMT sharded directories', async () => {
const entries = 1024

// create a sharded directory
const imported = await last(importer((async function * () {
for (let i = 0; i < entries; i++) {
yield {
path: `file-${i}.txt`,
content: Uint8Array.from([i])
}
}
})(), block, {
wrapWithDirectory: true,
shardSplitThresholdBytes: 10
}))

if (imported == null) {
throw new Error('Nothing imported')
}

const node = dagPb.decode(await block.get(imported.cid))
const data = UnixFS.unmarshal(node.Data ?? new Uint8Array(0))
expect(data.type).to.equal('hamt-sharded-directory')

// traverse the shard, collect all the CIDs
async function collectCIDs (node: PBNode): Promise<CID[]> {
const children: CID[] = []

for (const link of node.Links) {
children.push(link.Hash)

if (link.Hash.code === dagPb.code) {
const buf = await block.get(link.Hash)
const childNode = dagPb.decode(buf)

children.push(...(await collectCIDs(childNode)))
}
}

return children
}

const children: CID[] = await collectCIDs(node)

// export directory
const directory = await exporter(imported.cid, block)

// wrap block.get so we can detect whether block reads are serial or overlapping
const originalGet = block.get.bind(block)

const expectedInvocations: string[] = []

for (const cid of children) {
expectedInvocations.push(`${cid.toString()}-start`)
expectedInvocations.push(`${cid.toString()}-end`)
}

const actualInvocations: string[] = []

block.get = async (cid) => {
actualInvocations.push(`${cid.toString()}-start`)

// introduce a small delay - if running in parallel actualInvocations will
// be:
// `foo-start`, `bar-start`, `baz-start`, `foo-end`, `bar-end`, `baz-end`
// if in series it will be:
// `foo-start`, `foo-end`, `bar-start`, `bar-end`, `baz-start`, `baz-end`
await delay(1)

actualInvocations.push(`${cid.toString()}-end`)

return originalGet(cid)
}

const blockReadSpy = Sinon.spy(block, 'get')
await drain(directory.content({
blockReadConcurrency: 1
}))

// blocks should be loaded in traversal order, one at a time
expect(blockReadSpy.getCalls().map(call => call.args[0].toString())).to.deep.equal(
children.map(link => link.toString())
)

expect(actualInvocations).to.deep.equal(expectedInvocations)
})
})