diff --git a/package.json b/package.json index 9453987566..65c18800e1 100644 --- a/package.json +++ b/package.json @@ -18,7 +18,8 @@ "release": "aegir release", "release-minor": "aegir release --type minor", "release-major": "aegir release --type major", - "coverage": "aegir coverage" + "coverage": "aegir coverage", + "dep-check": "aegir dep-check" }, "repository": { "type": "git", @@ -38,41 +39,38 @@ "homepage": "https://github.com/ipfs/js-ipfs-mfs#readme", "devDependencies": { "aegir": "^18.0.2", + "async-iterator-all": "^1.0.0", "chai": "^4.2.0", "detect-node": "^2.0.4", "detect-webworker": "^1.0.0", "dirty-chai": "^2.0.1", - "ipld": "~0.21.1", - "ipld-in-memory": "^2.0.0", - "multihashes": "~0.4.14", - "pull-buffer-stream": "^1.0.1", - "pull-traverse": "^1.0.3", + "ipfs-block-service": "~0.15.2", + "ipfs-repo": "~0.26.4", + "ipld": "~0.24.0", + "memdown": "^4.0.0", "temp-write": "^3.4.0" }, "dependencies": { - "async": "^2.6.1", - "cids": "~0.5.5", + "async-iterator-last": "^1.0.0", + "boom": "^7.2.0", + "cids": "~0.7.1", "debug": "^4.1.0", - "filereader-stream": "^2.0.0", + "err-code": "^1.1.2", "hamt-sharding": "~0.0.2", "interface-datastore": "~0.6.0", "ipfs-multipart": "~0.1.0", "ipfs-unixfs": "~0.1.16", - "ipfs-unixfs-exporter": "~0.36.1", - "ipfs-unixfs-importer": "~0.38.5", - "ipld-dag-pb": "~0.15.2", - "is-pull-stream": "~0.0.0", - "is-stream": "^1.1.0", + "ipfs-unixfs-exporter": "~0.37.1", + "ipfs-unixfs-importer": "~0.39.3", + "ipld-dag-pb": "~0.17.1", "joi": "^14.3.0", "joi-browser": "^13.4.0", "mortice": "^1.2.1", + "multicodec": "~0.5.1", + "multihashes": "~0.4.14", "once": "^1.4.0", "promisify-es6": "^1.0.3", - "pull-cat": "^1.1.11", - "pull-defer": "~0.2.3", - "pull-stream": "^3.6.9", - "pull-stream-to-stream": "^1.3.4", - "stream-to-pull-stream": "^1.7.2" + "pull-stream": "^3.6.9" }, "contributors": [ "Alan Shaw ", diff --git a/src/cli/flush.js b/src/cli/flush.js index d58ac5d098..9c9e11bb32 100644 --- a/src/cli/flush.js +++ b/src/cli/flush.js @@ -2,7 +2,7 @@ const { FILE_SEPARATOR -} = require('../core/utils') +} = require('../core/utils/constants') module.exports = { command: 'flush [path]', diff --git a/src/cli/ls.js b/src/cli/ls.js index a5c8b3cddd..f5dd288979 100644 --- a/src/cli/ls.js +++ b/src/cli/ls.js @@ -9,7 +9,7 @@ const { } = require('./utils') const { FILE_SEPARATOR -} = require('../core/utils') +} = require('../core/utils/constants') module.exports = { command: 'ls [path]', diff --git a/src/cli/write.js b/src/cli/write.js index 3fc31dec7f..71c369819a 100644 --- a/src/cli/write.js +++ b/src/cli/write.js @@ -109,7 +109,7 @@ module.exports = { argv.resolve((async () => { const ipfs = await getIpfs() - return ipfs.files.write(path, process.stdin, { + await ipfs.files.write(path, process.stdin, { offset, length, create, diff --git a/src/core/cp.js b/src/core/cp.js index 6126f74eea..fdf882127f 100644 --- a/src/core/cp.js +++ b/src/core/cp.js @@ -1,233 +1,143 @@ 'use strict' -const waterfall = require('async/waterfall') -const parallel = require('async/parallel') -const { - addLink, - updateTree, - updateMfsRoot, - toTrail, - toSourcesAndDestination, - toMfsPath -} = require('./utils') -const stat = require('./stat') const mkdir = require('./mkdir') const log = require('debug')('ipfs:mfs:cp') +const errCode = require('err-code') +const updateTree = require('./utils/update-tree') +const updateMfsRoot = require('./utils/update-mfs-root') +const addLink = require('./utils/add-link') +const applyDefaultOptions = require('./utils/apply-default-options') +const 
toMfsPath = require('./utils/to-mfs-path') +const toSourcesAndDestination = require('./utils/to-sources-and-destination') +const toTrail = require('./utils/to-trail') const defaultOptions = { parents: false, flush: true, format: 'dag-pb', hashAlg: 'sha2-256', + cidVersion: 0, shardSplitThreshold: 1000 } module.exports = (context) => { - return function mfsCp () { - const args = Array.from(arguments) - const callback = args.pop() - - waterfall([ - (cb) => toSourcesAndDestination(context, args, defaultOptions, cb), - ({ sources, destination, options }, cb) => { - if (!sources.length) { - return cb(new Error('Please supply at least one source')) - } + return async function mfsCp (...args) { + const options = applyDefaultOptions(args, defaultOptions) + let { + sources, destination + } = await toSourcesAndDestination(context, args) + + if (!sources.length) { + throw errCode(new Error('Please supply at least one source'), 'ERR_INVALID_PARAMS') + } - if (!destination) { - return cb(new Error('Please supply a destination')) - } + if (!destination) { + throw errCode(new Error('Please supply a destination'), 'ERR_INVALID_PARAMS') + } - options.parents = options.p || options.parents + options.parents = options.p || options.parents - cb(null, { sources, destination, options }) - }, - ({ sources, destination, options }, cb) => toTrail(context, destination.mfsPath, options, (error, trail) => { - if (error) { - return cb(error) - } + // make sure all sources exist + const missing = sources.find(source => !source.exists) - if (trail.length === destination.parts.length) { - log('Destination does not exist') + if (missing) { + throw errCode(new Error(`${missing.path} does not exist`), 'ERR_INVALID_PARAMS') + } - if (sources.length === 1) { - log('Only one source, copying to a file') - return copyToFile(context, sources.pop(), destination, trail, options, cb) - } + const destinationIsDirectory = isDirectory(destination) - log('Multiple sources, copying to a directory') - return copyToDirectory(context, sources, destination, trail, options, cb) - } + if (destination.exists) { + log('Destination exists') - const parent = trail[trail.length - 1] + if (sources.length === 1 && !destinationIsDirectory) { + throw errCode(new Error('directory already has entry by that name'), 'ERR_ALREADY_EXISTS') + } + } else { + log('Destination does not exist') - if (parent.type === 'dir') { - log('Destination is a directory') - return copyToDirectory(context, sources, destination, trail, options, cb) + if (sources.length > 1) { + if (!options.parents) { + throw errCode(new Error('destination did not exist, pass -p to create intermediate directories'), 'ERR_INVALID_PARAMS') } - cb(new Error('directory already has entry by that name')) - }) - ], callback) + await mkdir(context)(destination.path, options) + destination = await toMfsPath(context, destination.path) + } + } + + const destinationPath = isDirectory(destination) ? destination.mfsPath : destination.mfsDirectory + const trail = await toTrail(context, destinationPath, options) + + if (sources.length === 1) { + const source = sources.pop() + const destinationName = destinationIsDirectory ? source.name : destination.name + + log(`Only one source, copying to destination ${destinationIsDirectory ? 
'directory' : 'file'} ${destinationName}`) + + return copyToFile(context, source, destinationName, trail, options) + } + + log('Multiple sources, wrapping in a directory') + return copyToDirectory(context, sources, destination, trail, options) } } -const copyToFile = (context, source, destination, destinationTrail, options, callback) => { - waterfall([ - (cb) => asExistentTrail(context, source, options, cb), - (sourceTrail, cb) => { - const parent = destinationTrail[destinationTrail.length - 1] - const child = sourceTrail[sourceTrail.length - 1] - - waterfall([ - (next) => context.ipld.get(parent.cid, next), - (result, next) => addLink(context, { - parent: result.value, - parentCid: parent.cid, - size: child.size, - cid: child.cid, - name: destination.parts[destination.parts.length - 1] - }, next), - ({ node, cid }, next) => { - parent.node = node - parent.cid = cid - parent.size = node.size - - next(null, destinationTrail) - } - ], cb) - }, +const isDirectory = (destination) => { + return destination.unixfs && + destination.unixfs.type && + destination.unixfs.type.includes('directory') +} - // update the tree with the new child - (trail, cb) => updateTree(context, trail, options, cb), +const copyToFile = async (context, source, destination, destinationTrail, options) => { + let parent = destinationTrail.pop() - // Update the MFS record with the new CID for the root of the tree - ({ cid }, cb) => updateMfsRoot(context, cid, cb) - ], (error) => callback(error)) -} + parent = await addSourceToParent(context, source, destination, parent, options) -const copyToDirectory = (context, sources, destination, destinationTrail, options, callback) => { - waterfall([ - (cb) => { - if (destinationTrail.length !== (destination.parts.length + 1)) { - log(`Making destination directory`, destination.path) - - return waterfall([ - (cb) => mkdir(context)(destination.path, options, cb), - (cb) => toMfsPath(context, destination.path, cb), - (mfsPath, cb) => { - destination = mfsPath - - toTrail(context, destination.mfsPath, options, cb) - } - ], (err, trail) => { - if (err) { - return cb(err) - } - - destinationTrail = trail - - cb() - }) - } + // update the tree with the new containg directory + destinationTrail.push(parent) - cb() - }, - (cb) => parallel( - sources.map(source => (next) => asExistentTrail(context, source, options, next)), - cb - ), - (sourceTrails, cb) => { - waterfall([ - // ensure targets do not exist - (next) => { - parallel( - sources.map(source => { - return (cb) => { - stat(context)(`${destination.path}/${source.name}`, options, (error) => { - if (error) { - if (error.message.includes('does not exist')) { - return cb() - } - - return cb(error) - } - - cb(new Error('directory already has entry by that name')) - }) - } - }), - (error) => next(error) - ) - }, - // add links to target directory - (next) => { - const parent = destinationTrail[destinationTrail.length - 1] - - waterfall([ - (next) => context.ipld.get(parent.cid, next), - (result, next) => next(null, { cid: parent.cid, node: result.value }) - ].concat( - sourceTrails.map((sourceTrail, index) => { - return (parent, done) => { - const child = sourceTrail[sourceTrail.length - 1] - - log(`Adding ${sources[index].name} to ${parent.cid.toBaseEncodedString()}`) - - addLink(context, { - parent: parent.node, - parentCid: parent.cid, - size: child.size, - cid: child.cid, - name: sources[index].name - }, (err, result) => { - if (err) { - return done(err) - } - - log(`New directory hash ${result.cid.toBaseEncodedString()}`) - - done(err, 
result) - }) - } - }) - ), next) - }, - - ({ node, cid }, next) => { - const parent = destinationTrail[destinationTrail.length - 1] - - parent.node = node - parent.cid = cid - parent.size = node.size - - next(null, destinationTrail) - }, - - // update the tree with the new child - (trail, next) => updateTree(context, trail, options, next), - - // Update the MFS record with the new CID for the root of the tree - ({ cid }, next) => updateMfsRoot(context, cid, next) - ], cb) - } - ], (error) => callback(error)) + const newRootCid = await updateTree(context, destinationTrail, options) + + // Update the MFS record with the new CID for the root of the tree + await updateMfsRoot(context, newRootCid) } -const asExistentTrail = (context, source, options, callback) => { - toTrail(context, source.mfsPath, options, (err, trail) => { - if (err) { - return callback(err) - } +const copyToDirectory = async (context, sources, destination, destinationTrail, options) => { + // copy all the sources to the destination + for (let i = 0; i < sources.length; i++) { + const source = sources[i] - if (source.type === 'ipfs') { - return callback(null, trail) - } + destination = await addSourceToParent(context, source, source.name, destination, options) + } - if (trail.length !== (source.parts.length + 1)) { - return callback(new Error(`${source.path} does not exist`)) - } + // update the tree with the new containg directory + destinationTrail[destinationTrail.length - 1] = destination - callback(null, trail) + const newRootCid = await updateTree(context, destinationTrail, options) + + // Update the MFS record with the new CID for the root of the tree + await updateMfsRoot(context, newRootCid) +} + +const addSourceToParent = async (context, source, childName, parent, options) => { + const sourceBlock = await context.repo.blocks.get(source.cid) + + const { + node, + cid + } = await addLink(context, { + parentCid: parent.cid, + size: sourceBlock.data.length, + cid: source.cid, + name: childName, + format: options.format, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion }) + + parent.node = node + parent.cid = cid + parent.size = node.size + + return parent } diff --git a/src/core/flush.js b/src/core/flush.js index fb3c79c617..ecfc333975 100644 --- a/src/core/flush.js +++ b/src/core/flush.js @@ -1,36 +1,17 @@ 'use strict' -const waterfall = require('async/waterfall') +const applyDefaultOptions = require('./utils/apply-default-options') const stat = require('./stat') - const { FILE_SEPARATOR -} = require('./utils') +} = require('./utils/constants') const defaultOptions = {} module.exports = (context) => { - return function mfsFlush (path, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } - - if (typeof path === 'function') { - callback = path - options = {} - path = FILE_SEPARATOR - } - - if (!path) { - path = FILE_SEPARATOR - } - - options = Object.assign({}, defaultOptions, options) + return async function mfsFlush (path = FILE_SEPARATOR, options = defaultOptions) { + options = applyDefaultOptions(options, defaultOptions) - waterfall([ - (cb) => stat(context)(path, options, cb), - (stats, cb) => cb() - ], callback) + await stat(context)(path, options) } } diff --git a/src/core/index.js b/src/core/index.js index ee99c09542..58d19e3f6f 100644 --- a/src/core/index.js +++ b/src/core/index.js @@ -2,13 +2,10 @@ const assert = require('assert') const promisify = require('promisify-es6') -const { - createLock -} = require('./utils') +const createLock = 
require('./utils/create-lock') // These operations are read-locked at the function level and will execute simultaneously const readOperations = { - ls: require('./ls'), stat: require('./stat') } @@ -24,22 +21,15 @@ const writeOperations = { // These operations are asynchronous and manage their own locking const unwrappedOperations = { write: require('./write'), - read: require('./read') -} - -// These operations are synchronous and manage their own locking -const unwrappedSynchronousOperations = { - readPullStream: require('./read-pull-stream'), - readReadableStream: require('./read-readable-stream'), - lsPullStream: require('./ls-pull-stream'), - lsReadableStream: require('./ls-readable-stream') + read: require('./read'), + ls: require('./ls') } const wrap = ({ options, mfs, operations, lock }) => { Object.keys(operations).forEach(key => { - mfs[key] = promisify(lock(operations[key](options))) + mfs[key] = lock(operations[key](options)) }) } @@ -55,7 +45,28 @@ module.exports = (options) => { } = Object.assign({}, defaultOptions || {}, options) assert(options.ipld, 'MFS requires an IPLD instance') - assert(options.repo, 'MFS requires an ipfs-repo instance') + assert(options.blocks, 'MFS requires an BlockStore instance') + assert(options.datastore, 'MFS requires a DataStore instance') + + // should be able to remove this when async/await PRs are in for datastore, blockstore & repo + options.repo = { + blocks: { + get: promisify(options.blocks.get, { + context: options.blocks + }) + }, + datastore: { + open: promisify(options.datastore.open, { + context: options.datastore + }), + get: promisify(options.datastore.get, { + context: options.datastore + }), + put: promisify(options.datastore.put, { + context: options.datastore + }) + } + } const lock = createLock(repoOwner) @@ -77,11 +88,7 @@ module.exports = (options) => { }) Object.keys(unwrappedOperations).forEach(key => { - mfs[key] = promisify(unwrappedOperations[key](options)) - }) - - Object.keys(unwrappedSynchronousOperations).forEach(key => { - mfs[key] = unwrappedSynchronousOperations[key](options) + mfs[key] = unwrappedOperations[key](options) }) return mfs diff --git a/src/core/ls-pull-stream.js b/src/core/ls-pull-stream.js deleted file mode 100644 index 8d95573f4f..0000000000 --- a/src/core/ls-pull-stream.js +++ /dev/null @@ -1,144 +0,0 @@ -'use strict' - -const waterfall = require('async/waterfall') -const UnixFs = require('ipfs-unixfs') -const exporter = require('ipfs-unixfs-exporter') -const { - loadNode, - formatCid, - toMfsPath, - FILE_SEPARATOR, - FILE_TYPES -} = require('./utils') -const pull = require('pull-stream/pull') -const collect = require('pull-stream/sinks/collect') -const asyncMap = require('pull-stream/throughs/async-map') -const filter = require('pull-stream/throughs/filter') -const once = require('pull-stream/sources/once') -const error = require('pull-stream/sources/error') -const defer = require('pull-defer') - -const defaultOptions = { - long: false, - cidBase: 'base58btc' -} - -module.exports = (context) => { - return function mfsLs (path, options = {}) { - if (typeof path === 'object') { - options = path - path = FILE_SEPARATOR - } - - if (path === undefined) { - path = FILE_SEPARATOR - } - - options = Object.assign({}, defaultOptions, options) - - options.long = options.l || options.long - - const deferred = defer.source() - - waterfall([ - (cb) => toMfsPath(context, path, cb), - ({ mfsPath, depth }, cb) => { - pull( - exporter(mfsPath, context.ipld, { - maxDepth: depth - }), - - collect((err, files) => { - 
if (err) { - return cb(err) - } - - if (files.length > 1) { - return cb(new Error(`Path ${path} had ${files.length} roots`)) - } - - const file = files[0] - - if (!file) { - return cb(new Error(`${path} does not exist`)) - } - - if (file.type !== 'dir') { - return cb(null, once(file)) - } - - let first = true - - return cb(null, pull( - exporter(mfsPath, context.ipld, { - maxDepth: depth + 1 - }), - // first item in list is the directory node - filter(() => { - if (first) { - first = false - return false - } - - return true - }) - )) - }) - ) - }, - (source, cb) => { - cb(null, - pull( - source, - - // load DAGNodes for each file - asyncMap((file, cb) => { - if (!options.long) { - return cb(null, { - name: file.name, - type: 0, - size: 0, - hash: '' - }) - } - - loadNode(context, { - cid: file.cid - }, (err, result) => { - if (err) { - return cb(err) - } - - if (Buffer.isBuffer(result.node)) { - return cb(null, { - name: file.name, - type: 0, - hash: formatCid(file.cid, options.cidBase), - size: result.node.length - }) - } - - const meta = UnixFs.unmarshal(result.node.data) - - cb(null, { - name: file.name, - type: FILE_TYPES[meta.type], - hash: formatCid(file.cid, options.cidBase), - size: meta.fileSize() || 0 - }) - }) - }) - ) - ) - } - ], (err, source) => { - if (err) { - return deferred.resolve(error(err)) - } - - deferred.resolve(source) - }) - - return deferred - } -} diff --git a/src/core/ls-readable-stream.js b/src/core/ls-readable-stream.js deleted file mode 100644 index 69d546d7ef..0000000000 --- a/src/core/ls-readable-stream.js +++ /dev/null @@ -1,10 +0,0 @@ -'use strict' - -const lsPullStream = require('./ls-pull-stream') -const toStream = require('pull-stream-to-stream') - -module.exports = (context) => { - return function mfsLsReadableStream (path, options = {}) { - return toStream.source(lsPullStream(context)(path, options)) - } -} diff --git a/src/core/ls.js b/src/core/ls.js index 471ac2d365..0fe8648776 100644 --- a/src/core/ls.js +++ b/src/core/ls.js @@ -1,28 +1,56 @@ 'use strict' +const exporter = require('ipfs-unixfs-exporter') +const applyDefaultOptions = require('./utils/apply-default-options') +const toMfsPath = require('./utils/to-mfs-path') const { - FILE_SEPARATOR -} = require('./utils') -const pull = require('pull-stream/pull') -const collect = require('pull-stream/sinks/collect') -const lsPullStream = require('./ls-pull-stream') + FILE_SEPARATOR, + FILE_TYPES +} = require('./utils/constants') + +const defaultOptions = { + +} + +const toOutput = (fsEntry) => { + let type = 0 + let size = fsEntry.node.size || fsEntry.node.length + + if (fsEntry.unixfs) { + size = fsEntry.unixfs.fileSize() + type = FILE_TYPES[fsEntry.unixfs.type] + } + + return { + cid: fsEntry.cid, + name: fsEntry.name, + type, + size + } +} module.exports = (context) => { - return function mfsLs (path, options, callback) { - if (typeof path === 'function') { - callback = path + return async function * mfsLs (path = FILE_SEPARATOR, options = {}) { + if (typeof path === 'object' && !(path instanceof String)) { + options = path path = FILE_SEPARATOR - options = {} } - if (typeof options === 'function') { - callback = options - options = {} + options = applyDefaultOptions(options, defaultOptions) + + const mfsPath = await toMfsPath(context, path) + const fsDir = await exporter(mfsPath.mfsPath, context.ipld) + + // single file/node + if (!fsDir.unixfs || !fsDir.unixfs.type.includes('directory')) { + yield toOutput(fsDir) + + return } - pull( - lsPullStream(context)(path, options), - collect(callback) 
- ) + // directory, perhaps sharded + for await (const fsEntry of fsDir.content(options)) { + yield toOutput(fsEntry) + } } } diff --git a/src/core/mkdir.js b/src/core/mkdir.js index 534e72ad26..1827b27704 100644 --- a/src/core/mkdir.js +++ b/src/core/mkdir.js @@ -1,21 +1,18 @@ 'use strict' -const waterfall = require('async/waterfall') -const asyncMap = require('async/map') +const errCode = require('err-code') const log = require('debug')('ipfs:mfs:mkdir') const exporter = require('ipfs-unixfs-exporter') -const pull = require('pull-stream/pull') -const filter = require('pull-stream/throughs/filter') -const map = require('pull-stream/throughs/map') -const collect = require('pull-stream/sinks/collect') +const createNode = require('./utils/create-node') +const toPathComponents = require('./utils/to-path-components') +const updateMfsRoot = require('./utils/update-mfs-root') +const updateTree = require('./utils/update-tree') +const addLink = require('./utils/add-link') +const withMfsRoot = require('./utils/with-mfs-root') +const applyDefaultOptions = require('./utils/apply-default-options') const { - createNode, - toMfsPath, - toPathComponents, - updateMfsRoot, - updateTree, FILE_SEPARATOR -} = require('./utils') +} = require('./utils/constants') const defaultOptions = { parents: false, @@ -27,123 +24,105 @@ const defaultOptions = { } module.exports = (context) => { - return function mfsMkdir (path, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } - - options = Object.assign({}, defaultOptions, options) - - options.parents = options.p || options.parents - options.cidVersion = options.cidVersion || 0 + return async function mfsMkdir (path, options) { + options = applyDefaultOptions(options, defaultOptions) if (!path) { - return callback(new Error('no path given to Mkdir')) + throw new Error('no path given to Mkdir') } path = path.trim() if (path === FILE_SEPARATOR) { - return callback(options.parents ? null : new Error(`cannot create directory '${FILE_SEPARATOR}': Already exists`)) + if (options.parents) { + return + } + + throw errCode(new Error(`cannot create directory '${FILE_SEPARATOR}': Already exists`), 'ERR_INVALID_PATH') + } + + if (path.substring(0, 1) !== FILE_SEPARATOR) { + throw errCode(new Error('paths must start with a leading /'), 'ERR_INVALID_PATH') } log(`Creating ${path}`) const pathComponents = toPathComponents(path) - waterfall([ - (cb) => toMfsPath(context, path, cb), - // figure out the CID of the containing folder - ({ mfsDirectory, mfsPath, root }, cb) => { - const toExport = toPathComponents(mfsPath) - .slice(1) - - let depth = 0 - - let exported = '' - - pull( - exporter(mfsPath, context.ipld, { - fullPath: true - }), - // find the directory from each level in the filesystem - filter(node => { - if (node.name === toExport[depth]) { - depth++ - - return true - } - - return false - }), - // load DAGNode for the containing folder - map((node) => { - const currentPath = `${exported}${exported ? 
'/' : ''}${toExport[node.depth]}` - - if (node.type !== 'dir') { - throw new Error(`cannot access ${currentPath}: Not a directory`) - } - exported = currentPath - - return { - cid: node.cid, - name: node.name - } - }), - collect(cb) - ) - }, - // Update the MFS tree from the containingFolder upwards - (trail, cb) => { - pathComponents.unshift('/') - - // we managed to load all of the requested path segments so the - // directory already exists - if (trail.length === pathComponents.length) { - return cb(new Error('file already exists')) - } + if (pathComponents[0] === 'ipfs') { + throw errCode(new Error("path cannot have the prefix 'ipfs'"), 'ERR_INVALID_PATH') + } + + let root = await withMfsRoot(context) + let parent + let trail = [] + const emptyDir = await createNode(context, 'directory', options) - asyncMap(pathComponents.map((part, index) => ({ part, index })), ({ part, index }, cb) => { - if (trail[index]) { - return cb(null, { - name: part, - ...trail[index] - }) + // make sure the containing folder exists, creating it if necessary + for (let i = 0; i <= pathComponents.length; i++) { + const subPathComponents = pathComponents.slice(0, i) + const subPath = `/ipfs/${root}/${subPathComponents.join('/')}` + + try { + parent = await exporter(subPath, context.ipld) + log(`${subPath} existed`) + log(`${subPath} had children ${parent.node.Links.map(link => link.Name)}`) + + if (i === pathComponents.length) { + if (options.parents) { + return } - // if we are not at the last path component and we are - // not creating intermediate directories make a fuss - if (index !== pathComponents.length - 1 && !options.parents) { - return cb(new Error('file does not exist')) + throw errCode(new Error('file already exists'), 'ERR_ALREADY_EXISTS') + } + + trail.push({ + name: parent.name, + cid: parent.cid + }) + } catch (err) { + if (err.code === 'ERR_NOT_FOUND') { + if (i < pathComponents.length && !options.parents) { + throw errCode(new Error(`Intermediate directory path ${subPath} does not exist, use the -p flag to create it`), 'ERR_NOT_FOUND') } - waterfall([ - (done) => createNode(context, 'directory', options, done), - ({ cid, node }, done) => { - done(null, { - cid, - size: node.size, - name: part - }) - } - ], cb) - }, cb) - }, - - // update the tree from the leaf to the root - (trail, cb) => updateTree(context, trail, options, cb), - - // Update the MFS record with the new CID for the root of the tree - ({ cid }, cb) => updateMfsRoot(context, cid, cb) - ], (error) => { - if (error && error.message.includes('file already exists') && options.parents) { - // when the directory already exists and we are creating intermediate - // directories, do not error out (consistent with mkdir -p) - error = null + // add the intermediate directory + await addEmptyDir(context, subPathComponents[subPathComponents.length - 1], emptyDir, trail[trail.length - 1], trail, options) + } else { + throw err + } } + } + + // add an empty dir to the last path component + // await addEmptyDir(context, pathComponents[pathComponents.length - 1], emptyDir, parent, trail) + + // update the tree from the leaf to the root + const newRootCid = await updateTree(context, trail, options) - callback(error) - }) + // Update the MFS record with the new CID for the root of the tree + await updateMfsRoot(context, newRootCid) } } + +const addEmptyDir = async (context, childName, emptyDir, parent, trail, options) => { + log(`Adding empty dir called ${childName} to ${parent.cid}`) + + const result = await addLink(context, { + parent: 
parent.node, + parentCid: parent.cid, + size: emptyDir.node.size, + cid: emptyDir.cid, + name: childName, + format: options.format, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion + }) + + trail[trail.length - 1].cid = result.cid + + trail.push({ + name: childName, + cid: emptyDir.cid + }) +} diff --git a/src/core/mv.js b/src/core/mv.js index 084b2935e4..00ae295cc1 100644 --- a/src/core/mv.js +++ b/src/core/mv.js @@ -1,10 +1,7 @@ 'use strict' -const series = require('async/series') -const waterfall = require('async/waterfall') -const { - toSources -} = require('./utils') +const applyDefaultOptions = require('./utils/apply-default-options') +const toSources = require('./utils/to-sources') const cp = require('./cp') const rm = require('./rm') @@ -18,34 +15,28 @@ const defaultOptions = { } module.exports = (context) => { - return function mfsMv () { - let args = Array.from(arguments) - const callback = args.pop() - + return async function mfsMv (...args) { if (Array.isArray(args[0])) { args = args[0].concat(args.slice(1)) } - waterfall([ - (cb) => toSources(context, args, defaultOptions, cb), - ({ sources, options }, cb) => { - // remove the callback - const cpArgs = sources - .map(source => source.path).concat(options) + const { + sources + } = await toSources(context, args) + const options = applyDefaultOptions(args, defaultOptions) + + const cpArgs = sources + .map(source => source.path).concat(options) - // remove the last source as it'll be the destination - const rmArgs = sources - .slice(0, -1) - .map(source => source.path) - .concat(Object.assign(options, { - recursive: true - })) + // remove the last source as it'll be the destination + const rmArgs = sources + .slice(0, -1) + .map(source => source.path) + .concat(Object.assign(options, { + recursive: true + })) - series([ - (cb) => cp(context).apply(null, cpArgs.concat(cb)), - (cb) => rm(context).apply(null, rmArgs.concat(cb)) - ], cb) - } - ], callback) + await cp(context).apply(null, cpArgs) + await rm(context).apply(null, rmArgs) } } diff --git a/src/core/read-pull-stream.js b/src/core/read-pull-stream.js deleted file mode 100644 index 99b5cc1a07..0000000000 --- a/src/core/read-pull-stream.js +++ /dev/null @@ -1,74 +0,0 @@ -'use strict' - -const exporter = require('ipfs-unixfs-exporter') -const pull = require('pull-stream/pull') -const once = require('pull-stream/sources/once') -const asyncMap = require('pull-stream/throughs/async-map') -const flatten = require('pull-stream/throughs/flatten') -const filter = require('pull-stream/throughs/filter') -const defer = require('pull-defer') -const collect = require('pull-stream/sinks/collect') -const { - toMfsPath -} = require('./utils') -const log = require('debug')('ipfs:mfs:read-pull-stream') - -const defaultOptions = { - offset: 0, - length: undefined -} - -module.exports = (context) => { - return function mfsReadPullStream (path, options = {}) { - options = Object.assign({}, defaultOptions, options) - - // support legacy go arguments - options.length = options.length || options.count - - log(`Reading ${path}`) - - const deferred = defer.source() - - pull( - once(path), - asyncMap((path, cb) => toMfsPath(context, path, cb)), - asyncMap(({ mfsPath, root }, cb) => { - log(`Exporting ${mfsPath}`) - - return pull( - exporter(mfsPath, context.ipld, { - offset: options.offset, - length: options.length - }), - collect(cb) - ) - }), - flatten(), - filter(), - collect((error, files) => { - if (error) { - return deferred.abort(error) - } - - if (!files || !files.length) { - return 
deferred.abort(new Error(`${path} does not exist`)) - } - - const file = files[0] - - if (file.type !== 'file') { - return deferred.abort(new Error(`${path} was not a file`)) - } - - if (!file.content) { - return deferred.abort(new Error(`Could not load content stream from ${path}`)) - } - - log(`Got ${path} content`) - deferred.resolve(files[0].content) - }) - ) - - return deferred - } -} diff --git a/src/core/read-readable-stream.js b/src/core/read-readable-stream.js deleted file mode 100644 index a851722607..0000000000 --- a/src/core/read-readable-stream.js +++ /dev/null @@ -1,10 +0,0 @@ -'use strict' - -const readPullStream = require('./read-pull-stream') -const toStream = require('pull-stream-to-stream') - -module.exports = (context) => { - return function mfsReadReadableStream (path, options = {}) { - return toStream.source(readPullStream(context)(path, options)) - } -} diff --git a/src/core/read.js b/src/core/read.js index 9160427783..5ff95efc12 100644 --- a/src/core/read.js +++ b/src/core/read.js @@ -1,25 +1,39 @@ 'use strict' -const pull = require('pull-stream/pull') -const collect = require('pull-stream/sinks/collect') -const readPullStream = require('./read-pull-stream') +const exporter = require('ipfs-unixfs-exporter') +const applyDefaultOptions = require('./utils/apply-default-options') +const toMfsPath = require('./utils/to-mfs-path') +const errCode = require('err-code') + +const defaultOptions = { + offset: 0, + length: Infinity +} module.exports = (context) => { - return function mfsRead (path, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } + return function mfsRead (path, options = {}) { + options = applyDefaultOptions(options, defaultOptions) + + return { + [Symbol.asyncIterator]: async function * read () { + const mfsPath = await toMfsPath(context, path) + const result = await exporter(mfsPath.mfsPath, context.ipld) - pull( - readPullStream(context)(path, options), - collect((error, buffers) => { - if (error) { - return callback(error) + if (result.unixfs.type !== 'file') { + throw errCode(new Error(`${path} was not a file`), 'ERR_NOT_FILE') } - return callback(null, Buffer.concat(buffers)) - }) - ) + if (!result.content) { + throw errCode(new Error(`Could not load content stream from ${path}`), 'ERR_NO_CONTENT') + } + + for await (const buf of result.content({ + offset: options.offset, + length: options.length + })) { + yield buf + } + } + } } } diff --git a/src/core/rm.js b/src/core/rm.js index a5288abf5e..f0ff80c3c0 100644 --- a/src/core/rm.js +++ b/src/core/rm.js @@ -1,84 +1,80 @@ 'use strict' -const waterfall = require('async/waterfall') -const series = require('async/series') +const errCode = require('err-code') +const updateTree = require('./utils/update-tree') +const updateMfsRoot = require('./utils/update-mfs-root') +const toSources = require('./utils/to-sources') +const removeLink = require('./utils/remove-link') +const toMfsPath = require('./utils/to-mfs-path') +const toTrail = require('./utils/to-trail') +const applyDefaultOptions = require('./utils/apply-default-options') const { - updateTree, - updateMfsRoot, - toSources, - removeLink, - toMfsPath, - toTrail, FILE_SEPARATOR -} = require('./utils') +} = require('./utils/constants') const defaultOptions = { recursive: false, cidVersion: 0, hashAlg: 'sha2-256', - format: 'dag-pb' + format: 'dag-pb', + flush: true } module.exports = (context) => { - return function mfsRm () { + return async function mfsRm () { const args = Array.from(arguments) - const 
callback = args.pop() - waterfall([ - (cb) => toSources(context, args, defaultOptions, cb), - ({ sources, options }, cb) => { - if (!sources.length) { - return cb(new Error('Please supply at least one path to remove')) - } + const { + sources + } = await toSources(context, args, defaultOptions) + const options = applyDefaultOptions(args, defaultOptions) - series( - sources.map(source => { - return (done) => removePath(context, source.path, options, done) - }), - (error) => cb(error) - ) + if (!sources.length) { + throw errCode(new Error('Please supply at least one path to remove'), 'ERR_INVALID_PARAMS') + } + + sources.forEach(source => { + if (source.path === FILE_SEPARATOR) { + throw errCode(new Error('Cannot delete root'), 'ERR_INVALID_PARAMS') } - ], callback) - } -} + }) -const removePath = (context, path, options, callback) => { - if (path === FILE_SEPARATOR) { - return callback(new Error('Cannot delete root')) + for (const source of sources) { + await removePath(context, source.path, options) + } } +} - waterfall([ - (cb) => toMfsPath(context, path, cb), - ({ mfsPath, parts }, cb) => toTrail(context, mfsPath, options, (err, trail) => cb(err, { mfsPath, parts, trail })), - ({ trail }, cb) => { - const child = trail.pop() - const parent = trail[trail.length - 1] +const removePath = async (context, path, options) => { + const mfsPath = await toMfsPath(context, path) + const trail = await toTrail(context, mfsPath.mfsPath, options) + const child = trail.pop() + const parent = trail[trail.length - 1] - if (!parent) { - return cb(new Error(`${path} does not exist`)) - } + if (!parent) { + throw errCode(new Error(`${path} does not exist`), 'ERR_NOT_FOUND') + } - if (child.type === 'dir' && !options.recursive) { - return cb(new Error(`${path} is a directory, use -r to remove directories`)) - } + if (child.type === 'directory' && !options.recursive) { + throw errCode(new Error(`${path} is a directory, use -r to remove directories`), 'ERR_WAS_DIR') + } - waterfall([ - (done) => removeLink(context, { - parentCid: parent.cid, - name: child.name - }, done), - ({ cid }, done) => { - parent.cid = cid + const { + cid + } = await removeLink(context, { + parentCid: parent.cid, + name: child.name, + format: options.format, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion, + flush: options.flush + }) - done(null, trail) - } - ], cb) - }, + parent.cid = cid - // update the tree with the new child - (trail, cb) => updateTree(context, trail, options, cb), + // update the tree with the new child + const newRootCid = await updateTree(context, trail, options) - // Update the MFS record with the new CID for the root of the tree - ({ cid }, cb) => updateMfsRoot(context, cid, cb) - ], callback) + // Update the MFS record with the new CID for the root of the tree + await updateMfsRoot(context, newRootCid) } diff --git a/src/core/stat.js b/src/core/stat.js index f1b3b06b25..531c06a756 100644 --- a/src/core/stat.js +++ b/src/core/stat.js @@ -1,115 +1,90 @@ 'use strict' -const unmarshal = require('ipfs-unixfs').unmarshal -const { - formatCid, - toMfsPath, - loadNode -} = require('./utils') -const waterfall = require('async/waterfall') -const pull = require('pull-stream/pull') -const collect = require('pull-stream/sinks/collect') -const asyncMap = require('pull-stream/throughs/async-map') +const applyDefaultOptions = require('./utils/apply-default-options') +const toMfsPath = require('./utils/to-mfs-path') const exporter = require('ipfs-unixfs-exporter') const log = require('debug')('ipfs:mfs:stat') 
+const errCode = require('err-code') const defaultOptions = { - hash: false, - size: false, - withLocal: false, - cidBase: 'base58btc' + withLocal: false } module.exports = (context) => { - return function mfsStat (path, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } - - options = Object.assign({}, defaultOptions, options) + return async function mfsStat (path, options) { + options = applyDefaultOptions(options, defaultOptions) log(`Fetching stats for ${path}`) - waterfall([ - (cb) => toMfsPath(context, path, cb), - ({ mfsPath, depth }, cb) => { - pull( - exporter(mfsPath, context.ipld, { - maxDepth: depth - }), - - asyncMap((file, cb) => { - if (options.hash) { - return cb(null, { - hash: formatCid(file.cid, options.cidBase) - }) - } - - if (options.size) { - return cb(null, { - size: file.size - }) - } + const { + type, + cid, + mfsPath + } = await toMfsPath(context, path) - loadNode(context, { - cid: file.cid - }, (err, result) => { - if (err) { - return cb(err) - } + let exportPath = type === 'ipfs' && cid ? cid : mfsPath + let file - const { - node, cid - } = result - - if (Buffer.isBuffer(node)) { - return cb(null, { - hash: formatCid(cid, options.cidBase), - size: node.length, - cumulativeSize: node.length, - blocks: 0, - type: 'file', // really? - local: undefined, - sizeLocal: undefined, - withLocality: false - }) - } - - const meta = unmarshal(node.data) - let blocks = node.links.length + try { + file = await exporter(exportPath, context.ipld) + } catch (err) { + if (err.code === 'ERR_NOT_FOUND') { + throw errCode(new Error(`${path} does not exist`), 'ERR_NOT_FOUND') + } - if (meta.type === 'file') { - blocks = meta.blockSizes.length - } + throw err + } - cb(null, { - hash: formatCid(cid, options.cidBase), - size: meta.fileSize() || 0, - cumulativeSize: node.size, - blocks: blocks, - type: meta.type, - local: undefined, - sizeLocal: undefined, - withLocality: false - }) - }) - }), - collect((error, results) => { - if (error) { - return cb(error) - } + if (!statters[file.cid.codec]) { + throw new Error(`Cannot stat codec ${file.cid.codec}`) + } - if (!results.length) { - return cb(new Error(`${path} does not exist`)) - } + return statters[file.cid.codec](file, options) + } +} - log(`Stats for ${path}`, results[0]) +const statters = { + raw: (file) => { + return { + cid: file.cid, + size: file.node.length, + cumulativeSize: file.node.length, + blocks: 0, + type: 'file', // for go compatibility + local: undefined, + sizeLocal: undefined, + withLocality: false + } + }, + 'dag-pb': (file) => { + let blocks = file.node.Links.length + let size = file.node.size + let cumulativeSize = file.node.size + let nodeType = null + + if (file.unixfs) { + size = file.unixfs.fileSize() + nodeType = file.unixfs.type + + if (nodeType.includes('directory')) { + size = 0 + cumulativeSize = file.node.size + } - return cb(null, results[0]) - }) - ) + if (nodeType === 'file') { + blocks = file.unixfs.blockSizes.length } - ], callback) + } + + return { + cid: file.cid, + size: size, + cumulativeSize: cumulativeSize, + blocks: blocks, + type: nodeType, + local: undefined, + sizeLocal: undefined, + withLocality: false + } } } diff --git a/src/core/utils/add-link.js b/src/core/utils/add-link.js index e8c8692788..b72b0d05d6 100644 --- a/src/core/utils/add-link.js +++ b/src/core/utils/add-link.js @@ -5,11 +5,9 @@ const { DAGLink } = require('ipld-dag-pb') const CID = require('cids') -const waterfall = require('async/waterfall') -const whilst = 
require('async/whilst') const log = require('debug')('ipfs:mfs:core:utils:add-link') const UnixFS = require('ipfs-unixfs') -const DirSharded = require('ipfs-unixfs-importer/src/importer/dir-sharded') +const DirSharded = require('ipfs-unixfs-importer/src/dir-sharded') const { updateHamtDirectory, recreateHamtLevel, @@ -17,49 +15,32 @@ const { toPrefix, addLinksToHamtBucket } = require('./hamt-utils') - -const defaultOptions = { - parent: undefined, - cid: undefined, - name: '', - size: undefined, - flush: true, - cidVersion: 0, - hashAlg: 'sha2-256', - codec: 'dag-pb', - shardSplitThreshold: 1000 -} - -const addLink = (context, options, callback) => { - options = Object.assign({}, defaultOptions, options) - - if (!options.parentCid) { - return callback(new Error('No parent CID passed to addLink')) +const errCode = require('err-code') +const mc = require('multicodec') +const mh = require('multihashes') +const last = require('async-iterator-last') + +const addLink = async (context, options) => { + if (!options.parentCid && !options.parent) { + throw errCode(new Error('No parent node or CID passed to addLink'), 'EINVALIDPARENT') } - if (!CID.isCID(options.parentCid)) { - return callback(new Error('Invalid CID passed to addLink')) + if (options.parentCid && !CID.isCID(options.parentCid)) { + throw errCode(new Error('Invalid CID passed to addLink'), 'EINVALIDPARENTCID') } if (!options.parent) { - log('Loading parent node', options.parentCid.toBaseEncodedString()) - - return waterfall([ - (cb) => context.ipld.get(options.parentCid, cb), - (result, cb) => cb(null, result.value), - (node, cb) => addLink(context, { - ...options, - parent: node - }, cb) - ], callback) + log(`Loading parent node ${options.parentCid}`) + + options.parent = await context.ipld.get(options.parentCid) } if (!options.cid) { - return callback(new Error('No child cid passed to addLink')) + throw errCode(new Error('No child cid passed to addLink'), 'EINVALIDCHILDCID') } if (!options.name) { - return callback(new Error('No child name passed to addLink')) + throw errCode(new Error('No child name passed to addLink'), 'EINVALIDCHILDNAME') } if (!CID.isCID(options.cid)) { @@ -67,103 +48,90 @@ const addLink = (context, options, callback) => { } if (!options.size && options.size !== 0) { - return callback(new Error('No child size passed to addLink')) + throw errCode(new Error('No child size passed to addLink'), 'EINVALIDCHILDSIZE') } - const meta = UnixFS.unmarshal(options.parent.data) + const meta = UnixFS.unmarshal(options.parent.Data) if (meta.type === 'hamt-sharded-directory') { log('Adding link to sharded directory') - return addToShardedDirectory(context, options, callback) + return addToShardedDirectory(context, options) } - if (options.parent.links.length >= options.shardSplitThreshold) { + if (options.parent.Links.length >= options.shardSplitThreshold) { log('Converting directory to sharded directory') - return convertToShardedDirectory(context, options, callback) + return convertToShardedDirectory(context, options) } - log(`Adding ${options.name} to regular directory`) + log(`Adding ${options.name} (${options.cid}) to regular directory`) - addToDirectory(context, options, callback) + return addToDirectory(context, options) } -const convertToShardedDirectory = (context, options, callback) => { - createShard(context, options.parent.links.map(link => ({ - name: link.name, - size: link.size, - multihash: link.cid.buffer +const convertToShardedDirectory = async (context, options) => { + const result = await createShard(context, 
options.parent.Links.map(link => ({ + name: link.Name, + size: link.Tsize, + cid: link.Hash })).concat({ name: options.name, size: options.size, - multihash: options.cid.buffer - }), {}, (err, result) => { - if (!err) { - log('Converted directory to sharded directory', result.cid.toBaseEncodedString()) - } + cid: options.cid + }), options) - callback(err, result) - }) -} + log(`Converted directory to sharded directory ${result.cid}`) -const addToDirectory = (context, options, callback) => { - waterfall([ - (done) => DAGNode.rmLink(options.parent, options.name, done), - (parent, done) => DAGNode.addLink(parent, new DAGLink(options.name, options.size, options.cid), done), - (parent, done) => { - // Persist the new parent DAGNode - context.ipld.put(parent, { - version: options.cidVersion, - format: options.codec, - hashAlg: options.hashAlg, - hashOnly: !options.flush - }, (error, cid) => done(error, { - node: parent, - cid - })) - } - ], callback) + return result } -const addToShardedDirectory = (context, options, callback) => { - return addFileToShardedDirectoryy(context, options, (err, result) => { - if (err) { - return callback(err) - } +const addToDirectory = async (context, options) => { + let parent = await DAGNode.rmLink(options.parent, options.name) + parent = await DAGNode.addLink(parent, new DAGLink(options.name, options.size, options.cid)) - const { - shard, path - } = result - - shard.flush('', context.ipld, null, async (err, result) => { - if (err) { - return callback(err) - } - - // we have written out the shard, but only one sub-shard will have been written so replace it in the original shard - const oldLink = options.parent.links - .find(link => link.name.substring(0, 2) === path[0].prefix) - - const newLink = result.node.links - .find(link => link.name.substring(0, 2) === path[0].prefix) - - waterfall([ - (done) => { - if (!oldLink) { - return done(null, options.parent) - } - - DAGNode.rmLink(options.parent, oldLink.name, done) - }, - (parent, done) => DAGNode.addLink(parent, newLink, done), - (parent, done) => updateHamtDirectory(context, parent.links, path[0].bucket, options, done) - ], callback) - }) + const format = mc[options.format.toUpperCase().replace(/-/g, '_')] + const hashAlg = mh.names[options.hashAlg] + + // Persist the new parent DAGNode + const cid = await context.ipld.put(parent, format, { + cidVersion: options.cidVersion, + hashAlg, + hashOnly: !options.flush }) + + return { + node: parent, + cid + } } -const addFileToShardedDirectoryy = (context, options, callback) => { +const addToShardedDirectory = async (context, options) => { + const { + shard, path + } = await addFileToShardedDirectory(context, options) + + const result = await last(shard.flush('', context.ipld)) + + // we have written out the shard, but only one sub-shard will have been written so replace it in the original shard + const oldLink = options.parent.Links + .find(link => link.Name.substring(0, 2) === path[0].prefix) + + const newLink = result.node.Links + .find(link => link.Name.substring(0, 2) === path[0].prefix) + + let parent = options.parent + + if (oldLink) { + parent = await DAGNode.rmLink(options.parent, oldLink.Name) + } + + parent = await DAGNode.addLink(parent, newLink) + + return updateHamtDirectory(context, parent.Links, path[0].bucket, options) +} + +const addFileToShardedDirectory = async (context, options) => { const file = { name: options.name, cid: options.cid, @@ -171,115 +139,94 @@ const addFileToShardedDirectoryy = (context, options, callback) => { } // start at the 
root bucket and descend, loading nodes as we go - recreateHamtLevel(options.parent.links, null, null, null, async (err, rootBucket) => { - if (err) { - return callback(err) + const rootBucket = await recreateHamtLevel(options.parent.Links) + + const shard = new DirSharded({ + root: true, + dir: true, + parent: null, + parentKey: null, + path: '', + dirty: true, + flat: false + }, options) + shard._bucket = rootBucket + + // load subshards until the bucket & position no longer changes + const position = await rootBucket._findNewBucketAndPos(file.name) + const path = toBucketPath(position) + path[0].node = options.parent + let index = 0 + + while (index < path.length) { + let segment = path[index] + index++ + let node = segment.node + + let link = node.Links + .find(link => link.Name.substring(0, 2) === segment.prefix) + + if (!link) { + // prefix is new, file will be added to the current bucket + log(`Link ${segment.prefix}${file.name} will be added`) + index = path.length + + break } - const shard = new DirSharded({ - root: true, - dir: true, - parent: null, - parentKey: null, - path: '', - dirty: true, - flat: false - }) - shard._bucket = rootBucket - - // load subshards until the bucket & position no longer changes - const position = await rootBucket._findNewBucketAndPos(file.name) - const path = toBucketPath(position) - path[0].node = options.parent - let index = 0 - - whilst( - () => index < path.length, - (next) => { - let segment = path[index] - index++ - let node = segment.node - - let link = node.links - .find(link => link.name.substring(0, 2) === segment.prefix) - - if (!link) { - // prefix is new, file will be added to the current bucket - log(`Link ${segment.prefix}${file.name} will be added`) - index = path.length - return next(null, shard) - } - - if (link.name === `${segment.prefix}${file.name}`) { - // file already existed, file will be added to the current bucket - log(`Link ${segment.prefix}${file.name} will be replaced`) - index = path.length - return next(null, shard) - } - - if (link.name.length > 2) { - // another file had the same prefix, will be replaced with a subshard - log(`Link ${link.name} will be replaced with a subshard`) - index = path.length - return next(null, shard) - } - - // load sub-shard - log(`Found subshard ${segment.prefix}`) - context.ipld.get(link.cid, (err, result) => { - if (err) { - return next(err) - } - - // subshard hasn't been loaded, descend to the next level of the HAMT - if (!path[index]) { - log(`Loaded new subshard ${segment.prefix}`) - const node = result.value - - return recreateHamtLevel(node.links, rootBucket, segment.bucket, parseInt(segment.prefix, 16), async (err) => { - if (err) { - return next(err) - } - - const position = await rootBucket._findNewBucketAndPos(file.name) - - path.push({ - bucket: position.bucket, - prefix: toPrefix(position.pos), - node: node - }) - - return next(null, shard) - }) - } - - const nextSegment = path[index] - - // add next level's worth of links to bucket - addLinksToHamtBucket(result.value.links, nextSegment.bucket, rootBucket, (error) => { - nextSegment.node = result.value - - next(error, shard) - }) - }) - }, - (err, shard) => { - if (err) { - return callback(err) - } - - // finally add the new file into the shard - shard.put(file.name, { - size: file.size, - multihash: file.cid.buffer - }, (err) => { - callback(err, { - shard, path - }) - }) - } - ) + if (link.Name === `${segment.prefix}${file.name}`) { + // file already existed, file will be added to the current bucket + log(`Link 
${segment.prefix}${file.name} will be replaced`) + index = path.length + + break + } + + if (link.Name.length > 2) { + // another file had the same prefix, will be replaced with a subshard + log(`Link ${link.Name} will be replaced with a subshard`) + index = path.length + + break + } + + // load sub-shard + log(`Found subshard ${segment.prefix}`) + const subShard = await context.ipld.get(link.Hash) + + // subshard hasn't been loaded, descend to the next level of the HAMT + if (!path[index]) { + log(`Loaded new subshard ${segment.prefix}`) + await recreateHamtLevel(subShard.Links, rootBucket, segment.bucket, parseInt(segment.prefix, 16)) + + const position = await rootBucket._findNewBucketAndPos(file.name) + + path.push({ + bucket: position.bucket, + prefix: toPrefix(position.pos), + node: subShard + }) + + break + } + + const nextSegment = path[index] + + // add next level's worth of links to bucket + await addLinksToHamtBucket(subShard.Links, nextSegment.bucket, rootBucket) + + nextSegment.node = subShard + } + + // finally add the new file into the shard + await shard._bucket.put(file.name, { + size: file.size, + cid: file.cid }) + + return { + shard, path + } } const toBucketPath = (position) => { diff --git a/src/core/utils/apply-default-options.js b/src/core/utils/apply-default-options.js new file mode 100644 index 0000000000..42fa2ba692 --- /dev/null +++ b/src/core/utils/apply-default-options.js @@ -0,0 +1,53 @@ +'use strict' + +const errCode = require('err-code') + +module.exports = (options = {}, defaults) => { + if (Array.isArray(options)) { + options = options.filter(arg => typeof arg === 'object').pop() || {} + } + + const output = {} + + for (let key in defaults) { + if (options[key] !== null && options[key] !== undefined) { + output[key] = options[key] + } else { + output[key] = defaults[key] + } + } + + const format = output.format || output.codec + + if (format && isNaN(format)) { + output.format = format + delete output.codec + } + + // support legacy go arguments + if (options.count !== undefined) { + output.length = options.count + } + + if (options.p !== undefined) { + output.parents = options.p + } + + if (options.l !== undefined) { + output.long = options.l + } + + if (!output.length && output.length !== 0) { + output.length = Infinity + } + + if (output.offset < 0) { + throw errCode(new Error('cannot have negative write offset'), 'ERR_INVALID_PARAMS') + } + + if (output.length < 0) { + throw errCode(new Error('cannot have negative byte count'), 'ERR_INVALID_PARAMS') + } + + return output +} diff --git a/src/core/utils/count-stream-bytes.js b/src/core/utils/count-stream-bytes.js deleted file mode 100644 index 7627cac18f..0000000000 --- a/src/core/utils/count-stream-bytes.js +++ /dev/null @@ -1,17 +0,0 @@ -'use strict' - -const through = require('pull-stream/throughs/through') - -const countStreamBytes = (callback) => { - let bytesRead = 0 - - return through((buffer) => { - bytesRead += buffer.length - - return buffer - }, () => { - callback(bytesRead) - }) -} - -module.exports = countStreamBytes diff --git a/src/core/utils/create-lock.js b/src/core/utils/create-lock.js index a7155fa529..12002cce83 100644 --- a/src/core/utils/create-lock.js +++ b/src/core/utils/create-lock.js @@ -1,7 +1,6 @@ 'use strict' const mortice = require('mortice') -const log = require('debug')('ipfs:mfs:lock') let lock @@ -17,51 +16,20 @@ module.exports = (repoOwner) => { singleProcess: repoOwner }) - const performOperation = (type, func, args, callback) => { - log(`Queuing ${type} operation`) - 
- mutex[`${type}Lock`](() => { - return new Promise((resolve, reject) => { - args.push((error, result) => { - log(`${type.substring(0, 1).toUpperCase()}${type.substring(1)} operation callback invoked${error ? ' with error: ' + error.message : ''}`) - - if (error) { - return reject(error) - } - - resolve(result) - }) - log(`Starting ${type} operation`) - func.apply(null, args) - }) - }) - .then((result) => { - log(`Finished ${type} operation`) - - callback(null, result) - }, (error) => { - log(`Finished ${type} operation with error: ${error.message}`) - - callback(error) - }) - } - lock = { readLock: (func) => { - return function () { - const args = Array.from(arguments) - let callback = args.pop() - - performOperation('read', func, args, callback) + return (...args) => { + return mutex.readLock(() => { + return func.apply(null, args) + }) } }, writeLock: (func) => { - return function () { - const args = Array.from(arguments) - let callback = args.pop() - - performOperation('write', func, args, callback) + return (...args) => { + return mutex.writeLock(() => { + return func.apply(null, args) + }) } } } diff --git a/src/core/utils/create-node.js b/src/core/utils/create-node.js index c38399dd39..040cf88c54 100644 --- a/src/core/utils/create-node.js +++ b/src/core/utils/create-node.js @@ -1,23 +1,26 @@ 'use strict' -const waterfall = require('async/waterfall') const UnixFS = require('ipfs-unixfs') const { DAGNode } = require('ipld-dag-pb') +const mc = require('multicodec') +const mh = require('multihashes') -const createNode = (context, type, options, callback) => { - waterfall([ - (done) => DAGNode.create(new UnixFS(type).marshal(), [], done), - (node, done) => context.ipld.put(node, { - version: options.cidVersion, - format: options.format, - hashAlg: options.hashAlg - }, (err, cid) => done(err, { - cid, - node - })) - ], callback) +const createNode = async (context, type, options) => { + const format = mc[options.format.toUpperCase().replace(/-/g, '_')] + const hashAlg = mh.names[options.hashAlg] + + const node = DAGNode.create(new UnixFS(type).marshal()) + const cid = await context.ipld.put(node, format, { + cidVersion: options.cidVersion, + hashAlg + }) + + return { + cid, + node + } } module.exports = createNode diff --git a/src/core/utils/format-cid.js b/src/core/utils/format-cid.js deleted file mode 100644 index ab9bae3ea3..0000000000 --- a/src/core/utils/format-cid.js +++ /dev/null @@ -1,15 +0,0 @@ -'use strict' - -const CID = require('cids') - -module.exports = (cid, base) => { - if (Buffer.isBuffer(cid)) { - cid = new CID(cid) - } - - if (base === 'base58btc') { - return cid.toBaseEncodedString() - } - - return cid.toV1().toBaseEncodedString(base) -} diff --git a/src/core/utils/hamt-utils.js b/src/core/utils/hamt-utils.js index 00ec1e279c..df46e8dbd4 100644 --- a/src/core/utils/hamt-utils.js +++ b/src/core/utils/hamt-utils.js @@ -3,41 +3,38 @@ const { DAGNode } = require('ipld-dag-pb') -const waterfall = require('async/waterfall') -const whilst = require('async/whilst') -const series = require('async/series') const Bucket = require('hamt-sharding/src/bucket') -const DirSharded = require('ipfs-unixfs-importer/src/importer/dir-sharded') +const DirSharded = require('ipfs-unixfs-importer/src/dir-sharded') const log = require('debug')('ipfs:mfs:core:utils:hamt-utils') const UnixFS = require('ipfs-unixfs') +const mc = require('multicodec') +const mh = require('multihashes') +const last = require('async-iterator-last') -const updateHamtDirectory = (context, links, bucket, options, 
callback) => { +const updateHamtDirectory = async (context, links, bucket, options) => { // update parent with new bit field - waterfall([ - (cb) => { - const data = Buffer.from(bucket._children.bitField().reverse()) - const dir = new UnixFS('hamt-sharded-directory', data) - dir.fanout = bucket.tableSize() - dir.hashType = DirSharded.hashFn.code - - DAGNode.create(dir.marshal(), links, cb) - }, - (parent, done) => { - // Persist the new parent DAGNode - context.ipld.put(parent, { - version: options.cidVersion, - format: options.codec, - hashAlg: options.hashAlg, - hashOnly: !options.flush - }, (error, cid) => done(error, { - node: parent, - cid - })) - } - ], callback) + const data = Buffer.from(bucket._children.bitField().reverse()) + const dir = new UnixFS('hamt-sharded-directory', data) + dir.fanout = bucket.tableSize() + dir.hashType = DirSharded.hashFn.code + + const format = mc[options.format.toUpperCase().replace(/-/g, '_')] + const hashAlg = mh.names[options.hashAlg] + + const parent = DAGNode.create(dir.marshal(), links) + const cid = await context.ipld.put(parent, format, { + cidVersion: options.cidVersion, + hashAlg, + hashOnly: !options.flush + }) + + return { + node: parent, + cid + } } -const recreateHamtLevel = (links, rootBucket, parentBucket, positionAtParent, callback) => { +const recreateHamtLevel = async (links, rootBucket, parentBucket, positionAtParent) => { // recreate this level of the HAMT const bucket = new Bucket({ hashFn: DirSharded.hashFn, @@ -48,14 +45,16 @@ const recreateHamtLevel = (links, rootBucket, parentBucket, positionAtParent, ca parentBucket._putObjectAt(positionAtParent, bucket) } - addLinksToHamtBucket(links, bucket, rootBucket, callback) + await addLinksToHamtBucket(links, bucket, rootBucket) + + return bucket } -const addLinksToHamtBucket = (links, bucket, rootBucket, callback) => { - Promise.all( +const addLinksToHamtBucket = async (links, bucket, rootBucket) => { + await Promise.all( links.map(link => { - if (link.name.length === 2) { - const pos = parseInt(link.name, 16) + if (link.Name.length === 2) { + const pos = parseInt(link.Name, 16) bucket._putObjectAt(pos, new Bucket({ hashFn: DirSharded.hashFn @@ -64,13 +63,12 @@ const addLinksToHamtBucket = (links, bucket, rootBucket, callback) => { return Promise.resolve() } - return (rootBucket || bucket).put(link.name.substring(2), { - size: link.size, - multihash: link.cid + return (rootBucket || bucket).put(link.Name.substring(2), { + size: link.TSize, + cid: link.Hash }) }) ) - .then(() => callback(null, bucket), callback) } const toPrefix = (position) => { @@ -81,110 +79,95 @@ const toPrefix = (position) => { .substring(0, 2) } -const generatePath = (context, fileName, rootNode, callback) => { +const generatePath = async (context, fileName, rootNode) => { // start at the root bucket and descend, loading nodes as we go - recreateHamtLevel(rootNode.links, null, null, null, async (err, rootBucket) => { - if (err) { - return callback(err) + const rootBucket = await recreateHamtLevel(rootNode.Links, null, null, null) + const position = await rootBucket._findNewBucketAndPos(fileName) + + // the path to the root bucket + let path = [{ + bucket: position.bucket, + prefix: toPrefix(position.pos) + }] + let currentBucket = position.bucket + + while (currentBucket !== rootBucket) { + path.push({ + bucket: currentBucket, + prefix: toPrefix(currentBucket._posAtParent) + }) + + currentBucket = currentBucket._parent + } + + path.reverse() + path[0].node = rootNode + + // load DAGNode for each path segment + 
for (let i = 0; i < path.length; i++) { + const segment = path[i] + + // find prefix in links + const link = segment.node.Links + .filter(link => link.Name.substring(0, 2) === segment.prefix) + .pop() + + // entry was not in shard + if (!link) { + // reached bottom of tree, file will be added to the current bucket + log(`Link ${segment.prefix}${fileName} will be added`) + // return path + continue + } + + // found entry + if (link.Name === `${segment.prefix}${fileName}`) { + log(`Link ${segment.prefix}${fileName} will be replaced`) + // file already existed, file will be added to the current bucket + // return path + continue } - const position = await rootBucket._findNewBucketAndPos(fileName) + // found subshard + log(`Found subshard ${segment.prefix}`) + const node = await context.ipld.get(link.Hash) + + // subshard hasn't been loaded, descend to the next level of the HAMT + if (!path[i + 1]) { + log(`Loaded new subshard ${segment.prefix}`) - // the path to the root bucket - let path = [{ - bucket: position.bucket, - prefix: toPrefix(position.pos) - }] - let currentBucket = position.bucket + await recreateHamtLevel(node.Links, rootBucket, segment.bucket, parseInt(segment.prefix, 16)) + const position = await rootBucket._findNewBucketAndPos(fileName) - while (currentBucket !== rootBucket) { + // i-- path.push({ - bucket: currentBucket, - prefix: toPrefix(currentBucket._posAtParent) + bucket: position.bucket, + prefix: toPrefix(position.pos), + node: node }) - currentBucket = currentBucket._parent + continue } - path[path.length - 1].node = rootNode - - let index = path.length - - // load DAGNode for each path segment - whilst( - () => index > 0, - (next) => { - index-- - - const segment = path[index] - - // find prefix in links - const link = segment.node.links - .filter(link => link.name.substring(0, 2) === segment.prefix) - .pop() - - if (!link) { - // reached bottom of tree, file will be added to the current bucket - log(`Link ${segment.prefix}${fileName} will be added`) - return next(null, path) - } - - if (link.name === `${segment.prefix}${fileName}`) { - log(`Link ${segment.prefix}${fileName} will be replaced`) - // file already existed, file will be added to the current bucket - return next(null, path) - } - - // found subshard - log(`Found subshard ${segment.prefix}`) - context.ipld.get(link.cid, (err, result) => { - if (err) { - return next(err) - } - - // subshard hasn't been loaded, descend to the next level of the HAMT - if (!path[index - 1]) { - log(`Loaded new subshard ${segment.prefix}`) - const node = result.value - - return recreateHamtLevel(node.links, rootBucket, segment.bucket, parseInt(segment.prefix, 16), async (err, bucket) => { - if (err) { - return next(err) - } - - const position = await rootBucket._findNewBucketAndPos(fileName) - - index++ - path.unshift({ - bucket: position.bucket, - prefix: toPrefix(position.pos), - node: node - }) - - next() - }) - } - - const nextSegment = path[index - 1] - - // add intermediate links to bucket - addLinksToHamtBucket(result.value.links, nextSegment.bucket, rootBucket, (error) => { - nextSegment.node = result.value - - next(error) - }) - }) - }, - async (err, path) => { - await rootBucket.put(fileName, true) - - callback(err, { rootBucket, path }) - } - ) - }) + const nextSegment = path[i + 1] + + // add intermediate links to bucket + await addLinksToHamtBucket(node.Links, nextSegment.bucket, rootBucket) + + nextSegment.node = node + } + + await rootBucket.put(fileName, true) + + path.reverse() + + return { + rootBucket, + 
path + } } -const createShard = (context, contents, options, callback) => { +const createShard = async (context, contents, options) => { const shard = new DirSharded({ root: true, dir: true, @@ -192,30 +175,17 @@ const createShard = (context, contents, options, callback) => { parentKey: null, path: '', dirty: true, - flat: false, - - ...options - }) + flat: false + }, options) - const operations = contents.map(contents => { - return (cb) => { - shard.put(contents.name, { - size: contents.size, - multihash: contents.multihash - }, cb) - } - }) - - return series( - operations, - (err) => { - if (err) { - return callback(err) - } + for (let i = 0; i < contents.length; i++) { + await shard._bucket.put(contents[i].name, { + size: contents[i].size, + cid: contents[i].cid + }) + } - shard.flush('', context.ipld, null, callback) - } - ) + return last(shard.flush('', context.ipld, null)) } module.exports = { diff --git a/src/core/utils/index.js b/src/core/utils/index.js deleted file mode 100644 index 43e2299ed7..0000000000 --- a/src/core/utils/index.js +++ /dev/null @@ -1,29 +0,0 @@ -'use strict' - -const constants = require('./constants') - -module.exports = { - addLink: require('./add-link'), - countStreamBytes: require('./count-stream-bytes'), - createLock: require('./create-lock'), - createNode: require('./create-node'), - formatCid: require('./format-cid'), - limitStreamBytes: require('./limit-stream-bytes'), - loadNode: require('./load-node'), - removeLink: require('./remove-link'), - toMfsPath: require('./to-mfs-path'), - toPathComponents: require('./to-path-components'), - toPullSource: require('./to-pull-source'), - toSourcesAndDestination: require('./to-sources-and-destination'), - toSources: require('./to-sources'), - toTrail: require('./to-trail'), - updateMfsRoot: require('./update-mfs-root'), - updateTree: require('./update-tree'), - withMfsRoot: require('./with-mfs-root'), - zeros: require('./zeros'), - - FILE_SEPARATOR: constants.FILE_SEPARATOR, - MAX_CHUNK_SIZE: constants.MAX_CHUNK_SIZE, - MAX_LINKS: constants.MAX_LINKS, - FILE_TYPES: constants.FILE_TYPES -} diff --git a/src/core/utils/limit-stream-bytes.js b/src/core/utils/limit-stream-bytes.js deleted file mode 100644 index 40094a6cde..0000000000 --- a/src/core/utils/limit-stream-bytes.js +++ /dev/null @@ -1,24 +0,0 @@ -'use strict' - -const asyncMap = require('pull-stream/throughs/async-map') - -const limitStreamBytes = (limit) => { - let bytesRead = 0 - - return asyncMap((buffer, cb) => { - if (bytesRead > limit) { - return cb(true) // eslint-disable-line standard/no-callback-literal - } - - // If we only need to return part of this buffer, slice it to make it smaller - if (bytesRead + buffer.length > limit) { - buffer = buffer.slice(0, limit - bytesRead) - } - - bytesRead = bytesRead + buffer.length - - cb(null, buffer) - }) -} - -module.exports = limitStreamBytes diff --git a/src/core/utils/load-node.js b/src/core/utils/load-node.js deleted file mode 100644 index 2572a33fcb..0000000000 --- a/src/core/utils/load-node.js +++ /dev/null @@ -1,21 +0,0 @@ -'use strict' - -const waterfall = require('async/waterfall') -const CID = require('cids') -const log = require('debug')('ipfs:mfs:utils:load-node') - -const loadNode = (context, dagLink, callback) => { - const cid = new CID(dagLink.cid) - - log(`Loading DAGNode for child ${cid.toBaseEncodedString()}`) - - waterfall([ - (cb) => context.ipld.get(cid, cb), - (result, cb) => cb(null, { - node: result.value, - cid - }) - ], callback) -} - -module.exports = loadNode diff --git 
a/src/core/utils/remove-link.js b/src/core/utils/remove-link.js index f795c23547..778a36bd7f 100644 --- a/src/core/utils/remove-link.js +++ b/src/core/utils/remove-link.js @@ -4,7 +4,6 @@ const { DAGNode, DAGLink } = require('ipld-dag-pb') -const waterfall = require('async/waterfall') const CID = require('cids') const log = require('debug')('ipfs:mfs:core:utils:remove-link') const UnixFS = require('ipfs-unixfs') @@ -12,170 +11,134 @@ const { generatePath, updateHamtDirectory } = require('./hamt-utils') +const errCode = require('err-code') +const mc = require('multicodec') +const mh = require('multihashes') -const defaultOptions = { - parent: undefined, - parentCid: undefined, - name: '', - flush: true, - cidVersion: 0, - hashAlg: 'sha2-256', - codec: 'dag-pb', - shardSplitThreshold: 1000 -} - -const removeLink = (context, options, callback) => { - options = Object.assign({}, defaultOptions, options) - - if (!options.parentCid) { - return callback(new Error('No parent CID passed to removeLink')) +const removeLink = async (context, options) => { + if (!options.parentCid && !options.parent) { + throw errCode(new Error('No parent node or CID passed to removeLink'), 'EINVALIDPARENT') } - if (!CID.isCID(options.parentCid)) { - return callback(new Error('Invalid CID passed to addLink')) + if (options.parentCid && !CID.isCID(options.parentCid)) { + throw errCode(new Error('Invalid CID passed to removeLink'), 'EINVALIDPARENTCID') } if (!options.parent) { - log('Loading parent node', options.parentCid.toBaseEncodedString()) - - return waterfall([ - (cb) => context.ipld.get(options.parentCid, cb), - (result, cb) => cb(null, result.value), - (node, cb) => removeLink(context, { - ...options, - parent: node - }, cb) - ], callback) + log(`Loading parent node ${options.parentCid}`) + + options.parent = await context.ipld.get(options.parentCid) } if (!options.name) { - return callback(new Error('No child name passed to removeLink')) + throw errCode(new Error('No child name passed to removeLink'), 'EINVALIDCHILDNAME') } - const meta = UnixFS.unmarshal(options.parent.data) + const meta = UnixFS.unmarshal(options.parent.Data) if (meta.type === 'hamt-sharded-directory') { log(`Removing ${options.name} from sharded directory`) - return removeFromShardedDirectory(context, options, callback) + return removeFromShardedDirectory(context, options) } log(`Removing link ${options.name} regular directory`) - return removeFromDirectory(context, options, callback) + return removeFromDirectory(context, options) } -const removeFromDirectory = (context, options, callback) => { - waterfall([ - (cb) => DAGNode.rmLink(options.parent, options.name, cb), - (newParentNode, cb) => { - context.ipld.put(newParentNode, { - version: options.cidVersion, - format: options.codec, - hashAlg: options.hashAlg - }, (error, cid) => cb(error, { - node: newParentNode, - cid - })) - }, - (result, cb) => { - log('Updated regular directory', result.cid.toBaseEncodedString()) - - cb(null, result) - } - ], callback) +const removeFromDirectory = async (context, options) => { + const format = mc[options.format.toUpperCase().replace(/-/g, '_')] + const hashAlg = mh.names[options.hashAlg] + + const newParentNode = await DAGNode.rmLink(options.parent, options.name) + const cid = await context.ipld.put(newParentNode, format, { + cidVersion: options.cidVersion, + hashAlg + }) + + log(`Updated regular directory ${cid}`) + + return { + node: newParentNode, + cid + } } -const removeFromShardedDirectory = (context, options, callback) => { - return waterfall([ 
- (cb) => generatePath(context, options.name, options.parent, cb), - ({ rootBucket, path }, cb) => { - rootBucket.del(options.name) - .then(() => cb(null, { rootBucket, path }), cb) - }, - ({ rootBucket, path }, cb) => { - updateShard(context, path, { - name: options.name, - cid: options.cid, - size: options.size - }, options, (err, result = {}) => cb(err, { rootBucket, ...result })) - }, - ({ rootBucket, node }, cb) => updateHamtDirectory(context, node.links, rootBucket, options, cb) - ], callback) +const removeFromShardedDirectory = async (context, options) => { + const { + rootBucket, path + } = await generatePath(context, options.name, options.parent) + + await rootBucket.del(options.name) + + const { + node + } = await updateShard(context, path, { + name: options.name, + cid: options.cid, + size: options.size, + hashAlg: options.hashAlg, + format: options.format, + cidVersion: options.cidVersion, + flush: options.flush + }, options) + + return updateHamtDirectory(context, node.Links, rootBucket, options) } -const updateShard = (context, positions, child, options, callback) => { +const updateShard = async (context, positions, child, options) => { const { bucket, prefix, node } = positions.pop() - const link = node.links - .find(link => link.name.substring(0, 2) === prefix) + const link = node.Links + .find(link => link.Name.substring(0, 2) === prefix) if (!link) { - return callback(new Error(`No link found with prefix ${prefix} for file ${child.name}`)) + throw errCode(new Error(`No link found with prefix ${prefix} for file ${child.name}`), 'ERR_NOT_FOUND') + } + + if (link.Name === `${prefix}${child.name}`) { + log(`Removing existing link ${link.Name}`) + + const newNode = await DAGNode.rmLink(node, link.Name) + + await bucket.del(child.name) + + return updateHamtDirectory(context, newNode.Links, bucket, options) + } + + log(`Descending into sub-shard ${link.Name} for ${prefix}${child.name}`) + + const result = await updateShard(context, positions, child, options) + + let newName = prefix + + if (result.node.Links.length === 1) { + log(`Removing subshard for ${prefix}`) + + // convert shard back to normal dir + result.cid = result.node.Links[0].Hash + result.node = result.node.Links[0] + + newName = `${prefix}${result.node.Name.substring(2)}` } - return waterfall([ - (cb) => { - if (link.name === `${prefix}${child.name}`) { - log(`Removing existing link ${link.name}`) - - return waterfall([ - (done) => DAGNode.rmLink(node, link.name, done), - (node, done) => { - context.ipld.put(node, { - version: options.cidVersion, - format: options.codec, - hashAlg: options.hashAlg, - hashOnly: !options.flush - }, (error, cid) => done(error, { - node, - cid - })) - }, - (result, done) => { - bucket.del(child.name) - .then(() => done(null, result), done) - }, - (result, done) => updateHamtDirectory(context, result.node.links, bucket, options, done) - ], cb) - } - - log(`Descending into sub-shard ${link.name} for ${prefix}${child.name}`) - - return waterfall([ - (cb) => updateShard(context, positions, child, options, cb), - (result, cb) => { - let newName = prefix - - if (result.node.links.length === 1) { - log(`Removing subshard for ${prefix}`) - - // convert shard back to normal dir - result.cid = result.node.links[0].cid - result.node = result.node.links[0] - - newName = `${prefix}${result.node.name.substring(2)}` - } - - log(`Updating shard ${prefix} with name ${newName}`) - - updateShardParent(context, bucket, node, prefix, newName, result.node.size, result.cid, options, cb) - } - ], cb) - } - 
], callback) + log(`Updating shard ${prefix} with name ${newName}`) + + const size = DAGNode.isDAGNode(result.node) ? result.node.size : result.node.Tsize + + return updateShardParent(context, bucket, node, prefix, newName, size, result.cid, options) } -const updateShardParent = async (context, bucket, parent, oldName, newName, size, cid, options, callback) => { - waterfall([ - (done) => DAGNode.rmLink(parent, oldName, done), - (parent, done) => DAGNode.addLink(parent, new DAGLink(newName, size, cid), done), - (parent, done) => updateHamtDirectory(context, parent.links, bucket, options, done) - ], callback) +const updateShardParent = async (context, bucket, parent, oldName, newName, size, cid, options) => { + parent = await DAGNode.rmLink(parent, oldName) + parent = await DAGNode.addLink(parent, new DAGLink(newName, size, cid)) + + return updateHamtDirectory(context, parent.Links, bucket, options) } module.exports = removeLink diff --git a/src/core/utils/to-async-iterator.js b/src/core/utils/to-async-iterator.js new file mode 100644 index 0000000000..d30b60782b --- /dev/null +++ b/src/core/utils/to-async-iterator.js @@ -0,0 +1,91 @@ +'use strict' + +const errCode = require('err-code') +const fs = require('fs') +const log = require('debug')('ipfs:mfs:utils:to-async-iterator') +const { + MAX_CHUNK_SIZE +} = require('./constants') + +const toAsyncIterator = async (content) => { + if (!content) { + throw errCode(new Error('paths must start with a leading /'), 'ERR_INVALID_PATH') + } + + if (typeof content === 'string' || content instanceof String) { + // Paths, node only + log('Content was a path') + + return fs.createReadStream(content) + } + + if (content.length) { + log('Content was array-like') + + return { + [Symbol.asyncIterator]: async function * bufferContent () { + yield content + } + } + } + + if (content[Symbol.asyncIterator]) { + log('Content was an async iterator') + return content + } + + if (content[Symbol.iterator]) { + log('Content was an iterator') + return content + } + + if (global.Blob && content instanceof global.Blob) { + // HTML5 Blob objects (including Files) + log('Content was an HTML5 Blob') + + let index = 0 + + const iterator = { + next: async () => { + if (index > content.size) { + return { + done: true + } + } + + return new Promise((resolve, reject) => { + const chunk = content.slice(index, MAX_CHUNK_SIZE) + index += MAX_CHUNK_SIZE + + const reader = new global.FileReader() + + const handleLoad = (ev) => { + reader.removeEventListener('loadend', handleLoad, false) + + if (ev.error) { + return reject(ev.error) + } + + resolve({ + done: false, + value: Buffer.from(reader.result) + }) + } + + reader.addEventListener('loadend', handleLoad) + reader.readAsArrayBuffer(chunk) + }) + } + } + + return { + [Symbol.asyncIterator]: () => { + return iterator + } + } + } + + throw errCode(new Error(`Don't know how to convert ${content} into an async iterator`), 'ERR_INVALID_PARAMS') +} + +module.exports = toAsyncIterator diff --git a/src/core/utils/to-mfs-path.js b/src/core/utils/to-mfs-path.js index 49c7a926af..cee9b5032d 100644 --- a/src/core/utils/to-mfs-path.js +++ b/src/core/utils/to-mfs-path.js @@ -3,98 +3,103 @@ const { FILE_SEPARATOR } = require('./constants') -const withMfsRoot = require('./with-mfs-root') -const waterfall = require('async/waterfall') -const parallel = require('async/parallel') +const loadMfsRoot = require('./with-mfs-root') const toPathComponents = require('./to-path-components') +const exporter = require('ipfs-unixfs-exporter') +const errCode = 
require('err-code') const IPFS_PREFIX = 'ipfs' -const toMfsPath = (context, path, callback) => { +const toMfsPath = async (context, path) => { let outputArray = Array.isArray(path) - const paths = Array.isArray(path) ? path : [path] - - waterfall([ - (cb) => { - parallel({ - paths: (done) => { - let p - try { - p = paths.map(path => { - path = (path || '').trim() - path = path.replace(/(\/\/+)/g, '/') - - if (!path) { - throw new Error('paths must not be empty') - } - - if (path.substring(0, 1) !== FILE_SEPARATOR) { - throw new Error(`paths must start with a leading ${FILE_SEPARATOR}`) - } - - if (path.substring(path.length - FILE_SEPARATOR.length) === FILE_SEPARATOR) { - path = path.substring(0, path.length - FILE_SEPARATOR.length) - } - - return toPathComponents(path) - }) - } catch (err) { - return done(err) - } - - done(null, p) - }, - root: (done) => withMfsRoot(context, done) - }, cb) - }, - ({ paths, root }, cb) => { - cb(null, paths.map(parts => { - if (parts[0] === IPFS_PREFIX) { - let mfsDirectory - - if (parts.length === 2) { - mfsDirectory = `${FILE_SEPARATOR}${parts.join(FILE_SEPARATOR)}` - } else { - mfsDirectory = `${FILE_SEPARATOR}${parts.slice(0, parts.length - 1).join(FILE_SEPARATOR)}` - } - - return { - type: 'ipfs', - depth: parts.length - 2, - - mfsPath: `${FILE_SEPARATOR}${parts.join(FILE_SEPARATOR)}`, - mfsDirectory, - root, - parts, - path: `${FILE_SEPARATOR}${parts.join(FILE_SEPARATOR)}`, - name: parts[parts.length - 1] - } - } + let paths = Array.isArray(path) ? path : [path] + const root = await loadMfsRoot(context) - const mfsPath = `/${IPFS_PREFIX}/${root.toBaseEncodedString()}/${parts.join(FILE_SEPARATOR)}` - const mfsDirectory = `/${IPFS_PREFIX}/${root.toBaseEncodedString()}/${parts.slice(0, parts.length - 1).join(FILE_SEPARATOR)}` + paths = paths.map(path => { + path = (path || '').trim() + path = path.replace(/(\/\/+)/g, '/') - return { - type: 'mfs', - depth: parts.length, + if (path.endsWith('/') && path.length > 1) { + path = path.substring(0, path.length - 1) + } - mfsDirectory, - mfsPath, - root, - parts, - path: `${FILE_SEPARATOR}${parts.join(FILE_SEPARATOR)}`, - name: parts[parts.length - 1] - } - })) - }, - (mfsPaths, cb) => { - if (outputArray) { - return cb(null, mfsPaths) + if (!path) { + throw errCode(new Error('paths must not be empty'), 'ERR_NO_PATH') + } + + if (path.substring(0, 1) !== FILE_SEPARATOR) { + throw errCode(new Error(`paths must start with a leading ${FILE_SEPARATOR}`), 'ERR_INVALID_PATH') + } + + if (path.substring(path.length - FILE_SEPARATOR.length) === FILE_SEPARATOR) { + path = path.substring(0, path.length - FILE_SEPARATOR.length) + } + + const pathComponents = toPathComponents(path) + + if (pathComponents[0] === IPFS_PREFIX) { + // e.g. /ipfs/QMfoo or /ipfs/Qmfoo/sub/path + let mfsDirectory + + if (pathComponents.length === 2) { + mfsDirectory = `${FILE_SEPARATOR}${pathComponents.join(FILE_SEPARATOR)}` + } else { + mfsDirectory = `${FILE_SEPARATOR}${pathComponents.slice(0, pathComponents.length - 1).join(FILE_SEPARATOR)}` } - cb(null, mfsPaths[0]) + return { + type: 'ipfs', + depth: pathComponents.length - 2, + + mfsPath: `${FILE_SEPARATOR}${pathComponents.join(FILE_SEPARATOR)}`, + mfsDirectory, + parts: pathComponents, + path: `${FILE_SEPARATOR}${pathComponents.join(FILE_SEPARATOR)}`, + name: pathComponents[pathComponents.length - 1] + } + } + + const mfsPath = `/${IPFS_PREFIX}/${root}${pathComponents.length ? 
'/' + pathComponents.join(FILE_SEPARATOR) : ''}` + const mfsDirectory = `/${IPFS_PREFIX}/${root}/${pathComponents.slice(0, pathComponents.length - 1).join(FILE_SEPARATOR)}` + + return { + type: 'mfs', + depth: pathComponents.length, + + mfsDirectory, + mfsPath, + parts: pathComponents, + path: `${FILE_SEPARATOR}${pathComponents.join(FILE_SEPARATOR)}`, + name: pathComponents[pathComponents.length - 1] } - ], callback) + }) + + await Promise.all( + paths.map(async (path) => { + const cidPath = path.type === 'mfs' ? path.mfsPath : path.path + + try { + const res = await exporter(cidPath, context.ipld) + + path.cid = res.cid + path.mfsPath = `/ipfs/${res.path}` + path.unixfs = res.unixfs + path.content = res.content + } catch (err) { + if (err.code !== 'ERR_NOT_FOUND') { + throw err + } + } + + path.exists = Boolean(path.cid) + }) + ) + + if (outputArray) { + return paths + } + + return paths[0] } module.exports = toMfsPath diff --git a/src/core/utils/to-pull-source.js b/src/core/utils/to-pull-source.js deleted file mode 100644 index c0f2f3dbf0..0000000000 --- a/src/core/utils/to-pull-source.js +++ /dev/null @@ -1,68 +0,0 @@ -'use strict' - -const toPull = require('stream-to-pull-stream') -const isStream = require('is-stream') -const fileReaderStream = require('filereader-stream') -const isPullStream = require('is-pull-stream') -const fs = require('fs') -const values = require('pull-stream/sources/values') -const log = require('debug')('ipfs:mfs:utils:to-pull-source') -const waterfall = require('async/waterfall') - -const toPullSource = (content, options, callback) => { - if (!content) { - return callback(new Error('paths must start with a leading /')) - } - - // Buffers - if (Buffer.isBuffer(content)) { - log('Content was a buffer') - - if (!options.length && options.length !== 0) { - options.length = options.length || content.length - } - - return callback(null, values([content])) - } - - // Paths, node only - if (typeof content === 'string' || content instanceof String) { - log('Content was a path') - - // Find out the file size if options.length has not been specified - return waterfall([ - (done) => options.length ? 
done(null, { - size: options.length - }) : fs.stat(content, done), - (stats, done) => { - options.length = stats.size - - done(null, toPull.source(fs.createReadStream(content))) - } - ], callback) - } - - // HTML5 Blob objects (including Files) - if (global.Blob && content instanceof global.Blob) { - log('Content was an HTML5 Blob') - options.length = options.length || content.size - - content = fileReaderStream(content) - } - - // Node streams - if (isStream(content)) { - log('Content was a Node stream') - return callback(null, toPull.source(content)) - } - - // Pull stream - if (isPullStream.isSource(content)) { - log('Content was a pull-stream') - return callback(null, content) - } - - callback(new Error(`Don't know how to convert ${content} into a pull stream source`)) -} - -module.exports = toPullSource diff --git a/src/core/utils/to-sources-and-destination.js b/src/core/utils/to-sources-and-destination.js index 569037325b..b41d3c81ab 100644 --- a/src/core/utils/to-sources-and-destination.js +++ b/src/core/utils/to-sources-and-destination.js @@ -2,19 +2,19 @@ const toSources = require('./to-sources') -function toSourcesAndDestination (context, args, defaultOptions, callback) { - toSources(context, args, defaultOptions, (err, result) => { - if (err) { - return callback(err) - } +async function toSourcesAndDestination (context, args) { + const { + sources, + options + } = await toSources(context, args) - const destination = result.sources.pop() + const destination = sources.pop() - callback(null, { - destination, - ...result - }) - }) + return { + destination, + sources, + options + } } module.exports = toSourcesAndDestination diff --git a/src/core/utils/to-sources.js b/src/core/utils/to-sources.js index cc952ddc70..81c39233b4 100644 --- a/src/core/utils/to-sources.js +++ b/src/core/utils/to-sources.js @@ -2,10 +2,7 @@ const toMfsPath = require('./to-mfs-path') -function toSources (context, args, defaultOptions, callback) { - args = args.slice() - const options = Object.assign({}, defaultOptions, args.filter(arg => typeof arg === 'object').pop() || {}) - +async function toSources (context, args) { // Support weird mfs.mv([source, dest], options, callback) signature if (Array.isArray(args[0])) { args = args[0] @@ -15,12 +12,9 @@ function toSources (context, args, defaultOptions, callback) { .filter(arg => typeof arg === 'string') .map(source => source.trim()) - toMfsPath(context, sources, (err, sources) => { - callback(err, { - sources, - options - }) - }) + return { + sources: await toMfsPath(context, sources) + } } module.exports = toSources diff --git a/src/core/utils/to-trail.js b/src/core/utils/to-trail.js index 25a3c742c3..92632db03e 100644 --- a/src/core/utils/to-trail.js +++ b/src/core/utils/to-trail.js @@ -1,68 +1,23 @@ 'use strict' -const toPathComponents = require('./to-path-components') const exporter = require('ipfs-unixfs-exporter') -const pull = require('pull-stream/pull') -const filter = require('pull-stream/throughs/filter') -const map = require('pull-stream/throughs/map') -const collect = require('pull-stream/sinks/collect') const log = require('debug')('ipfs:mfs:utils:to-trail') -const toTrail = (context, path, options, callback) => { - const toExport = toPathComponents(path) - .slice(1) - const finalPath = `/${toExport - .slice(1) - .join('/')}` +const toTrail = async (context, path) => { + log(`Creating trail for path ${path}`) - let depth = 0 + const output = [] - log(`Creating trail for path ${path} ${toExport}`) + for await (const fsEntry of exporter.path(path, 
context.ipld)) { + output.push({ + name: fsEntry.name, + cid: fsEntry.cid, + size: fsEntry.node.size, + type: fsEntry.unixfs.type + }) + } - let exported = '' - - pull( - exporter(path, context.ipld, { - fullPath: true, - maxDepth: toExport.length - 1 - }), - // find the directory from each level in the filesystem - filter(node => { - log(`Saw node ${node.name} for segment ${toExport[depth]} at depth ${node.depth}`) - - if (node.name === toExport[depth]) { - depth++ - - return true - } - - return false - }), - // load DAGNode for the containing folder - map((node) => { - let currentPath = '/' - let name = currentPath - - if (exported) { - currentPath = `${exported === '/' ? '' : exported}/${toExport[node.depth]}` - name = node.name - } - - exported = currentPath - - if (exported !== finalPath && node.type !== 'dir') { - throw new Error(`cannot access ${exported}: Not a directory ${finalPath}`) - } - - return { - name, - cid: node.cid, - size: node.size, - type: node.type - } - }), - collect(callback) - ) + return output } module.exports = toTrail diff --git a/src/core/utils/update-mfs-root.js b/src/core/utils/update-mfs-root.js index 3dac62010b..f29b600769 100644 --- a/src/core/utils/update-mfs-root.js +++ b/src/core/utils/update-mfs-root.js @@ -1,20 +1,16 @@ 'use strict' -const log = require('debug')('ipfs:mfs:utils:update-mfs:root') -const waterfall = require('async/waterfall') -const CID = require('cids') +const log = require('debug')('ipfs:mfs:utils:update-mfs-root') const { MFS_ROOT_KEY } = require('./constants') -const updateMfsRoot = (context, buffer, callback) => { - const cid = new CID(buffer) +const updateMfsRoot = async (context, cid) => { + log(`New MFS root will be ${cid}`) - log(`New MFS root will be ${cid.toBaseEncodedString()}`) + await context.repo.datastore.put(MFS_ROOT_KEY, cid.buffer) - waterfall([ - (cb) => context.repo.datastore.put(MFS_ROOT_KEY, cid.buffer, (error) => cb(error)) - ], (error) => callback(error, cid)) + return cid } module.exports = updateMfsRoot diff --git a/src/core/utils/update-tree.js b/src/core/utils/update-tree.js index 5797dd9a33..a832c6516b 100644 --- a/src/core/utils/update-tree.js +++ b/src/core/utils/update-tree.js @@ -1,54 +1,60 @@ 'use strict' -const waterfall = require('async/waterfall') -const reduceRight = require('async/reduceRight') +const log = require('debug')('ipfs:mfs:utils:update-tree') const addLink = require('./add-link') const defaultOptions = { shardSplitThreshold: 1000 } -const updateTree = (context, trail, options, callback) => { +// loop backwards through the trail, replacing links of all components to update CIDs +const updateTree = async (context, trail, options) => { options = Object.assign({}, defaultOptions, options) - waterfall([ - (cb) => context.ipld.getMany(trail.map(node => node.cid), cb), - (nodes, cb) => { - let index = trail.length - 1 - - reduceRight(trail, null, (child, node, done) => { - const dagNode = nodes[index] - const cid = trail[index].cid - index-- - - if (!child) { - // first item in the list - return done(null, node) - } - - addLink(context, { - parent: dagNode, - parentCid: cid, - name: child.name, - cid: child.cid, - size: child.size, - flush: options.flush, - shardSplitThreshold: options.shardSplitThreshold - }, (err, result) => { - if (err) { - return done(err) - } - - done(err, { - cid: result.cid, - node: result.node, - name: node.name, - size: result.node.size - }) - }) - }, cb) + log('Trail', trail) + trail = trail.slice().reverse() + + let index = 0 + let child + + for await (const node 
of context.ipld.getMany(trail.map(node => node.cid))) { + const cid = trail[index].cid + const name = trail[index].name + index++ + + if (!child) { + child = { + cid, + name, + size: node.size + } + + continue } - ], callback) + + const result = await addLink(context, { + parent: node, + name: child.name, + cid: child.cid, + size: child.size, + flush: options.flush, + shardSplitThreshold: options.shardSplitThreshold, + format: options.format, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion + }) + + // new child for next loop + child = { + cid: result.cid, + name, + size: result.node.size + } + } + + log(`Final CID ${child.cid}`) + + return child.cid } module.exports = updateTree diff --git a/src/core/utils/with-mfs-root.js b/src/core/utils/with-mfs-root.js index 9dcedd2051..992e823b8b 100644 --- a/src/core/utils/with-mfs-root.js +++ b/src/core/utils/with-mfs-root.js @@ -6,47 +6,42 @@ const { DAGNode } = require('ipld-dag-pb') const log = require('debug')('ipfs:mfs:utils:with-mfs-root') -const waterfall = require('async/waterfall') +const mc = require('multicodec') +const mh = require('multihashes') const { MFS_ROOT_KEY } = require('./constants') -const withMfsRoot = (context, callback) => { - waterfall([ - // Open the repo if it's been closed - (cb) => context.repo.datastore.open((error) => cb(error)), - (cb) => { - // Load the MFS root CID - context.repo.datastore.get(MFS_ROOT_KEY, (error, result) => { - // Once datastore-level releases its error.code addition, we can remove error.notFound logic - if (error && (error.notFound || error.code === 'ERR_NOT_FOUND')) { - log('Creating new MFS root') - - return waterfall([ - // Store an empty node as the root - (next) => DAGNode.create(new UnixFs('directory').marshal(), next), - (node, next) => context.ipld.put(node, { - version: 0, - hashAlg: 'sha2-256', - format: 'dag-pb' - }, next), - // Store the Buffer in the datastore - (cid, next) => context.repo.datastore.put(MFS_ROOT_KEY, cid.buffer, (error) => next(error, cid)) - ], cb) - } - - cb(error, result ? new CID(result) : null) - }) - }, - // Turn the Buffer into a CID - (cid, cb) => { - log(`Fetched MFS root ${cid.toBaseEncodedString()}`) - - cb(null, cid) +const loadMfsRoot = async (context) => { + // Open the repo if it's been closed + await context.repo.datastore.open() + + // Load the MFS root CID + let cid + + try { + const buf = await context.repo.datastore.get(MFS_ROOT_KEY) + + cid = new CID(buf) + } catch (err) { + if (err.code !== 'ERR_NOT_FOUND') { + throw err } - // Invoke the API function with the root CID - ], callback) + + log('Creating new MFS root') + const node = DAGNode.create(new UnixFs('directory').marshal()) + cid = await context.ipld.put(node, mc.DAG_PB, { + cidVersion: 0, + hashAlg: mh.names['sha2-256'] // why can't ipld look this up? + }) + + await context.repo.datastore.put(MFS_ROOT_KEY, cid.buffer) + } + + log(`Loaded MFS root /ipfs/${cid}`) + + return cid } -module.exports = withMfsRoot +module.exports = loadMfsRoot diff --git a/src/core/utils/zeros.js b/src/core/utils/zeros.js deleted file mode 100644 index 6a27aeb2d9..0000000000 --- a/src/core/utils/zeros.js +++ /dev/null @@ -1,31 +0,0 @@ -'use strict' - -// A pull stream source that will emit buffers full of zeros up to the specified length -const zeros = (max = Infinity, increment = 4096) => { - let i = 0 - - return (end, cb) => { - if (end) { - return cb && cb(end) - } - - if (i >= max) { - // Ugh. 
https://github.com/standard/standard/issues/623 - const foo = true - return cb(foo) - } - - let nextLength = increment - - if ((i + nextLength) > max) { - // final chunk doesn't divide neatly into increment - nextLength = max - i - } - - i += nextLength - - cb(null, Buffer.alloc(nextLength, 0)) - } -} - -module.exports = zeros diff --git a/src/core/write.js b/src/core/write.js index 1b258d30d1..014a708792 100644 --- a/src/core/write.js +++ b/src/core/write.js @@ -1,39 +1,23 @@ 'use strict' -const promisify = require('promisify-es6') -const waterfall = require('async/waterfall') -const parallel = require('async/parallel') -const series = require('async/series') -const { - createLock, - updateMfsRoot, - addLink, - updateTree, - toMfsPath, - toPathComponents, - toPullSource, - loadNode, - limitStreamBytes, - countStreamBytes, - toTrail, - zeros -} = require('./utils') -const { - unmarshal -} = require('ipfs-unixfs') -const pull = require('pull-stream/pull') -const cat = require('pull-cat') -const collect = require('pull-stream/sinks/collect') -const empty = require('pull-stream/sources/empty') -const err = require('pull-stream/sources/error') const log = require('debug')('ipfs:mfs:write') -const values = require('pull-stream/sources/values') -const exporter = require('ipfs-unixfs-exporter') const importer = require('ipfs-unixfs-importer') -const deferred = require('pull-defer') -const CID = require('cids') const stat = require('./stat') const mkdir = require('./mkdir') +const addLink = require('./utils/add-link') +const applyDefaultOptions = require('./utils/apply-default-options') +const createLock = require('./utils/create-lock') +const toAsyncIterator = require('./utils/to-async-iterator') +const toMfsPath = require('./utils/to-mfs-path') +const toPathComponents = require('./utils/to-path-components') +const toTrail = require('./utils/to-trail') +const updateTree = require('./utils/update-tree') +const updateMfsRoot = require('./utils/update-mfs-root') +const errCode = require('err-code') +const { + MAX_CHUNK_SIZE +} = require('./utils/constants') +const last = require('async-iterator-last') const defaultOptions = { offset: 0, // the offset in the file to begin writing @@ -53,187 +37,90 @@ const defaultOptions = { shardSplitThreshold: 1000 } -module.exports = function mfsWrite (context) { - return promisify((path, content, options, callback) => { - if (typeof options === 'function') { - callback = options - options = {} +module.exports = (context) => { + return async function mfsWrite (path, content, options) { + log('Hello world, writing', path, content, options) + options = applyDefaultOptions(options, defaultOptions) + + let source, destination, parent + log('Reading source, destination and parent') + await createLock().readLock(async () => { + source = await toAsyncIterator(content, options) + destination = await toMfsPath(context, path) + parent = await toMfsPath(context, destination.mfsDirectory) + })() + log('Read source, destination and parent') + if (!options.parents && !parent.exists) { + throw errCode(new Error('directory does not exist'), 'ERR_NO_EXIST') + } + + if (!options.create && !destination.exists) { + throw errCode(new Error('file does not exist'), 'ERR_NO_EXIST') } - options = Object.assign({}, defaultOptions, options) + return updateOrImport(context, path, source, destination, options) + } +} - if (options.offset < 0) { - return callback(new Error('cannot have negative write offset')) +const updateOrImport = async (context, path, source, destination, options) => { + 
const child = await write(context, source, destination, options) + + // The slow bit is done, now add or replace the DAGLink in the containing directory + // re-reading the path to the containing folder in case it has changed in the interim + await createLock().writeLock(async () => { + const pathComponents = toPathComponents(path) + const fileName = pathComponents.pop() + let parentExists = false + + try { + await stat(context)(`/${pathComponents.join('/')}`, options) + parentExists = true + } catch (err) { + if (err.code !== 'ERR_NOT_FOUND') { + throw err + } } - if (options.length < 0) { - return callback(new Error('cannot have negative byte count')) + if (!parentExists) { + await mkdir(context)(`/${pathComponents.join('/')}`, options) } - if (!options.length && options.length !== 0) { - options.length = Infinity + // get an updated mfs path in case the root changed while we were writing + const updatedPath = await toMfsPath(context, path) + const trail = await toTrail(context, updatedPath.mfsDirectory, options) + const parent = trail[trail.length - 1] + + if (!parent.type.includes('directory')) { + throw errCode(new Error(`cannot write to ${parent.name}: Not a directory`), 'ERR_NOT_A_DIRECTORY') } - options.cidVersion = options.cidVersion || 0 - - waterfall([ - (done) => { - createLock().readLock((callback) => { - waterfall([ - (done) => { - parallel({ - source: (next) => toPullSource(content, options, next), - path: (next) => toMfsPath(context, path, next) - }, done) - }, - ({ source, path: { mfsPath, mfsDirectory } }, done) => { - series({ - mfsDirectory: (next) => stat(context)(mfsDirectory, { - unsorted: true, - long: true - }, (error, result) => { - if (error && error.message.includes('does not exist')) { - error = null - } - - next(error, result) - }), - mfsPath: (next) => stat(context)(mfsPath, { - unsorted: true, - long: true - }, (error, result) => { - if (error && error.message.includes('does not exist')) { - error = null - } - - next(error, result) - }) - }, (error, result = {}) => { - done(error, { - source, - path, - mfsDirectory: result.mfsDirectory, - mfsPath: result.mfsPath - }) - }) - } - ], callback) - })(done) - }, - ({ source, path, mfsDirectory, mfsPath }, done) => { - if (!options.parents && !mfsDirectory) { - return done(new Error('directory does not exist')) - } + const parentNode = await context.ipld.get(parent.cid) - if (!options.create && !mfsPath) { - return done(new Error('file does not exist')) - } + const result = await addLink(context, { + parent: parentNode, + name: fileName, + cid: child.cid, + size: child.size, + flush: options.flush, + shardSplitThreshold: options.shardSplitThreshold, + format: options.format, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion + }) - updateOrImport(context, options, path, source, mfsPath, done) - } - ], (error) => callback(error)) - }) -} + parent.cid = result.cid -const updateOrImport = (context, options, path, source, existingChild, callback) => { - waterfall([ - (next) => { - if (existingChild) { - return loadNode(context, { - cid: existingChild.hash - }, next) - } + // update the tree with the new child + const newRootCid = await updateTree(context, trail, options) - next(null, null) - }, - - (result, next) => { - const { - cid, node - } = result || {} - - write(context, cid, node, source, options, next) - }, - - // The slow bit is done, now add or replace the DAGLink in the containing directory - // re-reading the path to the containing folder in case it has changed in the interim - (child, next) => { 
- createLock().writeLock((writeLockCallback) => { - const pathComponents = toPathComponents(path) - const fileName = pathComponents.pop() - - waterfall([ - (cb) => stat(context)(`/${pathComponents.join('/')}`, options, (error, result) => { - if (error && error.message.includes('does not exist')) { - error = null - } - - cb(null, Boolean(result)) - }), - (parentExists, cb) => { - if (parentExists) { - return cb() - } - - mkdir(context)(`/${pathComponents.join('/')}`, options, cb) - }, - // get an updated mfs path in case the root changed while we were writing - (cb) => toMfsPath(context, path, cb), - ({ mfsDirectory, root }, cb) => { - toTrail(context, mfsDirectory, options, (err, trail) => { - if (err) { - return cb(err) - } - - const parent = trail[trail.length - 1] - - if (parent.type !== 'dir') { - return cb(new Error(`cannot write to ${parent.name}: Not a directory`)) - } - - context.ipld.get(parent.cid, (err, result) => { - if (err) { - return cb(err) - } - - addLink(context, { - parent: result.value, - parentCid: parent.cid, - name: fileName, - cid: child.cid, - size: child.size, - flush: options.flush, - shardSplitThreshold: options.shardSplitThreshold - }, (err, result) => { - if (err) { - return cb(err) - } - - parent.cid = result.cid - parent.size = result.node.size - - cb(null, trail) - }) - }) - }) - }, - - // update the tree with the new child - (trail, cb) => updateTree(context, trail, options, cb), - - // Update the MFS record with the new CID for the root of the tree - ({ cid }, cb) => updateMfsRoot(context, cid, cb) - ], writeLockCallback) - })(next) - }], callback) + // Update the MFS record with the new CID for the root of the tree + await updateMfsRoot(context, newRootCid) + })() } -const write = (context, existingNodeCid, existingNode, source, options, callback) => { - let existingNodeMeta - - if (existingNode) { - existingNodeMeta = unmarshal(existingNode.data) - log(`Overwriting file ${existingNodeCid.toBaseEncodedString()} offset ${options.offset} length ${options.length}`) +const write = async (context, source, destination, options) => { + if (destination.exists) { + log(`Overwriting file ${destination.cid} offset ${options.offset} length ${options.length}`) } else { log(`Writing file offset ${options.offset} length ${options.length}`) } @@ -242,106 +129,123 @@ const write = (context, existingNodeCid, existingNode, source, options, callback // pad start of file if necessary if (options.offset > 0) { - if (existingNode && existingNodeMeta.fileSize() > options.offset) { + if (destination.unixfs && destination.unixfs.fileSize() > options.offset) { log(`Writing first ${options.offset} bytes of original file`) - const startFile = deferred.source() - - sources.push(startFile) - - pull( - exporter(existingNodeCid, context.ipld, { - offset: 0, - length: options.offset - }), - collect((error, files) => { - if (error) { - return startFile.resolve(err(error)) - } - - startFile.resolve(files[0].content) - }) + sources.push( + () => { + return destination.content({ + offset: 0, + length: options.offset + }) + } ) } else { log(`Writing zeros for first ${options.offset} bytes`) - sources.push(zeros(options.offset)) + sources.push( + asyncZeroes(options.offset) + ) } } - const endFile = deferred.source() - - // add the new source sources.push( - pull( - source, - limitStreamBytes(options.length), - countStreamBytes((bytesRead) => { - log(`Wrote ${bytesRead} bytes`) - - if (existingNode && !options.truncate) { - // if we've done reading from the new source and we are not going - // 
to truncate the file, add the end of the existing file to the output - const fileSize = existingNodeMeta.fileSize() - const offset = options.offset + bytesRead - - if (fileSize > offset) { - log(`Writing last ${fileSize - offset} of ${fileSize} bytes from original file`) - pull( - exporter(existingNodeCid, context.ipld, { - offset - }), - collect((error, files) => { - if (error) { - return endFile.resolve(err(error)) - } - - endFile.resolve(files[0].content) - }) - ) - } else { - log(`Not writing last bytes from original file`) - endFile.resolve(empty()) - } - } - }) - ) + limitAsyncStreamBytes(source, options.length) ) - // add the end of the file if necessary - if (existingNode && !options.truncate) { - sources.push( - endFile - ) + const content = countBytesStreamed(catAsyncInterators(sources), (bytesWritten) => { + if (destination.unixfs && !options.truncate) { + // if we've done reading from the new source and we are not going + // to truncate the file, add the end of the existing file to the output + const fileSize = destination.unixfs.fileSize() + + if (fileSize > bytesWritten) { + log(`Writing last ${fileSize - bytesWritten} of ${fileSize} bytes from original file starting at offset ${bytesWritten}`) + + return destination.content({ + offset: bytesWritten + }) + } else { + log(`Not writing last bytes from original file`) + } + } + + return { + [Symbol.asyncIterator]: async function * () {} + } + }) + + let result = await last(importer([{ + content: content + }], context.ipld, { + progress: options.progress, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion, + strategy: options.strategy, + rawLeaves: options.rawLeaves, + reduceSingleLeafToSelf: options.reduceSingleLeafToSelf, + leafType: options.leafType + })) + + log(`Wrote ${result.cid}`) + + return { + cid: result.cid, + size: result.size } +} - pull( - values([{ - path: '', - content: cat(sources) - }]), - importer(context.ipld, { - progress: options.progress, - hashAlg: options.hashAlg, - cidVersion: options.cidVersion, - strategy: options.strategy, - rawLeaves: options.rawLeaves, - reduceSingleLeafToSelf: options.reduceSingleLeafToSelf, - leafType: options.leafType - }), - collect((error, results) => { - if (error) { - return callback(error) +const limitAsyncStreamBytes = (stream, limit) => { + return async function * _limitAsyncStreamBytes () { + let emitted = 0 + + for await (const buf of stream) { + emitted += buf.length + + if (emitted > limit) { + yield buf.slice(0, limit - emitted) + + return } - const result = results.pop() - const cid = new CID(result.multihash) + yield buf + } + } +} - log(`Wrote ${cid.toBaseEncodedString()}`) +const asyncZeroes = (count, chunkSize = MAX_CHUNK_SIZE) => { + const buf = Buffer.alloc(chunkSize, 0) - callback(null, { - cid, - size: result.size - }) - }) - ) + const stream = { + [Symbol.asyncIterator]: async function * _asyncZeroes () { + while (true) { + yield buf.slice() + } + } + } + + return limitAsyncStreamBytes(stream, count) +} + +const catAsyncInterators = async function * (sources) { + for (let i = 0; i < sources.length; i++) { + for await (const buf of sources[i]()) { + yield buf + } + } +} + +const countBytesStreamed = async function * (source, notify) { + let wrote = 0 + + for await (const buf of source) { + wrote += buf.length + + yield buf + } + + for await (const buf of notify(wrote)) { + wrote += buf.length + + yield buf + } } diff --git a/src/http/read.js b/src/http/read.js index e389b41ccf..5538121705 100644 --- a/src/http/read.js +++ b/src/http/read.js @@ 
-51,8 +51,7 @@ const mfsRead = { query: Joi.object().keys({ arg: Joi.string().required(), offset: Joi.number().integer().min(0), - length: Joi.number().integer().min(0), - count: Joi.number().integer().min(0) + length: Joi.number().integer().min(0) }) .rename('o', 'offset', { override: true, diff --git a/src/http/write.js b/src/http/write.js index 0308843c7c..37b26bd70e 100644 --- a/src/http/write.js +++ b/src/http/write.js @@ -2,6 +2,7 @@ const Joi = require('joi') const multipart = require('ipfs-multipart') +const Boom = require('boom') const mfsWrite = { method: 'POST', @@ -29,9 +30,15 @@ const mfsWrite = { const fileStream = await new Promise((resolve, reject) => { const parser = multipart.reqParser(request.payload) + let fileStream parser.on('file', (_, stream) => { - resolve(stream) + if (fileStream) { + return reject(Boom.badRequest('Please only send one file')) + } + + fileStream = stream + resolve(fileStream) }) parser.on('error', (error) => { diff --git a/src/index.js b/src/index.js index 2a29e8f876..522e8d29c5 100644 --- a/src/index.js +++ b/src/index.js @@ -5,7 +5,7 @@ const core = require('./core') const http = require('./http') const { FILE_TYPES -} = require('./core/utils') +} = require('./core/utils/constants') module.exports = { cli, diff --git a/test/cp.spec.js b/test/cp.spec.js index e75dc32401..16a99767cf 100644 --- a/test/cp.spec.js +++ b/test/cp.spec.js @@ -4,11 +4,11 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const bufferStream = require('pull-buffer-stream') -const { - createMfs, - createShardedDirectory -} = require('./helpers') +const createMfs = require('./helpers/create-mfs') +const createShardedDirectory = require('./helpers/create-sharded-directory') +const streamToBuffer = require('./helpers/stream-to-buffer') +const streamToArray = require('./helpers/stream-to-array') +const crypto = require('crypto') describe('cp', () => { let mfs @@ -44,7 +44,7 @@ describe('cp', () => { } }) - it('refuses to copy a file to a non-existent directory', async () => { + it('refuses to copy a non-existent file', async () => { try { await mfs.cp('/i-do-not-exist', '/output') throw new Error('No error was thrown for a non-existent file') @@ -57,16 +57,16 @@ describe('cp', () => { const source = `/source-file-${Math.random()}.txt` const destination = `/dest-file-${Math.random()}.txt` - await mfs.write(source, bufferStream(100), { + await mfs.write(source, crypto.randomBytes(100), { create: true }) - await mfs.write(destination, bufferStream(100), { + await mfs.write(destination, crypto.randomBytes(100), { create: true }) try { await mfs.cp(source, destination) - throw new Error('No error was thrown for a non-existent file') + throw new Error('No error was thrown when trying to overwrite a file') } catch (err) { expect(err.message).to.contain('directory already has entry by that name') } @@ -75,7 +75,7 @@ describe('cp', () => { it('refuses to copy a file to itself', async () => { const source = `/source-file-${Math.random()}.txt` - await mfs.write(source, bufferStream(100), { + await mfs.write(source, crypto.randomBytes(100), { create: true }) @@ -90,18 +90,15 @@ describe('cp', () => { it('copies a file to new location', async () => { const source = `/source-file-${Math.random()}.txt` const destination = `/dest-file-${Math.random()}.txt` - let data = Buffer.alloc(0) + let data = crypto.randomBytes(500) - await mfs.write(source, bufferStream(500, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { + await 
mfs.write(source, data, { create: true }) await mfs.cp(source, destination) - const buffer = await mfs.read(destination) + + let buffer = await streamToBuffer(mfs.read(destination)) expect(buffer).to.deep.equal(data) }) @@ -111,7 +108,7 @@ describe('cp', () => { const directory = `/dest-directory-${Math.random()}` const destination = `${directory}${source}` - await mfs.write(source, bufferStream(500), { + await mfs.write(source, crypto.randomBytes(500), { create: true }) await mfs.mkdir(directory) @@ -153,19 +150,15 @@ describe('cp', () => { it('copies multiple files to new location', async () => { const sources = [{ path: `/source-file-${Math.random()}.txt`, - data: Buffer.alloc(0) + data: crypto.randomBytes(500) }, { path: `/source-file-${Math.random()}.txt`, - data: Buffer.alloc(0) + data: crypto.randomBytes(500) }] const destination = `/dest-dir-${Math.random()}` for (const source of sources) { - await mfs.write(source.path, bufferStream(500, { - collector: (bytes) => { - source.data = Buffer.concat([source.data, bytes]) - } - }), { + await mfs.write(source.path, source.data, { create: true }) } @@ -175,7 +168,7 @@ describe('cp', () => { }) for (const source of sources) { - const buffer = await mfs.read(`${destination}${source.path}`) + const buffer = await streamToBuffer(mfs.read(`${destination}${source.path}`)) expect(buffer).to.deep.equal(source.data) } @@ -185,12 +178,30 @@ describe('cp', () => { const source = `/source-file-${Math.random()}.txt` const destination = `/dest-file-${Math.random()}.txt` - await mfs.write(source, bufferStream(100), { + await mfs.write(source, crypto.randomBytes(100), { create: true }) const stats = await mfs.stat(source) - await mfs.cp(`/ipfs/${stats.hash}`, destination) + await mfs.cp(`/ipfs/${stats.cid}`, destination) + + const destinationStats = await mfs.stat(destination) + expect(destinationStats.size).to.equal(100) + }) + + it('copies files from deep ipfs paths', async () => { + const dir = `dir-${Math.random()}` + const file = `source-file-${Math.random()}.txt` + const source = `/${dir}/${file}` + const destination = `/dest-file-${Math.random()}.txt` + + await mfs.write(source, crypto.randomBytes(100), { + create: true, + parents: true + }) + + const stats = await mfs.stat(`/${dir}`) + await mfs.cp(`/ipfs/${stats.cid}/${file}`, destination) const destinationStats = await mfs.stat(destination) expect(destinationStats.size).to.equal(100) @@ -211,9 +222,7 @@ describe('cp', () => { // should still be a sharded directory expect((await mfs.stat(finalShardedDirPath)).type).to.equal('hamt-sharded-directory') - const files = await mfs.ls(finalShardedDirPath, { - long: true - }) + const files = await streamToArray(mfs.ls(finalShardedDirPath)) expect(files.length).to.be.ok() }) diff --git a/test/flush.spec.js b/test/flush.spec.js index d9176f1919..fced064338 100644 --- a/test/flush.spec.js +++ b/test/flush.spec.js @@ -4,9 +4,7 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const { - createMfs -} = require('./helpers') +const createMfs = require('./helpers/create-mfs') describe('flush', () => { let mfs diff --git a/test/helpers/cid-at-path.js b/test/helpers/cid-at-path.js index 830aa5454f..c0e10c8876 100644 --- a/test/helpers/cid-at-path.js +++ b/test/helpers/cid-at-path.js @@ -1,23 +1,22 @@ 'use strict' -const CID = require('cids') -const { - toPathComponents -} = require('../../src/core/utils') +const toPathComponents = require('../../src/core/utils/to-path-components') module.exports = async (path, mfs) => { 
const parts = toPathComponents(path) const fileName = parts.pop() const directory = `/${parts.join('/')}` - const files = (await mfs.ls(directory, { + const files = [] + + for await (const file of mfs.ls(directory, { long: true - })) + })) { + files.push(file) + } const file = files .filter(file => file.name === fileName) .pop() - return new CID( - file.hash - ) + return file.cid } diff --git a/test/helpers/collect-leaf-cids.js b/test/helpers/collect-leaf-cids.js deleted file mode 100644 index d71a9c7613..0000000000 --- a/test/helpers/collect-leaf-cids.js +++ /dev/null @@ -1,40 +0,0 @@ -'use strict' - -const pull = require('pull-stream') -const traverse = require('pull-traverse') -const CID = require('cids') - -module.exports = (mfs, multihash) => { - return new Promise((resolve, reject) => { - pull( - traverse.depthFirst(new CID(multihash), (cid) => { - return pull( - pull.values([cid]), - pull.asyncMap((cid, callback) => { - mfs.ipld.get(cid, (error, result) => { - callback(error, !error && result.value) - }) - }), - pull.asyncMap((node, callback) => { - if (!node.links) { - return callback() - } - - return callback( - null, node.links.map(link => link.cid) - ) - }), - pull.filter(Boolean), - pull.flatten() - ) - }), - pull.collect((error, cids) => { - if (error) { - return reject(error) - } - - resolve(cids) - }) - ) - }) -} diff --git a/test/helpers/create-mfs.js b/test/helpers/create-mfs.js new file mode 100644 index 0000000000..cb5264268e --- /dev/null +++ b/test/helpers/create-mfs.js @@ -0,0 +1,56 @@ +'use strict' + +const core = require('../../src/core') +const isWebWorker = require('detect-webworker') +const promisify = require('promisify-es6') +const { + MemoryDatastore +} = require('interface-datastore') +const Ipld = require('ipld') +const Repo = require('ipfs-repo') +const BlockService = require('ipfs-block-service') + +const createMfs = async () => { + let repo = new Repo(`test-repo-${Date.now()}`, { + lock: 'memory', + storageBackends: { + root: MemoryDatastore, + blocks: MemoryDatastore, + keys: MemoryDatastore, + datastore: MemoryDatastore + } + }) + + repo.init = promisify(repo.init, { + context: repo + }) + repo.open = promisify(repo.open, { + context: repo + }) + + await repo.init({}) + await repo.open() + + const bs = new BlockService(repo) + + const ipld = new Ipld({ + blockService: bs + }) + + const mfs = core({ + ipld, + datastore: repo.datastore, + blocks: bs, + + // https://github.com/Joris-van-der-Wel/karma-mocha-webworker/issuses/4 + // There is no IPFS node running on the main thread so run it on the + // worker along with the tests + repoOwner: isWebWorker + }) + + mfs.ipld = ipld + + return mfs +} + +module.exports = createMfs diff --git a/test/helpers/create-shard.js b/test/helpers/create-shard.js index dc2cbcfb59..34fc36c08d 100644 --- a/test/helpers/create-shard.js +++ b/test/helpers/create-shard.js @@ -1,31 +1,16 @@ 'use strict' -const pull = require('pull-stream/pull') -const values = require('pull-stream/sources/values') -const collect = require('pull-stream/sinks/collect') const importer = require('ipfs-unixfs-importer') -const CID = require('cids') +const last = require('async-iterator-last') -const createShard = (ipld, files, shardSplitThreshold = 10) => { - return new Promise((resolve, reject) => { - pull( - values(files), - importer(ipld, { - shardSplitThreshold, - reduceSingleLeafToSelf: false, // same as go-ipfs-mfs implementation, differs from `ipfs add`(!) - leafType: 'raw' // same as go-ipfs-mfs implementation, differs from `ipfs add`(!) 
- }), - collect((err, files) => { - if (err) { - return reject(err) - } +const createShard = async (ipld, files, shardSplitThreshold = 10) => { + let result = await last(importer(files, ipld, { + shardSplitThreshold, + reduceSingleLeafToSelf: false, // same as go-ipfs-mfs implementation, differs from `ipfs add`(!) + leafType: 'raw' // same as go-ipfs-mfs implementation, differs from `ipfs add`(!) + })) - const dir = files[files.length - 1] - - resolve(new CID(dir.multihash)) - }) - ) - }) + return result.cid } module.exports = createShard diff --git a/test/helpers/create-sharded-directory.js b/test/helpers/create-sharded-directory.js index 8730dbd91a..4343605972 100644 --- a/test/helpers/create-sharded-directory.js +++ b/test/helpers/create-sharded-directory.js @@ -12,8 +12,9 @@ module.exports = async (mfs, shardSplitThreshold = 10, files = shardSplitThresho content: Buffer.from([0, 1, 2, 3, 4, 5, index]) })), shardSplitThreshold) - await mfs.cp(`/ipfs/${cid.toBaseEncodedString()}`, dirPath) + await mfs.cp(`/ipfs/${cid}`, dirPath) + expect((await mfs.stat(`/ipfs/${cid}`)).type).to.equal('hamt-sharded-directory') expect((await mfs.stat(dirPath)).type).to.equal('hamt-sharded-directory') return dirPath diff --git a/test/helpers/find-tree-with-depth.js b/test/helpers/find-tree-with-depth.js index 2ec9191141..e356943588 100644 --- a/test/helpers/find-tree-with-depth.js +++ b/test/helpers/find-tree-with-depth.js @@ -36,7 +36,7 @@ const load = async (ipld, cid) => { const findChildrenAtDepth = async (ipld, cid, children, depth, currentDepth = 0) => { const node = await load(ipld, cid) - const fileLinks = node.links.filter(link => link.name) + const fileLinks = node.links.filter(link => link.Name) if (currentDepth === depth && fileLinks.length >= children) { return true diff --git a/test/helpers/index.js b/test/helpers/index.js deleted file mode 100644 index d817c272de..0000000000 --- a/test/helpers/index.js +++ /dev/null @@ -1,48 +0,0 @@ -'use strict' - -const core = require('../../src/core') -const isWebWorker = require('detect-webworker') -const promisify = require('promisify-es6') -const InMemoryDataStore = require('interface-datastore').MemoryDatastore -const Ipld = require('ipld') -const inMemoryIpld = promisify(require('ipld-in-memory').bind(null, Ipld)) - -const createMfs = async () => { - let ipld = await inMemoryIpld() - let datastore = new InMemoryDataStore() - - const mfs = core({ - ipld, - repo: { - datastore - }, - - // https://github.com/Joris-van-der-Wel/karma-mocha-webworker/issues/4 - // There is no IPFS node running on the main thread so run it on the - // worker along with the tests - repoOwner: isWebWorker - }) - - // to allow tests to verify information - mfs.ipld = { - get: promisify(ipld.get.bind(ipld)), - getMany: promisify(ipld.getMany.bind(ipld)), - put: promisify(ipld.put.bind(ipld)) - } - mfs.datastore = datastore - - return mfs -} - -module.exports = { - createMfs, - cidAtPath: require('./cid-at-path'), - collectLeafCids: require('./collect-leaf-cids'), - createShard: require('./create-shard'), - createShardedDirectory: require('./create-sharded-directory'), - createTwoShards: require('./create-two-shards'), - findTreeWithDepth: require('./find-tree-with-depth'), - printTree: require('./print-tree'), - EMPTY_DIRECTORY_HASH: 'QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn', - EMPTY_DIRECTORY_HASH_BASE32: 'bafybeiczsscdsbs7ffqz55asqdf3smv6klcw3gofszvwlyarci47bgf354' -} diff --git a/test/helpers/print-tree.js b/test/helpers/print-tree.js index 34bce64250..baa9d647c9 
100644 --- a/test/helpers/print-tree.js +++ b/test/helpers/print-tree.js @@ -1,26 +1,14 @@ 'use strict' -const load = async (cid, ipld) => { - return new Promise((resolve, reject) => { - ipld.get(cid, (err, res) => { - if (err) { - return reject(err) - } - - resolve(res.value) - }) - }) -} - const printTree = async (ipld, cid, indentation = '', name = '') => { - console.info(indentation, name, cid.toBaseEncodedString()) // eslint-disable-line no-console + console.info(`${indentation} ${name} ${cid}`) // eslint-disable-line no-console - const node = await load(cid, ipld) - const fileLinks = node.links - .filter(link => link.name) + const node = await ipld.get(cid) + const fileLinks = node.Links + .filter(link => link.Name) for (let i = 0; i < fileLinks.length; i++) { - await printTree(ipld, fileLinks[i].cid, ` ${indentation}`, fileLinks[i].name) + await printTree(ipld, fileLinks[i].Hash, ` ${indentation}`, fileLinks[i].Name) } } diff --git a/test/helpers/random-bytes.js b/test/helpers/random-bytes.js deleted file mode 100644 index 7ed4191dbd..0000000000 --- a/test/helpers/random-bytes.js +++ /dev/null @@ -1,21 +0,0 @@ -'use strict' - -const crypto = require('crypto') -const MAX_BYTES = 65536 - -// One day this will be merged: https://github.com/crypto-browserify/randombytes/pull/16 -module.exports = function randomBytes (num) { - const bytes = Buffer.allocUnsafe(num) - - for (let offset = 0; offset < num; offset += MAX_BYTES) { - let size = MAX_BYTES - - if ((offset + size) > num) { - size = num - offset - } - - crypto.randomFillSync(bytes, offset, size) - } - - return bytes -} diff --git a/test/helpers/stream-to-array.js b/test/helpers/stream-to-array.js new file mode 100644 index 0000000000..882930ee58 --- /dev/null +++ b/test/helpers/stream-to-array.js @@ -0,0 +1,11 @@ +'use strict' + +module.exports = async (stream) => { + const arr = [] + + for await (const entry of stream) { + arr.push(entry) + } + + return arr +} diff --git a/test/helpers/stream-to-buffer.js b/test/helpers/stream-to-buffer.js new file mode 100644 index 0000000000..caab7c81de --- /dev/null +++ b/test/helpers/stream-to-buffer.js @@ -0,0 +1,11 @@ +'use strict' + +module.exports = async (stream) => { + let buffer = Buffer.alloc(0) + + for await (const buf of stream) { + buffer = Buffer.concat([buffer, buf], buffer.length + buf.length) + } + + return buffer +} diff --git a/test/helpers/traverse-leaf-nodes.js b/test/helpers/traverse-leaf-nodes.js new file mode 100644 index 0000000000..58e2fdb564 --- /dev/null +++ b/test/helpers/traverse-leaf-nodes.js @@ -0,0 +1,23 @@ +'use strict' + +module.exports = function traverseLeafNodes (mfs, cid) { + async function * traverse (cid) { + const node = await mfs.ipld.get(cid) + + if (Buffer.isBuffer(node) || !node.Links.length) { + yield { + node, + cid + } + + return + } + + // recurse into each child and re-yield its leaves - a bare forEach would discard the nested generators + for (const link of node.Links) { + yield * traverse(link.Hash) + } + } + + return traverse(cid) +} diff --git a/test/ls.spec.js b/test/ls.spec.js index 60887b4f37..ef58d332a1 100644 --- a/test/ls.spec.js +++ b/test/ls.spec.js @@ -4,325 +4,201 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const pull = require('pull-stream/pull') -const collect = require('pull-stream/sinks/collect') -const randomBytes = require('./helpers/random-bytes') const CID = require('cids') const { FILE_TYPES } = require('../src') - -const { - createMfs, - createShardedDirectory -} = require('./helpers') +const createMfs = require('./helpers/create-mfs') +const createShardedDirectory = 
require('./helpers/create-sharded-directory') +const streamToArray = require('./helpers/stream-to-array') +const crypto = require('crypto') describe('ls', () => { let mfs - let largeFile = randomBytes(490668) + let largeFile = crypto.randomBytes(490668) before(async () => { mfs = await createMfs() }) - const methods = [{ - name: 'ls', - ls: function () { - return mfs.ls.apply(mfs, arguments) - }, - collect: (entries) => entries - }, { - name: 'lsPullStream', - ls: function () { - return Promise.resolve(mfs.lsPullStream.apply(mfs, arguments)) - }, - collect: (stream) => { - return new Promise((resolve, reject) => { - pull( - stream, - collect((error, entries) => { - if (error) { - return reject(error) - } - - resolve(entries) - }) - ) - }) + it('lists the root directory by default', async () => { + const fileName = `small-file-${Math.random()}.txt` + const content = Buffer.from('Hello world') + + await mfs.write(`/${fileName}`, content, { + create: true + }) + + const files = await streamToArray(mfs.ls()) + + expect(files.find(file => file.name === fileName)).to.be.ok() + }) + + it('refuses to lists files with an empty path', async () => { + try { + for await (const _ of mfs.ls('')) { // eslint-disable-line no-unused-vars + // falala + } + + throw new Error('No error was thrown for an empty path') + } catch (err) { + expect(err.code).to.equal('ERR_NO_PATH') } - }, { - name: 'lsReadableStream', - ls: function () { - return Promise.resolve(mfs.lsReadableStream.apply(mfs, arguments)) - }, - collect: (stream) => { - return new Promise((resolve, reject) => { - let entries = [] - - stream.on('data', (entry) => { - entries.push(entry) - }) - - stream.on('end', (entry) => { - if (entry) { - entries.push(entry) - } - - resolve(entries) - }) - - stream.on('error', (error) => { - reject(error) - }) - }) + }) + + it('refuses to lists files with an invalid path', async () => { + try { + for await (const _ of mfs.ls('not-valid')) { // eslint-disable-line no-unused-vars + // falala + } + + throw new Error('No error was thrown for an empty path') + } catch (err) { + expect(err.code).to.equal('ERR_INVALID_PATH') } - }] - - methods.forEach(method => { - describe(`ls ${method.name}`, () => { - it('lists the root directory by default', async () => { - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${fileName}`, content, { - create: true - }) - const result = await method.ls() - const files = await method.collect(result) - - expect(files.find(file => file.name === fileName)).to.be.ok() - }) - - it('refuses to lists files with an empty path', async () => { - try { - await method.collect(await method.ls('')) - throw new Error('No error was thrown for an empty path') - } catch (err) { - expect(err.message).to.contain('paths must not be empty') - } - }) - - it('refuses to lists files with an invalid path', async () => { - try { - await method.collect(await method.ls('not-valid')) - throw new Error('No error was thrown for an empty path') - } catch (err) { - expect(err.message).to.contain('paths must start with a leading /') - } - }) - - it('lists files in a directory', async () => { - const dirName = `dir-${Math.random()}` - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${dirName}/${fileName}`, content, { - create: true, - parents: true - }) - - const stream = await method.ls(`/${dirName}`, {}) - const files = await method.collect(stream) - - expect(files.length).to.equal(1) 
- expect(files[0].name).to.equal(fileName) - expect(files[0].type).to.equal(FILE_TYPES.file) - expect(files[0].size).to.equal(0) - expect(files[0].hash).to.equal('') - }) - - it('lists files in a directory with meta data', async () => { - const dirName = `dir-${Math.random()}` - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${dirName}/${fileName}`, content, { - create: true, - parents: true - }) - - const stream = await method.ls(`/${dirName}`, { - long: true - }) - const files = await method.collect(stream) - - expect(files.length).to.equal(1) - expect(files[0].name).to.equal(fileName) - expect(files[0].type).to.equal(FILE_TYPES.file) - expect(files[0].size).to.equal(content.length) - }) - - it('lists a file', async () => { - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${fileName}`, content, { - create: true - }) - - const stream = await method.ls(`/${fileName}`) - const files = await method.collect(stream) - - expect(files.length).to.equal(1) - expect(files[0].name).to.equal(fileName) - expect(files[0].type).to.equal(FILE_TYPES.file) - expect(files[0].size).to.equal(0) - expect(files[0].hash).to.equal('') - }) - - it('lists a file with meta data', async () => { - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${fileName}`, content, { - create: true - }) - const stream = await method.ls(`/${fileName}`, { - long: true - }) - const files = await method.collect(stream) - - expect(files.length).to.equal(1) - expect(files[0].name).to.equal(fileName) - expect(files[0].type).to.equal(FILE_TYPES.file) - expect(files[0].size).to.equal(content.length) - }) - - it('lists a file with a base32 hash', async () => { - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${fileName}`, content, { - create: true - }) - - const stream = await method.ls(`/${fileName}`, { - long: true, - cidBase: 'base32' - }) - const files = await method.collect(stream) - - expect(files.length).to.equal(1) - expect(files[0].name).to.equal(fileName) - expect(files[0].type).to.equal(FILE_TYPES.file) - expect(files[0].size).to.equal(content.length) - expect(files[0].hash.startsWith('b')).to.equal(true) - }) - - it('fails to list non-existent file', async () => { - try { - const stream = await method.ls('/i-do-not-exist') - await method.collect(stream) - throw new Error('No error was thrown for a non-existent file') - } catch (err) { - expect(err.message).to.contain('does not exist') - } - }) - - it('lists a raw node', async () => { - const filePath = '/stat/large-file.txt' - - await mfs.write(filePath, largeFile, { - create: true, - parents: true, - rawLeaves: true - }) - - const stats = await mfs.stat(filePath) - const result = await mfs.ipld.get(new CID(stats.hash)) - const node = result.value - const child = node.links[0] - - expect(child.cid.codec).to.equal('raw') - - const rawNodeContents = await mfs.ls(`/ipfs/${child.cid}/`, { - long: true - }) - - expect(rawNodeContents[0].type).to.equal(0) // this is what go does - expect(rawNodeContents[0].hash).to.equal(child.cid.toBaseEncodedString()) - }) - - it('lists a raw node in an mfs directory', async () => { - const filePath = '/stat/large-file.txt' - - await mfs.write(filePath, largeFile, { - create: true, - parents: true, - rawLeaves: true - }) - - const stats = await mfs.stat(filePath) - const 
cid = new CID(stats.hash) - const result = await mfs.ipld.get(cid) - const node = result.value - const child = node.links[0] - - expect(child.cid.codec).to.equal('raw') - - const dir = `/dir-with-raw-${Date.now()}` - const path = `${dir}/raw-${Date.now()}` - - await mfs.mkdir(dir) - await mfs.cp(`/ipfs/${child.cid.toBaseEncodedString()}`, path) - - const rawNodeContents = await mfs.ls(path, { - long: true - }) - - expect(rawNodeContents[0].type).to.equal(0) // this is what go does - expect(rawNodeContents[0].hash).to.equal(child.cid.toBaseEncodedString()) - }) - - it('lists a sharded directory contents', async () => { - const shardSplitThreshold = 10 - const fileCount = 11 - const dirPath = await createShardedDirectory(mfs, shardSplitThreshold, fileCount) - - const files = await method.collect(await method.ls(dirPath, { - long: true - })) - - expect(files.length).to.equal(fileCount) - - files.forEach(file => { - // should be a file - expect(file.type).to.equal(0) - }) - }) - - it('lists a file inside a sharded directory directly', async () => { - const dirPath = await createShardedDirectory(mfs) - - const files = await method.collect(await method.ls(dirPath, { - long: true - })) - - const filePath = `${dirPath}/${files[0].name}` - - // should be able to ls new file directly - expect(await method.collect(await method.ls(filePath, { - long: true - }))).to.not.be.empty() - }) - - it('lists the contents of a directory inside a sharded directory', async () => { - const shardedDirPath = await createShardedDirectory(mfs) - const dirPath = `${shardedDirPath}/subdir-${Math.random()}` - const fileName = `small-file-${Math.random()}.txt` - - await mfs.mkdir(`${dirPath}`) - await mfs.write(`${dirPath}/${fileName}`, Buffer.from([0, 1, 2, 3]), { - create: true - }) - - const files = await method.collect(await method.ls(dirPath, { - long: true - })) - - expect(files.length).to.equal(1) - expect(files.filter(file => file.name === fileName)).to.be.ok() - }) + }) + + it('lists files in a directory', async () => { + const dirName = `dir-${Math.random()}` + const fileName = `small-file-${Math.random()}.txt` + const content = Buffer.from('Hello world') + + await mfs.write(`/${dirName}/${fileName}`, content, { + create: true, + parents: true + }) + + const files = await streamToArray(mfs.ls(`/${dirName}`)) + + expect(files.find(file => file.name === fileName)).to.be.ok() + expect(files.length).to.equal(1) + expect(files[0].name).to.equal(fileName) + expect(files[0].type).to.equal(FILE_TYPES.file) + expect(files[0].size).to.equal(content.length) + expect(CID.isCID(files[0].cid)).to.be.ok() + }) + + it('lists a file', async () => { + const fileName = `small-file-${Math.random()}.txt` + const content = Buffer.from('Hello world') + + await mfs.write(`/${fileName}`, content, { + create: true + }) + + const files = await streamToArray(mfs.ls(`/${fileName}`)) + + expect(files.length).to.equal(1) + expect(files[0].name).to.equal(fileName) + expect(files[0].type).to.equal(FILE_TYPES.file) + expect(files[0].size).to.equal(content.length) + expect(CID.isCID(files[0].cid)).to.be.ok() + }) + + it('fails to list non-existent file', async () => { + try { + for await (const _ of mfs.ls('/i-do-not-exist')) { // eslint-disable-line no-unused-vars + // falala + } + + throw new Error('No error was thrown for a non-existent file') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') + } + }) + + it('lists a raw node', async () => { + const filePath = '/stat/large-file.txt' + + await mfs.write(filePath, largeFile, { + 
create: true, + parents: true, + rawLeaves: true }) + + const stats = await mfs.stat(filePath) + const node = await mfs.ipld.get(stats.cid) + const child = node.Links[0] + + expect(child.Hash.codec).to.equal('raw') + + const files = await streamToArray(mfs.ls(`/ipfs/${child.Hash}`)) + + expect(files.length).to.equal(1) + expect(files[0].type).to.equal(0) // this is what go does + expect(files[0].cid.toString()).to.equal(child.Hash.toString()) + }) + + it('lists a raw node in an mfs directory', async () => { + const filePath = '/stat/large-file.txt' + + await mfs.write(filePath, largeFile, { + create: true, + parents: true, + rawLeaves: true + }) + + const stats = await mfs.stat(filePath) + const cid = stats.cid + const node = await mfs.ipld.get(cid) + const child = node.Links[0] + + expect(child.Hash.codec).to.equal('raw') + + const dir = `/dir-with-raw-${Date.now()}` + const path = `${dir}/raw-${Date.now()}` + + await mfs.mkdir(dir) + await mfs.cp(`/ipfs/${child.Hash}`, path) + + const files = await streamToArray(mfs.ls(path)) + + expect(files.length).to.equal(1) + expect(files[0].type).to.equal(0) // this is what go does + expect(files[0].cid.toString()).to.equal(child.Hash.toString()) + }) + + it('lists a sharded directory contents', async () => { + const shardSplitThreshold = 10 + const fileCount = 11 + const dirPath = await createShardedDirectory(mfs, shardSplitThreshold, fileCount) + + const files = await streamToArray(mfs.ls(dirPath)) + + expect(files.length).to.equal(fileCount) + + files.forEach(file => { + // should be a file + expect(file.type).to.equal(0) + }) + }) + + it('lists a file inside a sharded directory directly', async () => { + const dirPath = await createShardedDirectory(mfs) + const files = await streamToArray(mfs.ls(dirPath)) + + const filePath = `${dirPath}/${files[0].name}` + + // should be able to ls new file directly + const file = await streamToArray(mfs.ls(filePath)) + + expect(file.length).to.equal(1) + expect(file[0].name).to.equal(files[0].name) + }) + + it('lists the contents of a directory inside a sharded directory', async () => { + const shardedDirPath = await createShardedDirectory(mfs) + const dirPath = `${shardedDirPath}/subdir-${Math.random()}` + const fileName = `small-file-${Math.random()}.txt` + + await mfs.mkdir(`${dirPath}`) + await mfs.write(`${dirPath}/${fileName}`, Buffer.from([0, 1, 2, 3]), { + create: true + }) + + const files = await streamToArray(mfs.ls(dirPath)) + + expect(files.length).to.equal(1) + expect(files.filter(file => file.name === fileName)).to.be.ok() }) }) diff --git a/test/mkdir.spec.js b/test/mkdir.spec.js index 34860a1b00..628f2dde20 100644 --- a/test/mkdir.spec.js +++ b/test/mkdir.spec.js @@ -5,11 +5,10 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect const multihash = require('multihashes') -const { - createMfs, - cidAtPath, - createShardedDirectory -} = require('./helpers') +const createMfs = require('./helpers/create-mfs') +const cidAtPath = require('./helpers/cid-at-path') +const createShardedDirectory = require('./helpers/create-sharded-directory') +const all = require('async-iterator-all') describe('mkdir', () => { let mfs @@ -32,7 +31,7 @@ describe('mkdir', () => { await mfs.mkdir('foo') throw new Error('No error was thrown when creating an directory with no leading slash') } catch (err) { - expect(err.message).to.contain('paths must start with a leading /') + expect(err.code).to.equal('ERR_INVALID_PATH') } }) @@ -66,7 +65,8 @@ 
const stats = await mfs.stat(path) expect(stats.type).to.equal('directory') - const files = await mfs.ls(path) + const files = await all(mfs.ls(path)) + expect(files.length).to.equal(0) }) @@ -81,9 +81,10 @@ describe('mkdir', () => { await mfs.mkdir(path, { parents: false }) + throw new Error('Did not refuse to create a path that already exists') } catch (err) { - expect(err.message).to.contain('file already exists') + expect(err.code).to.equal('ERR_ALREADY_EXISTS') } }) @@ -106,11 +107,20 @@ describe('mkdir', () => { parents: true }) - const files = await mfs.ls(path) + const files = await all(mfs.ls(path)) expect(files.length).to.equal(0) }) + it('creates nested directories', async () => { + await mfs.mkdir('/nested-dir') + await mfs.mkdir('/nested-dir/baz') + + const files = await all(mfs.ls('/nested-dir')) + + expect(files.length).to.equal(1) + }) + it('creates a nested directory with a different CID version to the parent', async () => { const directory = `cid-versions-${Math.random()}` const directoryPath = `/${directory}` diff --git a/test/mv.spec.js b/test/mv.spec.js index 7e9641fa5e..422c3c7e81 100644 --- a/test/mv.spec.js +++ b/test/mv.spec.js @@ -4,11 +4,10 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const bufferStream = require('pull-buffer-stream') -const { - createMfs, - createShardedDirectory -} = require('./helpers') +const createMfs = require('./helpers/create-mfs') +const createShardedDirectory = require('./helpers/create-sharded-directory') +const streamToBuffer = require('./helpers/stream-to-buffer') +const crypto = require('crypto') describe('mv', () => { let mfs @@ -38,18 +37,14 @@ describe('mv', () => { it('moves a file', async () => { const source = `/source-file-${Math.random()}.txt` const destination = `/dest-file-${Math.random()}.txt` - let data = Buffer.alloc(0) + let data = crypto.randomBytes(500) - await mfs.write(source, bufferStream(500, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { + await mfs.write(source, data, { create: true }) await mfs.mv(source, destination) - const buffer = await mfs.read(destination) + const buffer = await streamToBuffer(mfs.read(destination)) expect(buffer).to.deep.equal(data) try { diff --git a/test/read.spec.js b/test/read.spec.js index 3f1af269fe..74092c4e15 100644 --- a/test/read.spec.js +++ b/test/read.spec.js @@ -4,231 +4,148 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const bufferStream = require('pull-buffer-stream') -const pull = require('pull-stream/pull') -const collect = require('pull-stream/sinks/collect') -const { - createMfs, - createShardedDirectory -} = require('./helpers') -const randomBytes = require('./helpers/random-bytes') +const createMfs = require('./helpers/create-mfs') +const createShardedDirectory = require('./helpers/create-sharded-directory') +const crypto = require('crypto') +const streamToBuffer = require('./helpers/stream-to-buffer') describe('read', () => { let mfs - let smallFile = randomBytes(13) + let smallFile = crypto.randomBytes(13) before(async () => { mfs = await createMfs() }) - const methods = [{ - name: 'read', - read: function () { - return mfs.read.apply(mfs, arguments) - }, - collect: (buffer) => buffer - }, { - name: 'readPullStream', - read: function () { - return Promise.resolve(mfs.readPullStream.apply(mfs, arguments)) - }, - collect: (stream) => { - return new Promise((resolve, reject) => { - pull( - stream, - collect((err, buffers) => { - if (err) { - 
return reject(err) - } - - resolve(Buffer.concat(buffers)) - }) - ) - }) - } - }, { - name: 'readReadableStream', - read: function () { - return Promise.resolve(mfs.readReadableStream.apply(mfs, arguments)) - }, - collect: (stream) => { - return new Promise((resolve, reject) => { - let data = Buffer.alloc(0) - - stream.on('data', (buffer) => { - data = Buffer.concat([data, buffer]) - }) - - stream.on('end', () => { - resolve(data) - }) - - stream.on('error', (err) => { - reject(err) - }) + describe(`read`, () => { + it('reads a small file', async () => { + const filePath = '/small-file.txt' + + await mfs.write(filePath, smallFile, { + create: true }) - } - }] - methods.forEach(method => { - describe(`read ${method.name}`, () => { - it('reads a small file', async () => { - const filePath = '/small-file.txt' + const buffer = await streamToBuffer(mfs.read(filePath)) - await mfs.write(filePath, smallFile, { - create: true - }) + expect(buffer).to.deep.equal(smallFile) + }) - const result = await method.read(filePath) - const buffer = await method.collect(result) - expect(buffer).to.deep.equal(smallFile) - }) + it('reads a file with an offset', async () => { + const path = `/some-file-${Math.random()}.txt` + let data = crypto.randomBytes(100) + const offset = 10 - it('reads a file with an offset', async () => { - const path = `/some-file-${Math.random()}.txt` - let data = Buffer.alloc(0) - const offset = 10 - - await mfs.write(path, bufferStream(100, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { - create: true - }) - - const result = await method.read(path, { - offset - }) - const buffer = await method.collect(result) - - expect(buffer).to.deep.equal(data.slice(offset)) + await mfs.write(path, data, { + create: true }) - it('reads a file with a length', async () => { - const path = `/some-file-${Math.random()}.txt` - let data = Buffer.alloc(0) - const length = 10 - - await mfs.write(path, bufferStream(100, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { - create: true - }) - - const result = await method.read(path, { - length - }) - const buffer = await method.collect(result) - - expect(buffer).to.deep.equal(data.slice(0, length)) - }) + const buffer = await streamToBuffer(mfs.read(path, { + offset + })) - it('reads a file with a legacy count argument', async () => { - const path = `/some-file-${Math.random()}.txt` - let data = Buffer.alloc(0) - const length = 10 - - await mfs.write(path, bufferStream(100, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { - create: true - }) - - const result = await method.read(path, { - count: length - }) - const buffer = await method.collect(result) - - expect(buffer).to.deep.equal(data.slice(0, length)) - }) + expect(buffer).to.deep.equal(data.slice(offset)) + }) - it('reads a file with an offset and a length', async () => { - const path = `/some-file-${Math.random()}.txt` - let data = Buffer.alloc(0) - const offset = 10 - const length = 10 - - await mfs.write(path, bufferStream(100, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { - create: true - }) - - const result = await method.read(path, { - offset, - length - }) - const buffer = await method.collect(result) - - expect(buffer).to.deep.equal(data.slice(offset, offset + length)) - }) + it('reads a file with a length', async () => { + const path = `/some-file-${Math.random()}.txt` + let data = crypto.randomBytes(100) + const length = 10 - it('reads a file with an offset and a legacy count 
argument', async () => { - const path = `/some-file-${Math.random()}.txt` - let data = Buffer.alloc(0) - const offset = 10 - const length = 10 + await mfs.write(path, data, { + create: true + }) - await mfs.write(path, bufferStream(100, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { - create: true - }) + const buffer = await streamToBuffer(mfs.read(path, { + length + })) - const result = await method.read(path, { - offset, - count: length - }) + expect(buffer).to.deep.equal(data.slice(0, length)) + }) - const buffer = await method.collect(result) + it('reads a file with a legacy count argument', async () => { + const path = `/some-file-${Math.random()}.txt` + let data = crypto.randomBytes(100) + const length = 10 - expect(buffer).to.deep.equal(data.slice(offset, offset + length)) + await mfs.write(path, data, { + create: true }) - it('refuses to read a directory', async () => { - const path = '/' + const buffer = await streamToBuffer(mfs.read(path, { + count: length + })) + + expect(buffer).to.deep.equal(data.slice(0, length)) + }) + + it('reads a file with an offset and a length', async () => { + const path = `/some-file-${Math.random()}.txt` + let data = crypto.randomBytes(100) + const offset = 10 + const length = 10 - try { - const result = await method.read(path) - await method.collect(result) - throw new Error('Should have errored on trying to read a directory') - } catch (err) { - expect(err.message).to.contain('was not a file') - } + await mfs.write(path, data, { + create: true }) - it('refuses to read a non-existent file', async () => { - try { - const stream = await method.read(`/file-${Math.random()}.txt`) - await method.collect(stream) - throw new Error('Should have errored on non-existent file') - } catch (err) { - expect(err.message).to.contain('does not exist') - } + const buffer = await streamToBuffer(mfs.read(path, { + offset, + length + })) + + expect(buffer).to.deep.equal(data.slice(offset, offset + length)) + }) + + it('reads a file with an offset and a legacy count argument', async () => { + const path = `/some-file-${Math.random()}.txt` + let data = crypto.randomBytes(100) + const offset = 10 + const length = 10 + + await mfs.write(path, data, { + create: true }) - it('reads file from inside a sharded directory', async () => { - const shardedDirPath = await createShardedDirectory(mfs) - const filePath = `${shardedDirPath}/file-${Math.random()}.txt` - const content = Buffer.from([0, 1, 2, 3, 4]) + const buffer = await streamToBuffer(mfs.read(path, { + offset, + count: length + })) + + expect(buffer).to.deep.equal(data.slice(offset, offset + length)) + }) + + it('refuses to read a directory', async () => { + const path = '/' + + try { + await streamToBuffer(mfs.read(path)) + throw new Error('Should have errored on trying to read a directory') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FILE') + } + }) - await mfs.write(filePath, content, { - create: true - }) + it('refuses to read a non-existent file', async () => { + try { + await streamToBuffer(mfs.read(`/file-${Math.random()}.txt`)) + throw new Error('Should have errored on non-existent file') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') + } + }) - const stream = await method.read(filePath) + it('reads file from inside a sharded directory', async () => { + const shardedDirPath = await createShardedDirectory(mfs) + const filePath = `${shardedDirPath}/file-${Math.random()}.txt` + const content = Buffer.from([0, 1, 2, 3, 4]) - expect(await 
method.collect(stream)).to.deep.equal(content) + await mfs.write(filePath, content, { + create: true }) + + const buffer = await streamToBuffer(mfs.read(filePath)) + + expect(buffer).to.deep.equal(content) }) }) }) diff --git a/test/rm.spec.js b/test/rm.spec.js index 08725ca5d7..0e662f0fd3 100644 --- a/test/rm.spec.js +++ b/test/rm.spec.js @@ -4,16 +4,13 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const bufferStream = require('pull-buffer-stream') -const CID = require('cids') -const { - createMfs, - createShardedDirectory, - createTwoShards -} = require('./helpers') +const createMfs = require('./helpers/create-mfs') +const createShardedDirectory = require('./helpers/create-sharded-directory') +const createTwoShards = require('./helpers/create-two-shards') +const crypto = require('crypto') const { FILE_SEPARATOR -} = require('../src/core/utils') +} = require('../src/core/utils/constants') describe('rm', () => { let mfs @@ -27,7 +24,7 @@ describe('rm', () => { await mfs.rm() throw new Error('No error was thrown for missing paths') } catch (err) { - expect(err.message).to.contain('Please supply at least one path to remove') + expect(err.code).to.equal('ERR_INVALID_PARAMS') } }) @@ -36,7 +33,7 @@ describe('rm', () => { await mfs.rm(FILE_SEPARATOR) throw new Error('No error was thrown for missing paths') } catch (err) { - expect(err.message).to.contain('Cannot delete root') + expect(err.code).to.equal('ERR_INVALID_PARAMS') } }) @@ -49,7 +46,7 @@ describe('rm', () => { await mfs.rm(path) throw new Error('No error was thrown for missing recursive flag') } catch (err) { - expect(err.message).to.contain(`${path} is a directory, use -r to remove directories`) + expect(err.code).to.equal('ERR_WAS_DIR') } }) @@ -58,14 +55,14 @@ describe('rm', () => { await mfs.rm(`/file-${Math.random()}`) throw new Error('No error was thrown for non-existent file') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } }) it('removes a file', async () => { const file = `/some-file-${Math.random()}.txt` - await mfs.write(file, bufferStream(100), { + await mfs.write(file, crypto.randomBytes(100), { create: true, parents: true }) @@ -86,11 +83,11 @@ describe('rm', () => { const file1 = `/some-file-${Math.random()}.txt` const file2 = `/some-file-${Math.random()}.txt` - await mfs.write(file1, bufferStream(100), { + await mfs.write(file1, crypto.randomBytes(100), { create: true, parents: true }) - await mfs.write(file2, bufferStream(100), { + await mfs.write(file2, crypto.randomBytes(100), { create: true, parents: true }) @@ -102,14 +99,14 @@ describe('rm', () => { await mfs.stat(file1) throw new Error('File #1 was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } try { await mfs.stat(file2) throw new Error('File #2 was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } }) @@ -125,7 +122,7 @@ describe('rm', () => { await mfs.stat(directory) throw new Error('Directory was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } }) @@ -142,17 +139,17 @@ describe('rm', () => { }) try { - await mfs.ls(subdirectory) + await mfs.stat(path) throw new Error('File was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } try { - 
await mfs.ls(directory) + await mfs.stat(directory) throw new Error('Directory was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } }) @@ -160,7 +157,7 @@ describe('rm', () => { const directory = `directory-${Math.random()}` const file = `/${directory}/some-file-${Math.random()}.txt` - await mfs.write(file, bufferStream(100), { + await mfs.write(file, crypto.randomBytes(100), { create: true, parents: true }) @@ -172,14 +169,14 @@ describe('rm', () => { await mfs.stat(file) throw new Error('File was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } try { await mfs.stat(`/${directory}`) throw new Error('Directory was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } }) @@ -203,15 +200,15 @@ describe('rm', () => { try { await mfs.stat(dirPath) throw new Error('Directory was not removed') - } catch (error) { - expect(error.message).to.contain('does not exist') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') } try { await mfs.stat(shardedDirPath) throw new Error('Directory was not removed') - } catch (error) { - expect(error.message).to.contain('does not exist') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') } }) @@ -233,15 +230,15 @@ describe('rm', () => { try { await mfs.stat(otherDirPath) throw new Error('Directory was not removed') - } catch (error) { - expect(error.message).to.contain('does not exist') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') } try { await mfs.stat(finalShardedDirPath) throw new Error('Directory was not removed') - } catch (error) { - expect(error.message).to.contain('does not exist') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') } }) @@ -255,15 +252,15 @@ describe('rm', () => { dirPath } = await createTwoShards(mfs.ipld, 15) - await mfs.cp(`/ipfs/${dirWithAllFiles.toBaseEncodedString()}`, dirPath) + await mfs.cp(`/ipfs/${dirWithAllFiles}`, dirPath) await mfs.rm(nextFile.path) const stats = await mfs.stat(dirPath) - const updatedDirCid = new CID(stats.hash) + const updatedDirCid = stats.cid expect(stats.type).to.equal('hamt-sharded-directory') - expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithSomeFiles.toBaseEncodedString()) + expect(updatedDirCid.toString()).to.deep.equal(dirWithSomeFiles.toString()) }) it('results in the same hash as a sharded directory created by the importer when removing a subshard', async function () { @@ -276,15 +273,15 @@ describe('rm', () => { dirPath } = await createTwoShards(mfs.ipld, 31) - await mfs.cp(`/ipfs/${dirWithAllFiles.toBaseEncodedString()}`, dirPath) + await mfs.cp(`/ipfs/${dirWithAllFiles}`, dirPath) await mfs.rm(nextFile.path) const stats = await mfs.stat(dirPath) - const updatedDirCid = new CID(stats.hash) + const updatedDirCid = stats.cid expect(stats.type).to.equal('hamt-sharded-directory') - expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithSomeFiles.toBaseEncodedString()) + expect(updatedDirCid.toString()).to.deep.equal(dirWithSomeFiles.toString()) }) it('results in the same hash as a sharded directory created by the importer when removing a file from a subshard of a subshard', async function () { @@ -297,15 +294,15 @@ describe('rm', () => { dirPath } = await createTwoShards(mfs.ipld, 2187) - await mfs.cp(`/ipfs/${dirWithAllFiles.toBaseEncodedString()}`, dirPath) + await 
mfs.cp(`/ipfs/${dirWithAllFiles}`, dirPath) await mfs.rm(nextFile.path) const stats = await mfs.stat(dirPath) - const updatedDirCid = new CID(stats.hash) + const updatedDirCid = stats.cid expect(stats.type).to.equal('hamt-sharded-directory') - expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithSomeFiles.toBaseEncodedString()) + expect(updatedDirCid.toString()).to.deep.equal(dirWithSomeFiles.toString()) }) it('results in the same hash as a sharded directory created by the importer when removing a subshard of a subshard', async function () { @@ -318,14 +315,14 @@ describe('rm', () => { dirPath } = await createTwoShards(mfs.ipld, 139) - await mfs.cp(`/ipfs/${dirWithAllFiles.toBaseEncodedString()}`, dirPath) + await mfs.cp(`/ipfs/${dirWithAllFiles}`, dirPath) await mfs.rm(nextFile.path) const stats = await mfs.stat(dirPath) - const updatedDirCid = new CID(stats.hash) + const updatedDirCid = stats.cid expect(stats.type).to.equal('hamt-sharded-directory') - expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithSomeFiles.toBaseEncodedString()) + expect(updatedDirCid.toString()).to.deep.equal(dirWithSomeFiles.toString()) }) }) diff --git a/test/stat.spec.js b/test/stat.spec.js index 633ed049a5..f3304e8bad 100644 --- a/test/stat.spec.js +++ b/test/stat.spec.js @@ -4,20 +4,14 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const randomBytes = require('./helpers/random-bytes') -const CID = require('cids') - -const { - createMfs, - createShardedDirectory, - EMPTY_DIRECTORY_HASH, - EMPTY_DIRECTORY_HASH_BASE32 -} = require('./helpers') +const crypto = require('crypto') +const createMfs = require('./helpers/create-mfs') +const createShardedDirectory = require('./helpers/create-sharded-directory') describe('stat', () => { let mfs - let smallFile = randomBytes(13) - let largeFile = randomBytes(490668) + let smallFile = crypto.randomBytes(13) + let largeFile = crypto.randomBytes(490668) before(async () => { mfs = await createMfs() @@ -62,46 +56,6 @@ describe('stat', () => { expect(stats.type).to.equal('directory') }) - it('returns only a hash', async () => { - const path = `/directory-${Math.random()}` - - await mfs.mkdir(path) - - const stats = await mfs.stat(path, { - hash: true - }) - - expect(Object.keys(stats).length).to.equal(1) - expect(stats.hash).to.equal(EMPTY_DIRECTORY_HASH) - }) - - it('returns only a base32 hash', async () => { - const path = `/directory-${Math.random()}` - - await mfs.mkdir(path) - - const stats = await mfs.stat(path, { - hash: true, - cidBase: 'base32' - }) - - expect(Object.keys(stats).length).to.equal(1) - expect(stats.hash).to.equal(EMPTY_DIRECTORY_HASH_BASE32) - }) - - it('returns only the size', async () => { - const path = `/directory-${Math.random()}` - - await mfs.mkdir(path) - - const stats = await mfs.stat(path, { - size: true - }) - - expect(Object.keys(stats).length).to.equal(1) - expect(stats.size).to.equal(0) - }) - it.skip('computes how much of the DAG is local', async () => { }) @@ -136,24 +90,6 @@ describe('stat', () => { expect(stats.type).to.equal('file') }) - it('stats a large file with base32', async () => { - const filePath = '/stat/large-file.txt' - - await mfs.write(filePath, largeFile, { - create: true, - parents: true - }) - - const stats = await mfs.stat(filePath, { - cidBase: 'base32' - }) - expect(stats.hash.startsWith('b')).to.equal(true) - expect(stats.size).to.equal(largeFile.length) - expect(stats.cumulativeSize).to.equal(490800) - expect(stats.blocks).to.equal(2) - 
expect(stats.type).to.equal('file') - }) - it('stats a raw node', async () => { const filePath = '/stat/large-file.txt' @@ -164,15 +100,14 @@ describe('stat', () => { }) const stats = await mfs.stat(filePath) - const result = await mfs.ipld.get(new CID(stats.hash)) - const node = result.value - const child = node.links[0] + const node = await mfs.ipld.get(stats.cid) + const child = node.Links[0] - expect(child.cid.codec).to.equal('raw') + expect(child.Hash.codec).to.equal('raw') - const rawNodeStats = await mfs.stat(`/ipfs/${child.cid.toBaseEncodedString()}`) + const rawNodeStats = await mfs.stat(`/ipfs/${child.Hash}`) - expect(rawNodeStats.hash).to.equal(child.cid.toBaseEncodedString()) + expect(rawNodeStats.cid.toString()).to.equal(child.Hash.toString()) expect(rawNodeStats.type).to.equal('file') // this is what go does }) @@ -186,21 +121,20 @@ describe('stat', () => { }) const stats = await mfs.stat(filePath) - const result = await mfs.ipld.get(new CID(stats.hash)) - const node = result.value - const child = node.links[0] + const node = await mfs.ipld.get(stats.cid) + const child = node.Links[0] - expect(child.cid.codec).to.equal('raw') + expect(child.Hash.codec).to.equal('raw') const dir = `/dir-with-raw-${Date.now()}` const path = `${dir}/raw-${Date.now()}` await mfs.mkdir(dir) - await mfs.cp(`/ipfs/${child.cid.toBaseEncodedString()}`, path) + await mfs.cp(`/ipfs/${child.Hash}`, path) const rawNodeStats = await mfs.stat(path) - expect(rawNodeStats.hash).to.equal(child.cid.toBaseEncodedString()) + expect(rawNodeStats.cid.toString()).to.equal(child.Hash.toString()) expect(rawNodeStats.type).to.equal('file') // this is what go does }) @@ -215,7 +149,12 @@ describe('stat', () => { it('stats a file inside a sharded directory', async () => { const shardedDirPath = await createShardedDirectory(mfs) - const files = await mfs.ls(`${shardedDirPath}`) + const files = [] + + for await (const file of mfs.ls(`${shardedDirPath}`)) { + files.push(file) + } + const stats = await mfs.stat(`${shardedDirPath}/${files[0].name}`) expect(stats.type).to.equal('file') diff --git a/test/write.spec.js b/test/write.spec.js index 7f3bce216e..5893f7e858 100644 --- a/test/write.spec.js +++ b/test/write.spec.js @@ -5,21 +5,16 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect const isNode = require('detect-node') -const values = require('pull-stream/sources/values') -const bufferStream = require('pull-buffer-stream') const multihash = require('multihashes') -const randomBytes = require('./helpers/random-bytes') const util = require('util') -const { - collectLeafCids, - createMfs, - cidAtPath, - createShardedDirectory, - createTwoShards, - createShard -} = require('./helpers') -const CID = require('cids') +const createMfs = require('./helpers/create-mfs') +const cidAtPath = require('./helpers/cid-at-path') +const traverseLeafNodes = require('./helpers/traverse-leaf-nodes') +const createShard = require('./helpers/create-shard') +const createShardedDirectory = require('./helpers/create-sharded-directory') +const createTwoShards = require('./helpers/create-two-shards') const crypto = require('crypto') +const all = require('async-iterator-all') let fs, tempWrite @@ -30,11 +25,10 @@ if (isNode) { describe('write', () => { let mfs - let smallFile = randomBytes(13) - let largeFile = randomBytes(490668) + let smallFile = crypto.randomBytes(13) + let largeFile = crypto.randomBytes(490668) const runTest = (fn) => { - let i = 0 const iterations = 5 const files = [{ type: 'Small file', @@ 
-49,19 +43,12 @@ describe('write', () => { }, { type: 'Really large file', path: `/really-large-file-${Math.random()}.jpg`, - content: (end, callback) => { - if (end) { - return callback(end) + content: { + [Symbol.asyncIterator]: async function * () { + for (let i = 0; i < iterations; i++) { + yield largeFile + } } - - if (i === iterations) { - // Ugh. https://github.com/standard/standard/issues/623 - const foo = true - return callback(foo) - } - - i++ - callback(null, largeFile) }, contentSize: largeFile.length * iterations }] @@ -82,7 +69,7 @@ describe('write', () => { }) throw new Error('Did not fail to convert -1 into a pull stream source') } catch (err) { - expect(err.message).to.contain('Don\'t know how to convert -1 into a pull stream source') + expect(err.code).to.equal('ERR_INVALID_PARAMS') } }) @@ -93,7 +80,7 @@ describe('write', () => { }) throw new Error('Did not object to invalid paths') } catch (err) { - expect(err.message).to.contain('paths must start with a leading /') + expect(err.code).to.equal('ERR_INVALID_PATH') } }) @@ -104,7 +91,7 @@ describe('write', () => { }) throw new Error('Did not object to negative write offset') } catch (err) { - expect(err.message).to.contain('cannot have negative write offset') + expect(err.code).to.equal('ERR_INVALID_PARAMS') } }) @@ -115,7 +102,7 @@ describe('write', () => { }) throw new Error('Did not object to negative byte count') } catch (err) { - expect(err.message).to.contain('cannot have negative byte count') + expect(err.code).to.equal('ERR_INVALID_PARAMS') } }) @@ -125,9 +112,7 @@ describe('write', () => { create: true }) - const files = await mfs.ls('/', { - long: true - }) + const files = await all(mfs.ls('/')) expect(files.length).to.equal(1) expect(files[0].name).to.equal('foo.txt') @@ -199,18 +184,6 @@ describe('write', () => { expect(stats.size).to.equal(smallFile.length) }) - it('writes a small file using a pull stream source', async function () { - const filePath = `/small-file-${Math.random()}.txt` - - await mfs.write(filePath, values([smallFile]), { - create: true - }) - - const stats = await mfs.stat(filePath) - - expect(stats.size).to.equal(smallFile.length) - }) - it('writes a small file using an HTML5 Blob (Browser only)', async function () { if (!global.Blob) { return this.skip() @@ -310,7 +283,8 @@ describe('write', () => { length: 2 }) - const buffer = await mfs.read(path) + const buffer = Buffer.concat(await all(mfs.read(path))) + expect(buffer.length).to.equal(2) }) }) @@ -322,15 +296,19 @@ describe('write', () => { await mfs.write(path, content, { create: true }) + + expect((await mfs.stat(path)).size).to.equal(contentSize) + await mfs.write(path, newContent) const stats = await mfs.stat(path) expect(stats.size).to.equal(contentSize) - const buffer = await mfs.read(path, { + const buffer = Buffer.concat(await all(mfs.read(path, { offset: 0, length: newContent.length - }) + }))) + expect(buffer).to.deep.equal(newContent) }) }) @@ -347,10 +325,11 @@ describe('write', () => { const stats = await mfs.stat(path) expect(stats.size).to.equal(offset + contentSize) - const buffer = await mfs.read(path, { + const buffer = Buffer.concat(await all(mfs.read(path, { offset: 0, length: offset - }) + }))) + expect(buffer).to.deep.equal(Buffer.alloc(offset, 0)) }) }) @@ -371,9 +350,10 @@ describe('write', () => { const stats = await mfs.stat(path) expect(stats.size).to.equal(contentSize + newContent.length - 1) - const buffer = await mfs.read(path, { - offset - }) + const buffer = Buffer.concat(await all(mfs.read(path, { + 
offset: offset + }))) + expect(buffer).to.deep.equal(newContent) }) }) @@ -393,9 +373,10 @@ describe('write', () => { const stats = await mfs.stat(path) expect(stats.size).to.equal(newContent.length + offset) - const buffer = await mfs.read(path, { + const buffer = Buffer.concat(await all(mfs.read(path, { offset: offset - 5 - }) + }))) + expect(buffer).to.deep.equal(Buffer.concat([Buffer.from([0, 0, 0, 0, 0]), newContent])) }) }) @@ -414,50 +395,12 @@ describe('write', () => { const stats = await mfs.stat(path) expect(stats.size).to.equal(newContent.length) - const buffer = await mfs.read(path) - expect(buffer).to.deep.equal(newContent) - }) - }) - - runTest(({ type, path, content }) => { - it(`truncates a file after writing with a stream (${type})`, async () => { - const newContent = Buffer.from('Oh hai!') - const stream = values([newContent]) - - await mfs.write(path, content, { - create: true - }) - await mfs.write(path, stream, { - truncate: true - }) - - const stats = await mfs.stat(path) - expect(stats.size).to.equal(newContent.length) + const buffer = Buffer.concat(await all(mfs.read(path))) - const buffer = await mfs.read(path) expect(buffer).to.deep.equal(newContent) }) }) - runTest(({ type, path, content }) => { - it(`truncates a file after writing with a stream with an offset (${type})`, async () => { - const offset = 100 - const newContent = Buffer.from('Oh hai!') - const stream = values([newContent]) - - await mfs.write(path, content, { - create: true - }) - await mfs.write(path, stream, { - truncate: true, - offset - }) - - const stats = await mfs.stat(path) - expect(stats.size).to.equal(offset + newContent.length) - }) - }) - runTest(({ type, path, content }) => { it(`writes a file with raw blocks for newly created leaf nodes (${type})`, async () => { await mfs.write(path, content, { @@ -466,11 +409,10 @@ describe('write', () => { }) const stats = await mfs.stat(path) - const cids = await collectLeafCids(mfs, stats.hash) - const rawNodes = cids - .filter(cid => cid.codec === 'raw') - expect(rawNodes).to.not.be.empty() + for await (const { cid } of traverseLeafNodes(mfs, stats.cid)) { + expect(cid.codec).to.equal('raw') + } }) }) @@ -480,7 +422,7 @@ describe('write', () => { for (let i = 0; i < 10; i++) { files.push({ name: `source-file-${Math.random()}.txt`, - source: bufferStream(100) + source: crypto.randomBytes(100) }) } @@ -491,7 +433,7 @@ describe('write', () => { })) ) - const listing = await mfs.ls('/concurrent') + const listing = await all(mfs.ls('/concurrent')) expect(listing.length).to.equal(files.length) listing.forEach(listedFile => { @@ -500,18 +442,8 @@ describe('write', () => { }) it('rewrites really big files', async function () { - let expectedBytes = Buffer.alloc(0) - let originalBytes = Buffer.alloc(0) - const initialStream = bufferStream(1024 * 300, { - collector: (bytes) => { - originalBytes = Buffer.concat([originalBytes, bytes]) - } - }) - const newDataStream = bufferStream(1024 * 300, { - collector: (bytes) => { - expectedBytes = Buffer.concat([expectedBytes, bytes]) - } - }) + const initialStream = crypto.randomBytes(1024 * 300) + const newDataStream = crypto.randomBytes(1024 * 300) const fileName = `/rewrite/file-${Math.random()}.txt` @@ -524,19 +456,19 @@ describe('write', () => { offset: 0 }) - const actualBytes = await mfs.read(fileName) + const actualBytes = Buffer.concat(await all(mfs.read(fileName))) - for (var i = 0; i < expectedBytes.length; i++) { - if (expectedBytes[i] !== actualBytes[i]) { - if (originalBytes[i] === actualBytes[i]) { - 
throw new Error(`Bytes at index ${i} were not overwritten - expected ${expectedBytes[i]} actual ${originalBytes[i]}`) + for (var i = 0; i < newDataStream.length; i++) { + if (newDataStream[i] !== actualBytes[i]) { + if (initialStream[i] === actualBytes[i]) { + throw new Error(`Bytes at index ${i} were not overwritten - expected ${newDataStream[i]} actual ${initialStream[i]}`) } - throw new Error(`Bytes at index ${i} not equal - expected ${expectedBytes[i]} actual ${actualBytes[i]}`) + throw new Error(`Bytes at index ${i} not equal - expected ${newDataStream[i]} actual ${actualBytes[i]}`) } } - expect(actualBytes).to.deep.equal(expectedBytes) + expect(actualBytes).to.deep.equal(newDataStream) }) it('shards a large directory when writing too many links to it', async () => { @@ -565,9 +497,9 @@ describe('write', () => { expect((await mfs.stat(dirPath)).type).to.equal('hamt-sharded-directory') - const files = await mfs.ls(dirPath, { + const files = await all(mfs.ls(dirPath, { long: true - }) + })) // new file should be in directory expect(files.filter(file => file.name === newFile).pop()).to.be.ok() @@ -586,17 +518,17 @@ describe('write', () => { // should still be a sharded directory expect((await mfs.stat(shardedDirPath)).type).to.equal('hamt-sharded-directory') - const files = await mfs.ls(shardedDirPath, { + const files = await all(mfs.ls(shardedDirPath, { long: true - }) + })) // new file should be in the directory expect(files.filter(file => file.name === newFile).pop()).to.be.ok() // should be able to ls new file directly - expect(await mfs.ls(newFilePath, { + expect(await all(mfs.ls(newFilePath, { long: true - })).to.not.be.empty() + }))).to.not.be.empty() }) it('overwrites a file in a sharded directory when positions do not match', async () => { @@ -618,12 +550,14 @@ describe('write', () => { }) // read the file back - expect(await mfs.read(newFilePath)).to.deep.equal(newContent) + const buffer = Buffer.concat(await all(mfs.read(newFilePath))) + + expect(buffer).to.deep.equal(newContent) // should be able to ls new file directly - expect(await mfs.ls(newFilePath, { + expect(await all(mfs.ls(newFilePath, { long: true - })).to.not.be.empty() + }))).to.not.be.empty() }) it('overwrites file in a sharded directory', async () => { @@ -645,12 +579,14 @@ describe('write', () => { }) // read the file back - expect(await mfs.read(newFilePath)).to.deep.equal(newContent) + const buffer = Buffer.concat(await all(mfs.read(newFilePath))) + + expect(buffer).to.deep.equal(newContent) // should be able to ls new file directly - expect(await mfs.ls(newFilePath, { + expect(await all(mfs.ls(newFilePath, { long: true - })).to.not.be.empty() + }))).to.not.be.empty() }) it('overwrites a file in a subshard of a sharded directory', async () => { @@ -672,12 +608,14 @@ describe('write', () => { }) // read the file back - expect(await mfs.read(newFilePath)).to.deep.equal(newContent) + const buffer = Buffer.concat(await all(mfs.read(newFilePath))) + + expect(buffer).to.deep.equal(newContent) // should be able to ls new file directly - expect(await mfs.ls(newFilePath, { + expect(await all(mfs.ls(newFilePath, { long: true - })).to.not.be.empty() + }))).to.not.be.empty() }) it('writes a file with a different CID version to the parent', async () => { @@ -700,7 +638,7 @@ describe('write', () => { expect((await cidAtPath(filePath, mfs)).version).to.equal(1) - const actualBytes = await mfs.read(filePath) + const actualBytes = Buffer.concat(await all(mfs.read(filePath))) expect(actualBytes).to.deep.equal(expectedBytes) }) 
@@ -731,7 +669,7 @@ describe('write', () => {
    expect((await cidAtPath(filePath, mfs)).version).to.equal(1)

-    const actualBytes = await mfs.read(filePath)
+    const actualBytes = Buffer.concat(await all(mfs.read(filePath)))

    expect(actualBytes).to.deep.equal(expectedBytes)
  })

@@ -762,7 +700,7 @@ describe('write', () => {
    expect((await cidAtPath(filePath, mfs)).version).to.equal(1)

-    const actualBytes = await mfs.read(filePath)
+    const actualBytes = Buffer.concat(await all(mfs.read(filePath)))

    expect(actualBytes).to.deep.equal(Buffer.from([5, 0, 1, 2, 3, 10, 11]))
  })

@@ -788,7 +726,7 @@ describe('write', () => {
    expect(multihash.decode((await cidAtPath(filePath, mfs)).multihash).name).to.equal('sha2-512')

-    const actualBytes = await mfs.read(filePath)
+    const actualBytes = Buffer.concat(await all(mfs.read(filePath)))

    expect(actualBytes).to.deep.equal(expectedBytes)
  })

@@ -803,17 +741,17 @@ describe('write', () => {
      dirPath
    } = await createTwoShards(mfs.ipld, 75)

-    await mfs.cp(`/ipfs/${dirWithSomeFiles.toBaseEncodedString()}`, dirPath)
+    await mfs.cp(`/ipfs/${dirWithSomeFiles}`, dirPath)

    await mfs.write(nextFile.path, nextFile.content, {
      create: true
    })

    const stats = await mfs.stat(dirPath)
-    const updatedDirCid = new CID(stats.hash)
+    const updatedDirCid = stats.cid

    expect(stats.type).to.equal('hamt-sharded-directory')
-    expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithAllFiles.toBaseEncodedString())
+    expect(updatedDirCid.toString()).to.deep.equal(dirWithAllFiles.toString())
  })

  it('results in the same hash as a sharded directory created by the importer when creating a new subshard', async function () {
@@ -826,16 +764,16 @@ describe('write', () => {
      dirPath
    } = await createTwoShards(mfs.ipld, 100)

-    await mfs.cp(`/ipfs/${dirWithSomeFiles.toBaseEncodedString()}`, dirPath)
+    await mfs.cp(`/ipfs/${dirWithSomeFiles}`, dirPath)

    await mfs.write(nextFile.path, nextFile.content, {
      create: true
    })

    const stats = await mfs.stat(dirPath)
-    const updatedDirCid = new CID(stats.hash)
+    const updatedDirCid = stats.cid

-    expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithAllFiles.toBaseEncodedString())
+    expect(updatedDirCid.toString()).to.deep.equal(dirWithAllFiles.toString())
  })

  it('results in the same hash as a sharded directory created by the importer when adding a file to a subshard', async function () {
@@ -848,17 +786,17 @@ describe('write', () => {
      dirPath
    } = await createTwoShards(mfs.ipld, 82)

-    await mfs.cp(`/ipfs/${dirWithSomeFiles.toBaseEncodedString()}`, dirPath)
+    await mfs.cp(`/ipfs/${dirWithSomeFiles}`, dirPath)

    await mfs.write(nextFile.path, nextFile.content, {
      create: true
    })

    const stats = await mfs.stat(dirPath)
-    const updatedDirCid = new CID(stats.hash)
+    const updatedDirCid = stats.cid

    expect(stats.type).to.equal('hamt-sharded-directory')
-    expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithAllFiles.toBaseEncodedString())
+    expect(updatedDirCid.toString()).to.deep.equal(dirWithAllFiles.toString())
  })
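The shard tests above also stop rebuilding a CID out of stats.hash and instead use the CID instance that stat() now returns, comparing CIDs with toString() rather than toBaseEncodedString(). A rough sketch of that assertion, not part of the patch, assuming stats.cid and the expected value are CID instances from the cids package (whose toString() defaults to the same base-encoded form):

'use strict'

// Sketch only: compares the MFS directory against a CID produced by the
// importer helpers (createTwoShards in this spec); `expectedCid` and the
// `stats.cid` property are assumed to be CID instances
async function assertSameShard (mfs, dirPath, expectedCid) {
  const stats = await mfs.stat(dirPath)

  if (stats.type !== 'hamt-sharded-directory') {
    throw new Error(`${dirPath} is not a sharded directory`)
  }

  // CID#toString() replaces the old toBaseEncodedString() call
  if (stats.cid.toString() !== expectedCid.toString()) {
    throw new Error(`expected ${expectedCid} but found ${stats.cid}`)
  }
}

module.exports = assertSameShard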
  it('results in the same hash as a sharded directory created by the importer when adding a file to a subshard of a subshard', async function () {
@@ -871,17 +809,17 @@ describe('write', () => {
      dirPath
    } = await createTwoShards(mfs.ipld, 2187)

-    await mfs.cp(`/ipfs/${dirWithSomeFiles.toBaseEncodedString()}`, dirPath)
+    await mfs.cp(`/ipfs/${dirWithSomeFiles}`, dirPath)

    await mfs.write(nextFile.path, nextFile.content, {
      create: true
    })

    const stats = await mfs.stat(dirPath)
-    const updatedDirCid = new CID(stats.hash)
+    const updatedDirCid = stats.cid

    expect(stats.type).to.equal('hamt-sharded-directory')
-    expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithAllFiles.toBaseEncodedString())
+    expect(updatedDirCid.toString()).to.deep.equal(dirWithAllFiles.toString())
  })

  it('results in the same hash as a sharded directory created by the importer when causing a subshard of a subshard to be created', async function () {
@@ -912,7 +850,7 @@ describe('write', () => {
      content: crypto.randomBytes(5)
    }], 1)

-    await mfs.cp(`/ipfs/${dirCid.toBaseEncodedString()}`, dir)
+    await mfs.cp(`/ipfs/${dirCid}`, dir)

    await mfs.write(`${dir}/supermodule_test`, superModuleContent, {
      create: true
@@ -921,8 +859,8 @@ describe('write', () => {
    await mfs.stat(`${dir}/supermodule_test`)
    await mfs.stat(`${dir}/node-gr`)

-    expect(await mfs.read(`${dir}/node-gr`)).to.deep.equal(nodeGrContent)
-    expect(await mfs.read(`${dir}/supermodule_test`)).to.deep.equal(superModuleContent)
+    expect(Buffer.concat(await all(mfs.read(`${dir}/node-gr`)))).to.deep.equal(nodeGrContent)
+    expect(Buffer.concat(await all(mfs.read(`${dir}/supermodule_test`)))).to.deep.equal(superModuleContent)

    await mfs.rm(`${dir}/supermodule_test`)

@@ -944,7 +882,7 @@ describe('write', () => {
      content: buf
    }], 1)

-    await mfs.cp(`/ipfs/${dirCid.toBaseEncodedString()}`, dir)
+    await mfs.cp(`/ipfs/${dirCid}`, dir)

    await mfs.write(`${dir}/file-1011.txt`, buf, {
      create: true
@@ -952,7 +890,7 @@ describe('write', () => {
    await mfs.stat(`${dir}/file-1011.txt`)

-    expect(await mfs.read(`${dir}/file-1011.txt`)).to.deep.equal(buf)
+    expect(Buffer.concat(await all(mfs.read(`${dir}/file-1011.txt`)))).to.deep.equal(buf)
  })

  it('removes files that cause sub-sub-shards to be removed', async function () {
@@ -969,7 +907,7 @@ describe('write', () => {
      content: buf
    }], 1)

-    await mfs.cp(`/ipfs/${dirCid.toBaseEncodedString()}`, dir)
+    await mfs.cp(`/ipfs/${dirCid}`, dir)

    await mfs.rm(`${dir}/file-1011.txt`)
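These shard hunks all exercise the same round trip: copy an importer-built shard out of /ipfs into MFS, write or remove an entry, then read it back through the async-iterator API. A rough sketch of that flow, not part of the patch, assuming dirCid is a CID returned by the test helpers and `all` is the same async-iterator-all helper as above:

'use strict'

// Sketch only: copy a pre-built sharded directory into MFS, add a file,
// then verify it round-trips; names follow the tests above but the helper
// itself is illustrative
const all = require('async-iterator-all')

async function copyWriteAndVerify (mfs, dirCid, dir, fileName, buf) {
  // interpolating the CID into the path relies on CID#toString()
  await mfs.cp(`/ipfs/${dirCid}`, dir)

  // create the new entry inside the copied shard
  await mfs.write(`${dir}/${fileName}`, buf, { create: true })

  // read it back and compare byte-for-byte
  const bytes = Buffer.concat(await all(mfs.read(`${dir}/${fileName}`)))

  if (!bytes.equals(buf)) {
    throw new Error(`${fileName} did not round-trip through the shard`)
  }
}

module.exports = copyWriteAndVerify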