Skip to content

Commit

Permalink
Fix the bug where the Node.js stream read chunk size was not fixed, which caused …
Browse files Browse the repository at this point in the history
…errors like "Error: block size must be not greater than 4194304"
  • Loading branch information
jemygraw committed Jun 14, 2018
1 parent 48f5ca6 commit 30a7280
Show file tree
Hide file tree
Showing 3 changed files with 22 additions and 8 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
## CHANGE LOG

## v7.2.0
- 修复node的stream读取的chunk大小比较随意的问题

## v7.1.9
- 修复新版node下resume up方式文件内容被缓存而导致的上传失败

Expand Down
2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "qiniu",
"version": "7.1.9",
"version": "7.2.0",
"description": "Node wrapper for Qiniu Resource (Cloud) Storage API",
"main": "index.js",
"directories": {
Expand Down
25 changes: 18 additions & 7 deletions qiniu/storage/resume.js
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,8 @@ function putReq(config, uploadToken, key, rsStream, rsStreamLen, putExtra,
var finishedBlock = 0;
var curBlock = 0;
var readLen = 0;
var bufferLen = 0;
var remainedData = new Buffer(0);
var readBuffers = [];
var finishedCtxList = [];
var finishedBlkPutRets = [];
Expand Down Expand Up @@ -151,19 +153,26 @@ function putReq(config, uploadToken, key, rsStream, rsStreamLen, putExtra,
//check when to mkblk
rsStream.on('data', function(chunk) {
readLen += chunk.length;
bufferLen += chunk.length;
readBuffers.push(chunk);

if (readLen % conf.BLOCK_SIZE == 0 || readLen == fileSize) {
//console.log(readLen);
var readData = Buffer.concat(readBuffers);
readBuffers = []; //reset read buffer
if (bufferLen >= conf.BLOCK_SIZE || readLen == fileSize) {
var readBuffersData = Buffer.concat(readBuffers);
var blockSize = conf.BLOCK_SIZE - remainedData.length;

var postData = Buffer.concat([remainedData, readBuffersData.slice(0,blockSize)]);
remainedData = new Buffer(readBuffersData.slice(blockSize,bufferLen));
bufferLen = bufferLen - conf.BLOCK_SIZE;
//reset buffer
readBuffers = [];

curBlock += 1; //set current block
if (curBlock > finishedBlock) {
rsStream.pause();
mkblkReq(upDomain, uploadToken, readData, function(respErr,
mkblkReq(upDomain, uploadToken, postData, function(respErr,
respBody,
respInfo) {
var bodyCrc32 = parseInt("0x" + getCrc32(readData));
var bodyCrc32 = parseInt("0x" + getCrc32(postData));
if (respInfo.statusCode != 200 || respBody.crc32 != bodyCrc32) {
callbackFunc(respErr, respBody, respInfo);
rsStream.close();
Expand Down Expand Up @@ -258,7 +267,9 @@ function mkfileReq(upDomain, uploadToken, fileSize, ctxList, key, putExtra,
ResumeUploader.prototype.putFile = function(uploadToken, key, localFile,
putExtra, callbackFunc) {
putExtra = putExtra || new PutExtra();
var rsStream = fs.createReadStream(localFile);
var rsStream = fs.createReadStream(localFile,{
highWaterMark: conf.BLOCK_SIZE,
});
var rsStreamLen = fs.statSync(localFile).size;
if (!putExtra.mimeType) {
putExtra.mimeType = mime.getType(localFile);
Expand Down

0 comments on commit 30a7280

Please sign in to comment.