我有以下代码在 Azure Functions 中运行（代码来自 Stack Overflow），大部分时间它都能按预期下载大文件。但是，有时它会停止向文件写入数据，并且不再恢复；文件越大，这种情况发生得越频繁。我没有收到任何错误信息，什么都没有。是否有办法在一段时间（例如 10 秒）没有进展后唤醒整个过程，或者有其他方式来监控这个过程？（标签：azure · 下载 · node.js · 超时）
var azure = require('azure-storage');
var fs = require('fs');
module.exports = function (context, input) {
context.done();
var accessKey = 'myaccesskey';
var storageAccount = 'mystorageaccount';
var containerName = 'mycontainer';
var blobService = azure.createBlobService(storageAccount, accessKey);
var recordName = "a_large_movie.mov";
var blobName = "standard/mov/" + recordName;
var blobSize;
var chunkSize = (1024 * 512) * 8; // I'm experimenting with this variable
var startPos = 0;
var fullPath = "D:/home/site/wwwroot/myAzureFunction/input/";
var blobProperties = blobService.getBlobProperties(containerName, blobName, null, function (error, blob) {
if (error) {
throw error;
}
else {
blobSize = blob.contentLength;
context.log('Registered length: ' + blobSize);
fullPath = fullPath + recordName;
console.log(fullPath);
doDownload();
}
}
);
function doDownload() {
var stream = fs.createWriteStream(fullPath, {flags: 'a'});
var endPos = startPos + chunkSize;
if (endPos > blobSize) {
endPos = blobSize;
context.log('Reached end of file endPos: ' + endPos);
}
context.log("Downloading " + (endPos - startPos) + " bytes starting from " + startPos + " marker.");
blobService.getBlobToStream(
containerName,
blobName,
stream,
{
"rangeStart": startPos,
"rangeEnd": endPos-1
},
function(error) {
if (error) {
throw error;
}
else if (!error) {
startPos = endPos;
if (startPos <= blobSize - 1) {
doDownload();
}
}
}
);
}
};
您是否尝试过将块大小从 4MB 减小到 1MB？ –
没有具体试过 1 MB。不过，我最初使用的是 512 KB 的块大小，它同样出现了上面提到的偶发超时。 –
Hi @GauravMantri——我把块大小改成了 1MB，情况确实有所好转。但是，较大的文件仍然会超时，所以问题并没有消失，只是有所缓解。块大小和超时之间究竟有什么联系？ –