jbm*_*sso 13 amazon-s3 node.js formidable knox-amazon-s3-client
I am trying to upload a file submitted via a web form directly to an Amazon S3 bucket, using aws-sdk or knox. Form handling is done with formidable.
My question is: how do I properly use formidable with aws-sdk (or knox), using the latest stream-handling features of these libraries?
I am aware that this topic has already been raised here in various flavors.
However, I believe the answers are a bit outdated and/or off topic (i.e. CORS support, which I do not wish to use for now, for various reasons) and/or, most importantly, make no reference to the latest features of aws-sdk (see: https://github.com/aws/aws-sdk-js/issues/13#issuecomment-16085442) or knox (notably putStream() or its readableStream.pipe(req) variant, both explained in the docs).
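For context, the readableStream.pipe(req) variant mentioned above is the one documented in knox's README; a minimal sketch with a local file and placeholder credentials:

var knox = require('knox');
var fs = require('fs');

var client = knox.createClient({
  key: '<api-key>',        // placeholder credentials
  secret: '<secret>',
  bucket: 'mybucket'
});

// Even when piping, S3 needs the exact byte count up front,
// hence the stat() call before the PUT.
fs.stat('./picture.jpg', function(err, stat) {
  if (err) throw err;
  var put = client.put('/picture.jpg', {
    'Content-Length': stat.size,
    'Content-Type': 'image/jpeg'
  });
  fs.createReadStream('./picture.jpg').pipe(put);
  put.on('response', function(res) {
    if (200 == res.statusCode) console.log('saved to %s', put.url);
  });
});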
After hours of struggling, I came to the conclusion that I need some help (disclaimer: I am quite a newbie with streams).
HTML表单:
<form action="/uploadPicture" method="post" enctype="multipart/form-data">
<input name="picture" type="file" accept="image/*">
<input type="submit">
</form>
Express bodyParser middleware, configured this way:
app.use(express.bodyParser({defer: true}))
POST request handler:
uploadPicture = (req, res, next) ->
  form = new formidable.IncomingForm()
  form.parse(req)

  form.onPart = (part) ->
    if not part.filename
      # Let formidable handle all non-file parts (fields)
      form.handlePart(part)
    else
      handlePart(part, form.bytesExpected)

handlePart = (part, fileSize) ->
  # aws-sdk version
  params =
    Bucket: "mybucket"
    Key: part.filename
    ContentLength: fileSize
    Body: part # passing stream object as body parameter

  awsS3client.putObject(params, (err, data) ->
    if err
      console.log err
    else
      console.log data
  )
However, I get the following error:
{ [RequestTimeout: Your socket connection to the server was not read from or written to within the timeout period. Idle connections will be closed.]
  message: 'Your socket connection to the server was not read from or written to within the timeout period. Idle connections will be closed.',
  code: 'RequestTimeout',
  name: 'RequestTimeout',
  statusCode: 400,
  retryable: false }
A knox version of the handlePart() function, tailored this way, also fails miserably:
handlePart = (part, fileSize) ->
  headers =
    "Content-Length": fileSize
    "Content-Type": part.mime

  knoxS3client.putStream(part, part.filename, headers, (err, res) ->
    if err
      console.log err
    else
      console.log res
  )
Here too I get a big res object with a 400 statusCode.
Region is configured to eu-west-1 in both cases.
Additional notes:
node 0.10.12
latest formidable from npm (1.0.14)
latest aws-sdk from npm (1.3.1)
latest knox from npm (0.8.3)
tsu*_*suz 10
Using AWS S3's multipart upload (with s3-upload-stream as the working module) and node-formidable's readable stream, you can pipe the stream into the upload like this:
var formidable = require('formidable');
var http = require('http');
var util = require('util');
var AWS = require('aws-sdk');
var config = require('./config');

var s3 = new AWS.S3({
  accessKeyId: config.get('S3_ACCESS_KEY'),
  secretAccessKey: config.get('S3_SECRET_KEY'),
  apiVersion: '2006-03-01'
});
var s3Stream = require('s3-upload-stream')(s3);
var bucket = 'bucket-name';
var key = 'abcdefgh';

http.createServer(function(req, res) {
  if (req.url == '/upload' && req.method.toLowerCase() == 'post') {
    var form = new formidable.IncomingForm();

    form.on('progress', function(bytesReceived, bytesExpected) {
      //console.log('onprogress', parseInt( 100 * bytesReceived / bytesExpected ), '%');
    });

    form.on('error', function(err) {
      console.log('err', err);
    });

    // This 'end' is for the client to finish uploading
    // upload.on('uploaded') is when the uploading is
    // done on AWS S3
    form.on('end', function() {
      console.log('ended!!!!', arguments);
    });

    form.on('aborted', function() {
      console.log('aborted', arguments);
    });

    form.onPart = function(part) {
      console.log('part', part);
      // part looks like this
      // {
      //   readable: true,
      //   headers: {
      //     'content-disposition': 'form-data; name="upload"; filename="00video38.mp4"',
      //     'content-type': 'video/mp4'
      //   },
      //   name: 'upload',
      //   filename: '00video38.mp4',
      //   mime: 'video/mp4',
      //   transferEncoding: 'binary',
      //   transferBuffer: ''
      // }

      var start = new Date().getTime();

      var upload = s3Stream.upload({
        "Bucket": bucket,
        "Key": part.filename
      });

      // Optional configuration
      //upload.maxPartSize(20971520); // 20 MB
      upload.concurrentParts(5);

      // Handle errors.
      upload.on('error', function (error) {
        console.log('errr', error);
      });

      upload.on('part', function (details) {
        console.log('part', details);
      });

      upload.on('uploaded', function (details) {
        var end = new Date().getTime();
        console.log('it took', end - start);
        console.log('uploaded', details);
      });

      // Maybe you could add compress like
      // part.pipe(compress).pipe(upload)
      part.pipe(upload);
    };

    form.parse(req, function(err, fields, files) {
      res.writeHead(200, {'content-type': 'text/plain'});
      res.write('received upload:\n\n');
      res.end(util.inspect({fields: fields, files: files}));
    });
    return;
  }

  // show a file upload form
  res.writeHead(200, {'content-type': 'text/html'});
  res.end(
    '<form action="/upload" enctype="multipart/form-data" method="post">'+
    '<input type="text" name="title"><br>'+
    '<input type="file" name="upload" multiple="multiple"><br>'+
    '<input type="submit" value="Upload">'+
    '</form>'
  );
}).listen(8080);
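The reason this works where a single putObject call times out: S3's multipart upload API only needs the size of each individual part at the moment it is sent, never the total object size up front. For what it's worth, later 2.x releases of aws-sdk also ship a managed uploader, s3.upload(), which performs the same chunked multipart upload internally and likewise accepts a stream without a ContentLength; a minimal sketch of swapping it into the onPart handler above (the bucket name is a placeholder):

// Assumes the s3 client from the example above; 'bucket-name' is a placeholder.
form.onPart = function(part) {
  if (!part.filename) return form.handlePart(part); // let formidable handle fields
  s3.upload({
    Bucket: 'bucket-name',
    Key: part.filename,
    Body: part              // the part stream; no ContentLength required
  }, function(err, data) {
    if (err) console.log('upload error', err);
    else console.log('uploaded to', data.Location);
  });
};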
Well, according to the creator of formidable, streaming directly to Amazon S3 is impossible:
"The S3 API requires you to provide the size of new files when creating them. This information is not available for multipart/form-data files until they have been fully received. This means streaming is impossible."
And indeed, form.bytesExpected refers to the size of the whole form, not the size of the individual file.
Therefore, the data must first hit either memory or disk on the server before being uploaded to S3.
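(Note that the multipart-upload answer above sidesteps this limitation, since each part's size is known at the moment it is sent; the restriction applies to a single putObject/putStream call.) If you stay with plain putObject, the practical workaround is to let formidable buffer the file to a temporary path first (its default behavior) and upload from disk once the real size is known. A minimal sketch, reusing the awsS3client, bucket name, and "picture" field name from the question:

var formidable = require('formidable');
var fs = require('fs');

uploadPicture = function(req, res, next) {
  var form = new formidable.IncomingForm();
  form.parse(req, function(err, fields, files) {
    if (err) return next(err);
    var file = files.picture;                // field name from the HTML form
    awsS3client.putObject({
      Bucket: 'mybucket',
      Key: file.name,
      ContentLength: file.size,              // the real file size, known now
      Body: fs.createReadStream(file.path)   // stream from formidable's temp file
    }, function(err, data) {
      if (err) console.log(err);
      else console.log(data);
    });
  });
};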