This is what I would do in a perfect world:
fs.open('somepath', 'r+', function(err, fd) {
  fs.write(fd, 'somedata', function(err, written, string) {
    fs.rewind(fd, 0) // this doesn't exist
  })
})
This is my current implementation:
return async.waterfall([
  function(next) {
    // opening a file descriptor to write some data
    return fs.open('somepath', 'w+', next)
  },
  function(fd, next) {
    // writing the data
    return fs.write(fd, 'somedata', function(err, written, string) {
      return next(null, fd)
    })
  },
  function(fd, next) {
    // closing the file descriptor
    return fs.close(fd, next)
  },
  function(next) {
    // open again to reset cursor position
    return fs.open('somepath', 'r', next)
  }
], …
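For what it's worth, here is a minimal sketch of one way to avoid the close/reopen round-trip, assuming only the standard fs callback API: fs.read takes an explicit position argument, so passing 0 reads from the start of the file without ever "rewinding" the descriptor (the path and data below are just the placeholders from the question):

const fs = require('fs');

fs.open('somepath', 'w+', function(err, fd) {
  if (err) throw err;
  fs.write(fd, 'somedata', function(err, written) {
    if (err) throw err;
    const buffer = Buffer.alloc(written);
    // position 0: read from the beginning, independent of the write cursor
    fs.read(fd, buffer, 0, written, 0, function(err, bytesRead) {
      if (err) throw err;
      console.log(buffer.toString('utf8', 0, bytesRead));
      fs.close(fd, function() {});
    });
  });
});

The same position argument would also work inside the async.waterfall version: keep passing the fd along and call fs.read with position 0 instead of closing and reopening the file.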
I want to efficiently read the last X bytes of a very large file using node.js. What is the most efficient way to do this?

As far as I know, the only way to do this is to create a read stream and loop until the byte index is reached.
Example:
const fs = require('fs');

// let's assume I want the last 10 bytes;
// I would open a stream and loop until I reach the end of the file
// Once I did I would go to the last 10 bytes I kept in memory
let f = fs.createReadStream('file.xpto'); // which is a 1gb file
let last = [];
f.on('data', function(chunk) { // chunk renamed so it no longer shadows the accumulator
  for (const d of chunk) {
    last.push(d);
  }
  last = last.slice(-10); // keep only the last 10 bytes
});
f.on('end', function() {
  // check data
  console.log('Last test …
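A more direct sketch, assuming fs.stat and a positional fs.read are acceptable here: get the file size first, then read only the final N bytes, so the 1 GB file is never streamed in full (readLastBytes is a hypothetical helper name, not an existing API):

const fs = require('fs');

function readLastBytes(path, n, callback) {
  fs.stat(path, function(err, stats) {
    if (err) return callback(err);
    const start = Math.max(0, stats.size - n); // where the last n bytes begin
    fs.open(path, 'r', function(err, fd) {
      if (err) return callback(err);
      const buffer = Buffer.alloc(Math.min(n, stats.size));
      fs.read(fd, buffer, 0, buffer.length, start, function(err, bytesRead) {
        fs.close(fd, function() {});
        if (err) return callback(err);
        callback(null, buffer.slice(0, bytesRead)); // the last n bytes
      });
    });
  });
}

readLastBytes('file.xpto', 10, function(err, buf) {
  if (err) throw err;
  console.log(buf);
});

A streaming equivalent would pass { start: stats.size - n } to fs.createReadStream, which likewise skips everything before the last n bytes instead of looping over the whole file.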