Alex · 7 · c# windows performance fsync
Based on the information about ensuring data is on disk (http://winntfs.com/2012/11/29/windows-write-caching-part-2-an-overview-for-application-developers/), even in the event of e.g. a power outage, it appears that on Windows you need to rely on its "fsync" equivalent, FlushFileBuffers, for the best guarantee that buffers are actually flushed from the disk device's cache onto the storage medium itself. If that information is correct, combining FILE_FLAG_NO_BUFFERING with FILE_FLAG_WRITE_THROUGH does not ensure that the device cache is flushed; it only affects the file system cache.
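For reference, this is how such an "fsync" can be issued from .NET: FileStream.Flush(true) flushes the stream and calls FlushFileBuffers on the underlying handle, or the Win32 function can be P/Invoked directly. A minimal sketch (the file name is just an example):

using System;
using System.ComponentModel;
using System.IO;
using System.Runtime.InteropServices;
using Microsoft.Win32.SafeHandles;

static class FsyncSketch
{
    [DllImport("kernel32.dll", SetLastError = true)]
    static extern bool FlushFileBuffers(SafeFileHandle hFile);

    static void Main()
    {
        using (var fs = new FileStream("example.data", FileMode.Create, FileAccess.ReadWrite))
        {
            fs.Write(new byte[4096], 0, 4096);

            // Option 1: Flush(flushToDisk: true) empties the FileStream buffer and
            // calls FlushFileBuffers on the underlying handle, i.e. the "fsync".
            fs.Flush(true);

            // Option 2: call FlushFileBuffers directly on the handle.
            if (!FlushFileBuffers(fs.SafeFileHandle))
                throw new Win32Exception(Marshal.GetLastWin32Error());
        }
    }
}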
Given that I will be working with rather large files that need to be updated "transactionally", this means performing an "fsync" at the end of a transaction commit. So I created a small application to test the performance of doing so. It essentially performs sequential writes of a batch of 8 memory-page-sized blocks of random bytes, using 8 writes, and then flushes. The batch loop is repeated, and performance is recorded after every so many written pages. In addition, it has two configurable options: whether to fsync on a flush, and whether to write a byte to the last position of the file before starting the page writes.
// Code updated to reflect new results as discussed in answer below.
// 26/Aug/2013: Code updated again to reflect results as discussed in follow up question.
// 28/Aug/2013: Increased file stream buffer to ensure 8 page flushes.
using System;
using System.Collections.Generic;
using System.IO;
using System.IO.MemoryMappedFiles;
using System.Runtime.InteropServices;
using Microsoft.Win32.SafeHandles;

class Program
{
    static void Main(string[] args)
    {
        BenchSequentialWrites(reuseExistingFile: false);
    }

    public static void BenchSequentialWrites(bool reuseExistingFile = false)
    {
        Tuple<string, bool, bool, bool, bool>[] scenarios = new Tuple<string, bool, bool, bool, bool>[]
        {   // output csv, fsync?, fill end?, write through?, mem map?
            Tuple.Create("timing FS-E-B-F.csv",   true,  false, false, false),
            Tuple.Create("timing NS-E-B-F.csv",   false, false, false, false),
            Tuple.Create("timing FS-LB-B-F.csv",  true,  true,  false, false),
            Tuple.Create("timing NS-LB-B-F.csv",  false, true,  false, false),
            Tuple.Create("timing FS-E-WT-F.csv",  true,  false, true,  false),
            Tuple.Create("timing NS-E-WT-F.csv",  false, false, true,  false),
            Tuple.Create("timing FS-LB-WT-F.csv", true,  true,  true,  false),
            Tuple.Create("timing NS-LB-WT-F.csv", false, true,  true,  false),
            Tuple.Create("timing FS-E-B-MM.csv",  true,  false, false, true),
            Tuple.Create("timing NS-E-B-MM.csv",  false, false, false, true),
            Tuple.Create("timing FS-LB-B-MM.csv", true,  true,  false, true),
            Tuple.Create("timing NS-LB-B-MM.csv", false, true,  false, true),
            Tuple.Create("timing FS-E-WT-MM.csv", true,  false, true,  true),
            Tuple.Create("timing NS-E-WT-MM.csv", false, false, true,  true),
            Tuple.Create("timing FS-LB-WT-MM.csv", true, true,  true,  true),
            Tuple.Create("timing NS-LB-WT-MM.csv", false, true, true,  true),
        };

        foreach (var scenario in scenarios)
        {
            Console.WriteLine("{0,-12} {1,-16} {2,-16} {3,-16} {4:F2}", "Total pages", "Interval pages", "Total time", "Interval time", "MB/s");
            CollectGarbage();
            var timingResults = SequentialWriteTest("test.data", !reuseExistingFile, fillEnd: scenario.Item3, nPages: 200 * 1000, fSync: scenario.Item2, writeThrough: scenario.Item4, writeToMemMap: scenario.Item5);
            using (var report = File.CreateText(scenario.Item1))
            {
                report.WriteLine("Total pages,Interval pages,Total bytes,Interval bytes,Total time,Interval time,MB/s");
                foreach (var entry in timingResults)
                {
                    Console.WriteLine("{0,-12} {1,-16} {2,-16} {3,-16} {4:F2}", entry.Item1, entry.Item2, entry.Item5, entry.Item6, entry.Item7);
                    report.WriteLine("{0},{1},{2},{3},{4},{5},{6}", entry.Item1, entry.Item2, entry.Item3, entry.Item4, entry.Item5.TotalSeconds, entry.Item6.TotalSeconds, entry.Item7);
                }
            }
        }
    }

    public unsafe static IEnumerable<Tuple<long, long, long, long, TimeSpan, TimeSpan, double>> SequentialWriteTest(
        string fileName,
        bool createNewFile,
        bool fillEnd,
        long nPages,
        bool fSync = true,
        bool writeThrough = false,
        bool writeToMemMap = false,
        long pageSize = 4096)
    {
        // Create or open the file and, if requested, fill in its last byte.
        var fileMode = createNewFile ? FileMode.Create : FileMode.OpenOrCreate;
        using (var tmpFile = new FileStream(fileName, fileMode, FileAccess.ReadWrite, FileShare.ReadWrite, (int)pageSize))
        {
            Console.WriteLine("Opening temp file with mode {0}{1}", fileMode, fillEnd ? " and writing last byte." : ".");
            tmpFile.SetLength(nPages * pageSize);
            if (fillEnd)
            {
                tmpFile.Position = tmpFile.Length - 1;
                tmpFile.WriteByte(1);
                tmpFile.Position = 0;
                tmpFile.Flush(true);
            }
        }

        // Make sure any flushing / activity has completed.
        System.Threading.Thread.Sleep(TimeSpan.FromMinutes(1));
        System.Threading.Thread.SpinWait(50); // warm up.

        var buf = new byte[pageSize];
        new Random().NextBytes(buf);
        var ms = new System.IO.MemoryStream(buf);

        var stopwatch = new System.Diagnostics.Stopwatch();
        var timings = new List<Tuple<long, long, long, long, TimeSpan, TimeSpan, double>>();
        var pageTimingInterval = 8 * 2000;
        var prevPages = 0L;
        var prevElapsed = TimeSpan.FromMilliseconds(0);

        // Open the file.
        const FileOptions NoBuffering = ((FileOptions)0x20000000);
        var options = writeThrough ? (FileOptions.WriteThrough | NoBuffering) : FileOptions.None;
        using (var file = new FileStream(fileName, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite, (int)(16 * pageSize), options))
        {
            stopwatch.Start();
            if (writeToMemMap)
            {
                // Write pages through a memory map.
                using (var mmf = MemoryMappedFile.CreateFromFile(file, Guid.NewGuid().ToString(), file.Length, MemoryMappedFileAccess.ReadWrite, null, HandleInheritability.None, true))
                using (var accessor = mmf.CreateViewAccessor(0, file.Length, MemoryMappedFileAccess.ReadWrite))
                {
                    byte* base_ptr = null;
                    accessor.SafeMemoryMappedViewHandle.AcquirePointer(ref base_ptr);
                    var offset = 0L;
                    for (long i = 0; i < nPages / 8; i++)
                    {
                        using (var memStream = new UnmanagedMemoryStream(base_ptr + offset, 8 * pageSize, 8 * pageSize, FileAccess.ReadWrite))
                        {
                            for (int j = 0; j < 8; j++)
                            {
                                ms.CopyTo(memStream);
                                ms.Position = 0;
                            }
                        }
                        FlushViewOfFile((IntPtr)(base_ptr + offset), (int)(8 * pageSize));
                        offset += 8 * pageSize;
                        if (fSync)
                            FlushFileBuffers(file.SafeFileHandle);
                        if (((i + 1) * 8) % pageTimingInterval == 0)
                            timings.Add(Report(stopwatch.Elapsed, ref prevElapsed, (i + 1) * 8, ref prevPages, pageSize));
                    }
                    accessor.SafeMemoryMappedViewHandle.ReleasePointer();
                }
            }
            else
            {
                for (long i = 0; i < nPages / 8; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        ms.CopyTo(file);
                        ms.Position = 0;
                    }
                    file.Flush(fSync);
                    if (((i + 1) * 8) % pageTimingInterval == 0)
                        timings.Add(Report(stopwatch.Elapsed, ref prevElapsed, (i + 1) * 8, ref prevPages, pageSize));
                }
            }
        }
        timings.Add(Report(stopwatch.Elapsed, ref prevElapsed, nPages, ref prevPages, pageSize));
        return timings;
    }

    private static Tuple<long, long, long, long, TimeSpan, TimeSpan, double> Report(TimeSpan elapsed, ref TimeSpan prevElapsed, long curPages, ref long prevPages, long pageSize)
    {
        var intervalPages = curPages - prevPages;
        var intervalElapsed = elapsed - prevElapsed;
        var intervalPageSize = intervalPages * pageSize;
        var mbps = (intervalPageSize / (1024.0 * 1024.0)) / intervalElapsed.TotalSeconds;
        prevElapsed = elapsed;
        prevPages = curPages;
        return Tuple.Create(curPages, intervalPages, curPages * pageSize, intervalPageSize, elapsed, intervalElapsed, mbps);
    }

    private static void CollectGarbage()
    {
        GC.Collect();
        GC.WaitForPendingFinalizers();
        System.Threading.Thread.Sleep(200);
        GC.Collect();
        GC.WaitForPendingFinalizers();
        System.Threading.Thread.SpinWait(10);
    }

    [DllImport("kernel32.dll", SetLastError = true)]
    static extern bool FlushViewOfFile(
        IntPtr lpBaseAddress, int dwNumBytesToFlush);

    [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
    static extern bool FlushFileBuffers(SafeFileHandle hFile);
}
The performance results I am getting (64-bit Win 7, slow spindle disk) are not very encouraging. It appears that "fsync" performance depends largely on the size of the file being flushed, so that this dominates the time rather than the amount of "dirty" data to be flushed. The graph below shows the results for the 4 different settings options of the little benchmark app.

As you can see, "fsync" performance decreases exponentially as the file grows (until it really grinds to a halt at a few GB). Furthermore, the disk itself does not seem to be doing all that much (i.e. Resource Monitor shows its active time at only a few percent, and its disk queue mostly empty most of the time).
I obviously expected "fsync" performance to be quite a bit worse than normal buffered flushes, but I had expected it to be more or less constant and independent of file size. As it stands, it seems to suggest that it is not usable in combination with a single large file.
Does anybody have an explanation, different experiences, or a different solution that allows ensuring data is on disk with more or less constant, predictable performance?
UPDATE: Please see the new information in the answer below.
Your test shows an exponential decrease in speed on the sync runs because you are recreating the file each time. In that case it is no longer a purely sequential write - every write also grows the file, which requires multiple seeks to update the file metadata in the filesystem. If you ran all of these jobs against a pre-existing, fully allocated file, you would see much faster results, because none of those metadata updates would be interfering.
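To illustrate the pre-allocation point in .NET terms, here is a minimal sketch (file name and sizes are illustrative) that creates the file at its full size once, so that the benchmark writes only overwrite existing blocks and never grow the file:

using System.IO;

static class Preallocate
{
    const long PageSize = 4096;
    const long NPages = 200 * 1000;

    static void Main()
    {
        // Create the file once, at its final size.
        using (var fs = new FileStream("test.data", FileMode.Create, FileAccess.ReadWrite))
        {
            fs.SetLength(NPages * PageSize);

            // Writing the last byte (the benchmark's "fill end" option): on NTFS this
            // moves the valid-data length to the end of the file, so later writes
            // do not have to extend it either.
            fs.Position = fs.Length - 1;
            fs.WriteByte(1);
            fs.Flush(true);
        }

        // Subsequent benchmark runs should then reuse this file (cf. the
        // reuseExistingFile option in the benchmark above) so that metadata
        // updates from growing the file do not interfere with the measured flushes.
    }
}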
I ran a similar test on my Linux box. The results when recreating the file each time:
mmap direct last sync time
0 0 0 0 0.882293s
0 0 0 1 27.050636s
0 0 1 0 0.832495s
0 0 1 1 26.966625s
0 1 0 0 5.775266s
0 1 0 1 22.063392s
0 1 1 0 5.265739s
0 1 1 1 24.203251s
1 0 0 0 1.031684s
1 0 0 1 28.244678s
1 0 1 0 1.031888s
1 0 1 1 29.540660s
1 1 0 0 1.032883s
1 1 0 1 29.408005s
1 1 1 0 1.035110s
1 1 1 1 28.948555s
The results with a pre-existing file (obviously the last_byte case is irrelevant here; also, the very first result still had to create the file):
mmap direct last sync time
0 0 0 0 1.199310s
0 0 0 1 7.858803s
0 0 1 0 0.184925s
0 0 1 1 8.320572s
0 1 0 0 4.047780s
0 1 0 1 4.066993s
0 1 1 0 4.042564s
0 1 1 1 4.307159s
1 0 0 0 3.596712s
1 0 0 1 8.284428s
1 0 1 0 0.242584s
1 0 1 1 8.070947s
1 1 0 0 0.240500s
1 1 0 1 8.213450s
1 1 1 0 0.240922s
1 1 1 1 8.265024s
(Note that I only used 10,000 chunks rather than 25,000, so this is only writing 320MB, on an ext2 file system. I didn't have a bigger ext2fs handy; my bigger fs is XFS, and it refused to allow mmap+direct I/O.)
Here's the code, if you're interested:
#define _GNU_SOURCE 1

#include <stdio.h>      /* printf, fopen, fprintf, perror */
#include <unistd.h>     /* write, lseek, close, ftruncate, fdatasync */
#include <malloc.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>

#define USE_MMAP    8
#define USE_DIRECT  4
#define USE_LAST    2
#define USE_SYNC    1

#define PAGE        4096
#define CHUNK       (8*PAGE)
#define NCHUNKS     10000
#define STATI       1000

#define FSIZE       (NCHUNKS*CHUNK)

int main()
{
    int i, j, fd, rc, stc;
    char *data = valloc(CHUNK);
    char *map, *dst;
    char sfname[8];
    struct timeval start, end, stats[NCHUNKS/STATI+1];
    FILE *sfile;

    printf("mmap\tdirect\tlast\tsync\ttime\n");
    for (i = 0; i < 16; i++) {
        int oflag = O_CREAT|O_RDWR|O_TRUNC;

        if (i & USE_DIRECT)
            oflag |= O_DIRECT;
        fd = open("dummy", oflag, 0666);
        ftruncate(fd, FSIZE);

        if (i & USE_LAST) {
            lseek(fd, 0, SEEK_END);
            write(fd, data, 1);
            lseek(fd, 0, SEEK_SET);
        }
        if (i & USE_MMAP) {
            map = mmap(NULL, FSIZE, PROT_WRITE, MAP_SHARED, fd, 0);
            if (map == (char *)-1L) {
                perror("mmap");
                exit(1);
            }
            dst = map;
        }

        sprintf(sfname, "%x.csv", i);
        sfile = fopen(sfname, "w");
        stc = 1;

        printf("%d\t%d\t%d\t%d\t",
            (i&USE_MMAP)!=0, (i&USE_DIRECT)!=0, (i&USE_LAST)!=0, i&USE_SYNC);
        fflush(stdout);

        gettimeofday(&start, NULL);
        stats[0] = start;
        for (j = 1; j <= NCHUNKS; j++) {
            if (i & USE_MMAP) {
                memcpy(dst, data, CHUNK);
                if (i & USE_SYNC)
                    msync(dst, CHUNK, MS_SYNC);
                dst += CHUNK;
            } else {
                write(fd, data, CHUNK);
                if (i & USE_SYNC)
                    fdatasync(fd);
            }
            if (!(j % STATI)) {
                gettimeofday(&end, NULL);
                stats[stc++] = end;
            }
        }

        end.tv_usec -= start.tv_usec;
        if (end.tv_usec < 0) {
            end.tv_sec--;
            end.tv_usec += 1000000;
        }
        end.tv_sec -= start.tv_sec;
        printf(" %d.%06ds\n", (int)end.tv_sec, (int)end.tv_usec);

        if (i & USE_MMAP)
            munmap(map, FSIZE);
        close(fd);

        for (j = NCHUNKS/STATI; j > 0; j--) {
            stats[j].tv_usec -= stats[j-1].tv_usec;
            if (stats[j].tv_usec < 0) {
                stats[j].tv_sec--;
                stats[j].tv_usec += 1000000;
            }
            stats[j].tv_sec -= stats[j-1].tv_sec;
        }
        for (j = 1; j <= NCHUNKS/STATI; j++)
            fprintf(sfile, "%d\t%d.%06d\n", j*STATI*CHUNK,
                (int)stats[j].tv_sec, (int)stats[j].tv_usec);
        fclose(sfile);
    }
    return 0;
}
I have experimented and tested some more, and found a solution that is acceptable to me (although so far I have only tested sequential writes). In the process I discovered some unexpected behaviors that raise a number of new questions. I will post a new SO question about those (Explanation/information sought: Windows write I/O performance with "fsync" (FlushFileBuffers)).
I added the following two additional options to my benchmark:

- Unbuffered/write-through writes (i.e. specifying the FILE_FLAG_NO_BUFFERING and FILE_FLAG_WRITE_THROUGH flags)
- Writing to the file indirectly, through a memory-mapped file

This gave me some unexpected results, one of which provides a more or less acceptable solution to my problem: when "fsyncing" in combination with unbuffered/write-through I/O, I do not observe an exponential decay in write speed. Thus (although it is not very fast), this gives me a way to ensure data is on disk with constant, predictable performance that is not affected by the size of the file.
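In isolation, that combination boils down to the following minimal sketch (the 0x20000000 value for FILE_FLAG_NO_BUFFERING is the same one used in the benchmark code above; "test.data" is assumed to be the pre-allocated file, and with NO_BUFFERING the writes have to be sector/page aligned in size and file offset):

using System;
using System.IO;

static class UnbufferedWriteThroughFsync
{
    // FILE_FLAG_NO_BUFFERING has no FileOptions member, so its raw value is passed.
    const FileOptions NoBuffering = (FileOptions)0x20000000;

    static void Main()
    {
        const int pageSize = 4096;
        var page = new byte[pageSize];
        new Random().NextBytes(page);

        using (var file = new FileStream("test.data", FileMode.Open, FileAccess.ReadWrite,
                                         FileShare.ReadWrite, 8 * pageSize,
                                         FileOptions.WriteThrough | NoBuffering))
        {
            // Write one batch of 8 page-sized blocks...
            for (int i = 0; i < 8; i++)
                file.Write(page, 0, page.Length);

            // ...then "fsync", so the device cache is flushed as well,
            // not only the file system cache.
            file.Flush(true);
        }
    }
}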
Some of the other results were unexpected as well; those are covered in the follow-up question mentioned above.
I have added the updated code used for the benchmark to my original question.
The graph below shows some of the additional new results.
