Chr*_*rod 11 assembly gcc llvm avx julia
我想更好地理解为什么两个非常相似的代码片段在我的计算机上表现得截然不同.这些测试是在Ryzen处理器上使用gcc-trunk和Julia 0.7-alpha(LLVM 6.0).gcc-8看似相似,而Julia 0.6.3(LLVM 3.9)略慢于v0.7.
我编写了生成函数(想想C++模板),为矩阵运算生成展开代码,以及一个简单的转换器,可以将简单的代码转换为Fortran.
对于8x8矩阵乘法,这是Fortran代码的样子:
module mul8mod
implicit none
contains
! Compute C = A * B for 8x8 double-precision matrices stored column-major
! as flat length-64 vectors: element (i,j) lives at index i + 8*(j-1).
!
! The accumulation for each output element proceeds strictly left-to-right
! over k = 1..8, matching the association order of the fully unrolled form
! (four products summed, then four more added), so results are identical
! under non-reassociating floating-point compilation.
subroutine mul8x8(A, B, C)
real(8), dimension(64), intent(in) :: A, B
real(8), dimension(64), intent(out) :: C
integer :: i, j, k
real(8) :: acc
do j = 1, 8
  do i = 1, 8
    ! Start from the k = 1 product so no spurious 0 + x term is introduced.
    acc = A(i) * B(8*(j-1) + 1)
    do k = 2, 8
      acc = acc + A(8*(k-1) + i) * B(8*(j-1) + k)
    end do
    C(8*(j-1) + i) = acc
  end do
end do
end subroutine mul8x8
end module mul8mod
Run Code Online (Sandbox Code Playgroud)
Julia代码看起来很相似,但我首先提取输入的所有元素,处理标量,然后插入它们.我发现在Julia中效果更好,但在Fortran中效果更差.
这些表达式看起来非常简单,向量化它们应该不成问题——Julia 就做得很漂亮。下面是 8x8 矩阵乘法的基准测试:
# Julia benchmark; using YMM vectors
@benchmark mul!($c8, $a8, $b8)
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 57.059 ns (0.00% GC)
median time: 58.901 ns (0.00% GC)
mean time: 59.522 ns (0.00% GC)
maximum time: 83.196 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 984
Run Code Online (Sandbox Code Playgroud)
这很好用.
使用以下代码编译Fortran代码:
gfortran-trunk -march=native -Ofast -mprefer-vector-width=256 -shared -fPIC mul8module1.F08 -o libmul8mod1v15.so
基准测试结果:
# gfortran, using XMM vectors; code was unrolled 8x8 matrix multiplication
@benchmark mul8v15!($c8, $a8, $b8)
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 122.175 ns (0.00% GC)
median time: 128.373 ns (0.00% GC)
mean time: 128.643 ns (0.00% GC)
maximum time: 194.090 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 905
Run Code Online (Sandbox Code Playgroud)
需要大约两倍的时间.用-S查看程序集显示它忽略了我的-mprefer-vector-width = 256,而是使用了xmm寄存器.当我使用指针而不是数组或可变结构时,这或多或少都是我在Julia中获得的(当给定指针时Julia假设别名并编译较慢的版本).
我尝试了生成 Fortran 代码的各种变体(例如,使用 sum(va * vb) 这样的语句,其中 va 和 vb 是长度为 4 的向量),但最简单的做法是直接调用内置函数 matmul。在不加 -mprefer-vector-width=256 的情况下编译 matmul(矩阵大小 8x8 已知):
# gfortran using XMM vectors generated from intrinsic matmul function
@benchmark mul8v2v2!($c8, $a8, $b8)
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 92.983 ns (0.00% GC)
median time: 96.366 ns (0.00% GC)
mean time: 97.651 ns (0.00% GC)
maximum time: 166.845 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 954
Run Code Online (Sandbox Code Playgroud)
并用它编译:
# gfortran using YMM vectors with intrinsic matmul
@benchmark mul8v2v1!($c8, $a8, $b8)
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 163.667 ns (0.00% GC)
median time: 166.544 ns (0.00% GC)
mean time: 168.320 ns (0.00% GC)
maximum time: 277.291 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 780
Run Code Online (Sandbox Code Playgroud)
无avx的matmul看起来非常快,只能使用xmm寄存器,但强制进入ymm时 - 可怕.
知道发生了什么事吗?我想理解为什么当被指示做同样的事情,并产生非常相似的装配时,一个比另一个快得多.
FWIW,输入数据是8字节对齐的.我尝试了16字节对齐的输入,它似乎没有真正的区别.
我看了一下gfortran生成的程序集(注意,这只是内在的matmul函数):
gfortran-trunk -march=native -Ofast -mprefer-vector-width=256 -shared -fPIC -S mul8module2.F08 -o mul8mod2v1.s
来自Julia/LLVM的那个,得到了@code_native mul!(c8, a8, b8)(展开的矩阵乘法).
如果有人愿意看一下,我会非常乐意分享所有的集会或其他任何东西,但如果我把它包括在这里,我会在这篇文章中达到字符限制.
正确使用ymm寄存器和许多vfmadd__pd指令,还有许多vmovupd,vmulpd和vmovapd.
我注意到的最大区别是,虽然LLVM使用了大量的vbroadcastsd,但gcc却有大量的vunpcklpd和vpermpd指令.
简短的样本; GCC:
vpermpd $216, %ymm7, %ymm7
vpermpd $216, %ymm2, %ymm2
vpermpd $216, %ymm3, %ymm3
vpermpd $216, %ymm5, %ymm5
vunpckhpd %ymm6, %ymm4, %ymm4
vunpcklpd %ymm7, %ymm2, %ymm6
vunpckhpd %ymm7, %ymm2, %ymm2
vunpcklpd %ymm5, %ymm3, %ymm7
vpermpd $216, %ymm15, %ymm15
vpermpd $216, %ymm4, %ymm4
vpermpd $216, %ymm0, %ymm0
vpermpd $216, %ymm1, %ymm1
vpermpd $216, %ymm6, %ymm6
vpermpd $216, %ymm7, %ymm7
vunpckhpd %ymm5, %ymm3, %ymm3
vunpcklpd %ymm15, %ymm0, %ymm5
vunpckhpd %ymm15, %ymm0, %ymm0
vunpcklpd %ymm4, %ymm1, %ymm15
vunpckhpd %ymm4, %ymm1, %ymm1
vunpcklpd %ymm7, %ymm6, %ymm4
vunpckhpd %ymm7, %ymm6, %ymm6
Run Code Online (Sandbox Code Playgroud)
朱莉娅/ LLVM:
vbroadcastsd 8(%rax), %ymm3
vbroadcastsd 72(%rax), %ymm2
vbroadcastsd 136(%rax), %ymm12
vbroadcastsd 200(%rax), %ymm8
vbroadcastsd 264(%rax), %ymm10
vbroadcastsd 328(%rax), %ymm15
vbroadcastsd 392(%rax), %ymm14
vmulpd %ymm7, %ymm0, %ymm1
vmulpd %ymm11, %ymm0, %ymm0
vmovapd %ymm8, %ymm4
Run Code Online (Sandbox Code Playgroud)
这可以解释这个区别吗?为什么gcc在这里会如此优化?有什么方法可以帮助它,以便它可以生成更类似于LLVM的代码吗?
总的来说,gcc 倾向于在基准测试中胜过 Clang(例如,在 Phoronix 上)……也许我可以尝试 Flang(Fortran 的 LLVM 前端),以及 Eigen(分别用 g++ 和 clang++ 编译)。
重现,matmul内在功能:
module mul8mod
implicit none
contains
! Compute C = A * B for 8x8 double-precision matrices via the intrinsic
! matmul; with the size known at compile time, gfortran can fully inline
! and vectorize the product.
subroutine intrinsic_mul8x8(A, B, C)
real(8), dimension(8,8), intent(in) :: A, B
real(8), dimension(8,8), intent(out) :: C
C = matmul(A, B)
end subroutine
end module mul8mod
Run Code Online (Sandbox Code Playgroud)
编译如上,和Julia代码重现基准:
#Pkg.clone("https://github.com/chriselrod/TriangularMatrices.jl")
using TriangularMatrices, BenchmarkTools, Compat
a8 = randmat(8); b8 = randmat(8); c8 = randmat(8);
import TriangularMatrices: mul!
@benchmark mul!($c8, $a8, $b8)
@code_native mul!(c8, a8, b8)
# after compiling into the shared library in libmul8mod2v2.so
# If compiled outside the working directory, replace pwd() accordingly
const libmul8path2v1 = joinpath(pwd(), "libmul8mod2v1.so")
# In-place 8x8 multiply C = A * B by calling the gfortran-compiled
# subroutine intrinsic_mul8x8 (gfortran module-mangled symbol name) from
# the shared library at libmul8path2v1.
# NOTE(review): pointer_from_objref assumes A, B, C are heap-allocated
# mutable objects whose data starts at the object address — verify this
# matches the randmat layout.
function mul8v2v1!(C, A, B)
ccall((:__mul8mod_MOD_intrinsic_mul8x8, libmul8path2v1),
Cvoid,(Ptr{Cvoid},Ptr{Cvoid},Ptr{Cvoid}),
pointer_from_objref(A),
pointer_from_objref(B),
pointer_from_objref(C))
# Return C so the call composes like Julia's own mul!.
C
end
@benchmark mul8v2v1!($c8, $a8, $b8)
Run Code Online (Sandbox Code Playgroud)
编辑:
感谢大家的回复!
Because I noticed that the code with the broadcasts is dramatically faster, I decided to rewrite my code-generator to encourage broadcasting. Generated code now looks more like this:
C[1] = B[1] * A[1]
C[2] = B[1] * A[2]
C[3] = B[1] * A[3]
C[4] = B[1] * A[4]
C[5] = B[1] * A[5]
C[6] = B[1] * A[6]
C[7] = B[1] * A[7]
C[8] = B[1] * A[8]
C[1] += B[2] * A[9]
C[2] += B[2] * A[10]
C[3] += B[2] * A[11]
C[4] += B[2] * A[12]
C[5] += B[2] * A[13]
C[6] += B[2] * A[14]
C[7] += B[2] * A[15]
C[8] += B[2] * A[16]
C[1] += B[3] * A[17]
...
Run Code Online (Sandbox Code Playgroud)
I am intending for the compiler to broadcast B, and then use repeated vectorized fma instructions. Julia really liked this rewrite:
# Julia benchmark; using YMM vectors
@benchmark mul2!($c, $a, $b)
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 45.156 ns (0.00% GC)
median time: 47.058 ns (0.00% GC)
mean time: 47.390 ns (0.00% GC)
maximum time: 62.066 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 990
Run Code Online (Sandbox Code Playgroud)
Figuring it was llvm being smart, I also built Flang (Fortran frontend to llvm):
# compiled with
# flang -march=native -Ofast -mprefer-vector-width=256 -shared -fPIC mul8module6.f95 -o libmul8mod6v2.so
@benchmark mul8v6v2!($c, $a, $b)
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 51.322 ns (0.00% GC)
median time: 52.791 ns (0.00% GC)
mean time: 52.944 ns (0.00% GC)
maximum time: 83.376 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 988
Run Code Online (Sandbox Code Playgroud)
This is also really good. gfortran still refused to use broadcasts, and was still slow.
I've still have questions on how best to generate code. Encouraging broadcasts is obviously the way to go. Right now, I'm basically doing matrix*vector multiplication, and then repeating it for every column of B. So my written code loops over A once per column of B. I do not know if that is what the compiler is actually doing, or if some other pattern would lead to faster code.
The point of optimizing multiplication of tiny matrices is as a kernel for a recursive algorithm for multiplying larger matrices. So I also need to figure out the best way to handle different sizes. This algorithm is far better for 8x8 than it is other sizes. For nrow(A) % 4 (ie, if A has 10 rows, 10 % 4 = 2) I used the old approach for the remainder, after the broadcastable block.
But for 10x10 matrices, it takes 151 ns. 12 is perfectly divisible by 4, but it takes 226. If this approach scaled with O(n^3), the times should be 91 ns and 158 ns respectively. I am falling well short. I think I need to block down to a very small size, and try and get as many 8x8 as possible.
It may be the case that 8x8 ought to be the maximum size.
这将是一个很好的例子,适合用能够暴露微架构瓶颈的低级剖析与性能分析工具来研究。虽然我没有用过 AMD μProf,但我使用 Intel 同类产品(如 XTU)的经验表明,当工具出自同一家公司——甚至可能就是坐在负责 Ryzen AVX 指令硬件实现的工程师附近的人——之手时,效果往往最好。
在运行大量迭代时,先从应用程序的基于事件的剖析(event-based profile)入手。可以关注的一般方向包括:两种风格的生成程序集中,哪一种能更好地利用执行端口或相关的后端 CPU 资源,以及它们在缓存和内存访问上的行为是否不同。这些并不能回答您的概念性问题——为什么 gcc 选择一种风格生成程序集而 LLVM 选择另一种——但它可能会在硬件层面告诉您更多关于为什么 LLVM 生成的程序集运行得更快的信息。
\n| 归档时间: |
|
| 查看次数: |
386 次 |
| 最近记录: |