A better 8x8 byte matrix transpose with SSE?

xma*_*s79 11 c optimization sse simd matrix

I found this post explaining how to transpose an 8x8 byte matrix with 24 operations, and a few scrolls down there is code that implements the transpose. However, that method does not exploit the fact that we can block the 8x8 transpose into four 4x4 transposes, each of which can be done with a single shuffle instruction (this post is the reference). So I came up with this solution:

#include <stdint.h>
#include <smmintrin.h>  // SSE4.1 (for _mm_extract_epi64; SSSE3 _mm_shuffle_epi8 comes along)

__m128i transpose4x4mask = _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13,  9, 5, 1, 12,  8, 4, 0);  // transposes one row-major 4x4 byte block
__m128i shuffle8x8Mask = _mm_setr_epi8(0, 1, 2, 3, 8, 9, 10, 11, 4,  5, 6, 7, 12,  13, 14, 15);   // left halves of two rows, then right halves

void TransposeBlock8x8(uint8_t *src, uint8_t *dst, int srcStride, int dstStride) {
    // Each load packs two consecutive 8-byte rows into one 128-bit register.
    __m128i load0 = _mm_set_epi64x(*(uint64_t*)(src + 1 * srcStride), *(uint64_t*)(src + 0 * srcStride));
    __m128i load1 = _mm_set_epi64x(*(uint64_t*)(src + 3 * srcStride), *(uint64_t*)(src + 2 * srcStride));
    __m128i load2 = _mm_set_epi64x(*(uint64_t*)(src + 5 * srcStride), *(uint64_t*)(src + 4 * srcStride));
    __m128i load3 = _mm_set_epi64x(*(uint64_t*)(src + 7 * srcStride), *(uint64_t*)(src + 6 * srcStride));

    // Regroup each row pair: left 4-byte halves in the low qword, right halves in the high qword.
    __m128i shuffle0 = _mm_shuffle_epi8(load0, shuffle8x8Mask);
    __m128i shuffle1 = _mm_shuffle_epi8(load1, shuffle8x8Mask);
    __m128i shuffle2 = _mm_shuffle_epi8(load2, shuffle8x8Mask);
    __m128i shuffle3 = _mm_shuffle_epi8(load3, shuffle8x8Mask);

    // Collect the four 4x4 sub-blocks (left/right halves of rows 0-3 and 4-7).
    __m128i block0 = _mm_unpacklo_epi64(shuffle0, shuffle1);
    __m128i block1 = _mm_unpackhi_epi64(shuffle0, shuffle1);
    __m128i block2 = _mm_unpacklo_epi64(shuffle2, shuffle3);
    __m128i block3 = _mm_unpackhi_epi64(shuffle2, shuffle3);

    // Transpose each 4x4 sub-block with a single pshufb.
    __m128i transposed0 = _mm_shuffle_epi8(block0, transpose4x4mask);
    __m128i transposed1 = _mm_shuffle_epi8(block1, transpose4x4mask);
    __m128i transposed2 = _mm_shuffle_epi8(block2, transpose4x4mask);
    __m128i transposed3 = _mm_shuffle_epi8(block3, transpose4x4mask);

    // Interleave 4-byte block rows into complete 8-byte output rows, two per register.
    __m128i store0 = _mm_unpacklo_epi32(transposed0, transposed2);
    __m128i store1 = _mm_unpackhi_epi32(transposed0, transposed2);
    __m128i store2 = _mm_unpacklo_epi32(transposed1, transposed3);
    __m128i store3 = _mm_unpackhi_epi32(transposed1, transposed3);

    // Each register now holds two complete output rows; extract and store them.
    *((uint64_t*)(dst + 0 * dstStride)) = _mm_extract_epi64(store0, 0);
    *((uint64_t*)(dst + 1 * dstStride)) = _mm_extract_epi64(store0, 1);
    *((uint64_t*)(dst + 2 * dstStride)) = _mm_extract_epi64(store1, 0);
    *((uint64_t*)(dst + 3 * dstStride)) = _mm_extract_epi64(store1, 1);
    *((uint64_t*)(dst + 4 * dstStride)) = _mm_extract_epi64(store2, 0);
    *((uint64_t*)(dst + 5 * dstStride)) = _mm_extract_epi64(store2, 1);
    *((uint64_t*)(dst + 6 * dstStride)) = _mm_extract_epi64(store3, 0);
    *((uint64_t*)(dst + 7 * dstStride)) = _mm_extract_epi64(store3, 1);
}
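As a quick standalone check (my own sketch, not part of the question), a single pshufb with transpose4x4mask indeed transposes a row-major 4x4 byte block held in one register:

#include <stdio.h>
#include <stdint.h>
#include <tmmintrin.h>   // SSSE3: _mm_shuffle_epi8

int main(void) {
    uint8_t m[16];
    for (int i = 0; i < 16; i++) m[i] = (uint8_t)i;          // rows: 0..3 / 4..7 / 8..11 / 12..15
    __m128i mask = _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2,
                                13,  9, 5, 1, 12,  8, 4, 0); // = transpose4x4mask
    __m128i t = _mm_shuffle_epi8(_mm_loadu_si128((__m128i*)m), mask);
    _mm_storeu_si128((__m128i*)m, t);
    for (int r = 0; r < 4; r++)                              // prints 0 4 8 12 / 1 5 9 13 / ...
        printf("%2d %2d %2d %2d\n", m[4*r], m[4*r+1], m[4*r+2], m[4*r+3]);
    return 0;
}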

Excluding the loads and stores, this procedure takes only 16 instructions instead of 24.

Am I missing something?
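For completeness, a trivial scalar reference to verify the SIMD versions against (my addition, not part of the question):

#include <stdint.h>

// Plain scalar 8x8 byte transpose with the same signature as the SIMD version.
void TransposeBlock8x8_scalar(uint8_t *src, uint8_t *dst, int srcStride, int dstStride) {
    for (int r = 0; r < 8; r++)
        for (int c = 0; c < 8; c++)
            dst[c * dstStride + r] = src[r * srcStride + c];
}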

wim*_*wim 6

Apart from the loads, the stores and the pinsrq-s for reading from and writing to memory, possibly with a stride not equal to 8 bytes, you can do the transpose with only 12 instructions (this code can easily be used in combination with Z boson's test code):

#include <tmmintrin.h>  // SSSE3: _mm_shuffle_epi8 (also pulls in SSE _mm_shuffle_ps)

void tran8x8b_SSE_v2(char *A, char *B) {
  __m128i pshufbcnst = _mm_set_epi8(15,11,7,3, 14,10,6,2, 13,9,5,1, 12,8,4,0);

  __m128i B0, B1, B2, B3, T0, T1, T2, T3;
  B0 = _mm_loadu_si128((__m128i*)&A[ 0]);
  B1 = _mm_loadu_si128((__m128i*)&A[16]);
  B2 = _mm_loadu_si128((__m128i*)&A[32]);
  B3 = _mm_loadu_si128((__m128i*)&A[48]);


  // Gather the four 4x4 blocks: even 32-bit lanes (left row halves) into T0/T1, odd lanes (right halves) into T2/T3.
  T0 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(B0),_mm_castsi128_ps(B1),0b10001000));
  T1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(B2),_mm_castsi128_ps(B3),0b10001000));
  T2 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(B0),_mm_castsi128_ps(B1),0b11011101));
  T3 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(B2),_mm_castsi128_ps(B3),0b11011101));

  // Transpose each 4x4 byte block with a single pshufb.
  B0 = _mm_shuffle_epi8(T0,pshufbcnst);
  B1 = _mm_shuffle_epi8(T1,pshufbcnst);
  B2 = _mm_shuffle_epi8(T2,pshufbcnst);
  B3 = _mm_shuffle_epi8(T3,pshufbcnst);

  // Interleave block rows into complete transposed rows, two per register.
  T0 = _mm_unpacklo_epi32(B0,B1);
  T1 = _mm_unpackhi_epi32(B0,B1);
  T2 = _mm_unpacklo_epi32(B2,B3);
  T3 = _mm_unpackhi_epi32(B2,B3);

  _mm_storeu_si128((__m128i*)&B[ 0], T0);
  _mm_storeu_si128((__m128i*)&B[16], T1);
  _mm_storeu_si128((__m128i*)&B[32], T2);
  _mm_storeu_si128((__m128i*)&B[48], T3);
}


Here we use the 32-bit floating point shuffle, which is more flexible than the epi32 shuffle. The casts do not generate extra instructions (code generated with gcc 5.4):

tran8x8b_SSE_v2:
.LFB4885:
    .cfi_startproc
    vmovdqu 48(%rdi), %xmm5
    vmovdqu 32(%rdi), %xmm2
    vmovdqu 16(%rdi), %xmm0
    vmovdqu (%rdi), %xmm1
    vshufps $136, %xmm5, %xmm2, %xmm4
    vshufps $221, %xmm5, %xmm2, %xmm2
    vmovdqa .LC6(%rip), %xmm5
    vshufps $136, %xmm0, %xmm1, %xmm3
    vshufps $221, %xmm0, %xmm1, %xmm1
    vpshufb %xmm5, %xmm3, %xmm3
    vpshufb %xmm5, %xmm1, %xmm0
    vpshufb %xmm5, %xmm4, %xmm4
    vpshufb %xmm5, %xmm2, %xmm1
    vpunpckldq  %xmm4, %xmm3, %xmm5
    vpunpckldq  %xmm1, %xmm0, %xmm2
    vpunpckhdq  %xmm4, %xmm3, %xmm3
    vpunpckhdq  %xmm1, %xmm0, %xmm0
    vmovups %xmm5, (%rsi)
    vmovups %xmm3, 16(%rsi)
    vmovups %xmm2, 32(%rsi)
    vmovups %xmm0, 48(%rsi)
    ret
    .cfi_endproc
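What makes the floating point shuffle more flexible is that _mm_shuffle_ps selects two 32-bit lanes from each of two source registers, whereas _mm_shuffle_epi32 reads a single source. A hypothetical standalone demo of the even-lane gather used above (mine, not from the answer):

#include <stdio.h>
#include <xmmintrin.h>   // SSE: _mm_shuffle_ps

int main(void) {
    __m128 a = _mm_setr_ps(0.f, 1.f, 2.f, 3.f);
    __m128 b = _mm_setr_ps(4.f, 5.f, 6.f, 7.f);
    // imm 0b10001000 picks lanes {0,2} of a and lanes {0,2} of b.
    __m128 even = _mm_shuffle_ps(a, b, 0b10001000);
    float out[4];
    _mm_storeu_ps(out, even);
    printf("%.0f %.0f %.0f %.0f\n", out[0], out[1], out[2], out[3]);  // 0 2 4 6
    return 0;
}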



On some (but not all) older cpus there may be a small bypass delay (between 0 and 2 cycles) when moving data between the integer and the floating point units. This increases the latency of the function, but it does not necessarily affect the throughput of the code.

A simple latency test with 1e9 transpositions:

  for (int i=0;i<500000000;i++){
     tran8x8b_SSE(A,C);
     tran8x8b_SSE(C,A);
  }
  print8x8b(A);
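For reference, a self-contained version of this test could look as follows; the definitions of A, C and print8x8b are not shown in the answer, so the ones below are assumptions. Note that the back-and-forth chain (A -> C -> A) makes every call depend on the previous one, so the loop measures latency rather than throughput.

#include <stdio.h>

void tran8x8b_SSE_v2(char *A, char *B);    // as defined above

static void print8x8b(char *A) {           // assumed helper: print the 8x8 matrix
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) printf("%4d", A[i * 8 + j]);
        printf("\n");
    }
}

int main(void) {
    char A[64], C[64];
    for (int i = 0; i < 64; i++) A[i] = (char)i;
    for (int i = 0; i < 500000000; i++) {
        tran8x8b_SSE_v2(A, C);
        tran8x8b_SSE_v2(C, A);             // transposing twice restores A
    }
    print8x8b(A);                          // keeps the compiler from deleting the loop
    return 0;
}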

This takes about 5.5 seconds (19.7e9 cycles) with tran8x8b_SSE and about 4.5 seconds (16.0e9 cycles) with tran8x8b_SSE_v2 (Intel Core i5-6500). Note that the loads and stores were not eliminated by the compiler, even though the functions were inlined in the for loop.


Update: AVX2-128 / SSE 4.1 solution with blends.

The "shuffles" (unpack, shuffle) are handled by port 5, at 1 instruction per cpu cycle on modern cpus. Sometimes it pays off to replace one "shuffle" with two blends. On Skylake the 32-bit blend instructions can run on port 0, 1 or 5.

Unfortunately, _mm_blend_epi32 is AVX2-128 only. An efficient SSE 4.1 alternative is _mm_blend_ps combined with a few casts (which are usually free). The 12 "shuffles" are replaced by a combination of 8 shuffles and 8 blends.
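As a standalone illustration of the trade (my sketch, not from the answer): one pre-shuffle plus two complementary blends yield two interleaved results, and only the shuffle competes for port 5:

#include <stdio.h>
#include <smmintrin.h>   // SSE 4.1: _mm_blend_ps

int main(void) {
    __m128 a = _mm_setr_ps(0.f, 1.f, 2.f, 3.f);
    __m128 b = _mm_setr_ps(4.f, 5.f, 6.f, 7.f);
    __m128 bswap = _mm_shuffle_ps(b, b, 0b10110001);   // {b1,b0,b3,b2}, port 5
    __m128 lo    = _mm_blend_ps(a, bswap, 0b1010);     // {a0,b0,a2,b2}, port 0/1/5
    __m128 hi    = _mm_blend_ps(a, bswap, 0b0101);     // {b1,a1,b3,a3}, port 0/1/5
    float out[8];
    _mm_storeu_ps(out,     lo);
    _mm_storeu_ps(out + 4, hi);
    for (int i = 0; i < 8; i++) printf("%.0f ", out[i]);  // 0 4 2 6 5 1 7 3
    printf("\n");
    return 0;
}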

The simple latency test now runs in about 3.6 seconds (13e9 cpu cycles), about 18% faster than tran8x8b_SSE_v2.

The code:

/* AVX2-128 version, sse 4.1 version see ---------------->       SSE 4.1 version of tran8x8b_AVX2_128()                                                              */
void tran8x8b_AVX2_128(char *A, char *B) {                   /*  void tran8x8b_SSE4_1(char *A, char *B) {                                                            */                                    
  __m128i pshufbcnst_0 = _mm_set_epi8(15, 7,11, 3,  
               13, 5, 9, 1,  14, 6,10, 2,  12, 4, 8, 0);     /*    __m128i pshufbcnst_0 = _mm_set_epi8(15, 7,11, 3,  13, 5, 9, 1,  14, 6,10, 2,  12, 4, 8, 0);       */                                    
  __m128i pshufbcnst_1 = _mm_set_epi8(13, 5, 9, 1,  
               15, 7,11, 3,  12, 4, 8, 0,  14, 6,10, 2);     /*    __m128i pshufbcnst_1 = _mm_set_epi8(13, 5, 9, 1,  15, 7,11, 3,  12, 4, 8, 0,  14, 6,10, 2);       */                                    
  __m128i pshufbcnst_2 = _mm_set_epi8(11, 3,15, 7,  
                9, 1,13, 5,  10, 2,14, 6,   8, 0,12, 4);     /*    __m128i pshufbcnst_2 = _mm_set_epi8(11, 3,15, 7,   9, 1,13, 5,  10, 2,14, 6,   8, 0,12, 4);       */                                    
  __m128i pshufbcnst_3 = _mm_set_epi8( 9, 1,13, 5,  
               11, 3,15, 7,   8, 0,12, 4,  10, 2,14, 6);     /*    __m128i pshufbcnst_3 = _mm_set_epi8( 9, 1,13, 5,  11, 3,15, 7,   8, 0,12, 4,  10, 2,14, 6);       */                                    
  __m128i B0, B1, B2, B3, T0, T1, T2, T3;                    /*    __m128 B0, B1, B2, B3, T0, T1, T2, T3;                                                            */                                    
                                                             /*                                                                                                      */                                    
  B0 = _mm_loadu_si128((__m128i*)&A[ 0]);                    /*    B0 = _mm_loadu_ps((float*)&A[ 0]);                                                                */                                    
  B1 = _mm_loadu_si128((__m128i*)&A[16]);                    /*    B1 = _mm_loadu_ps((float*)&A[16]);                                                                */                                    
  B2 = _mm_loadu_si128((__m128i*)&A[32]);                    /*    B2 = _mm_loadu_ps((float*)&A[32]);                                                                */                                    
  B3 = _mm_loadu_si128((__m128i*)&A[48]);                    /*    B3 = _mm_loadu_ps((float*)&A[48]);                                                                */                                    
                                                             /*                                                                                                      */                                    
  B1 = _mm_shuffle_epi32(B1,0b10110001);                     /*    B1 = _mm_shuffle_ps(B1,B1,0b10110001);                                                            */                                    
  B3 = _mm_shuffle_epi32(B3,0b10110001);                     /*    B3 = _mm_shuffle_ps(B3,B3,0b10110001);                                                            */                                    
  T0 = _mm_blend_epi32(B0,B1,0b1010);                        /*    T0 = _mm_blend_ps(B0,B1,0b1010);                                                                  */                                    
  T1 = _mm_blend_epi32(B2,B3,0b1010);                        /*    T1 = _mm_blend_ps(B2,B3,0b1010);                                                                  */                                    
  T2 = _mm_blend_epi32(B0,B1,0b0101);                        /*    T2 = _mm_blend_ps(B0,B1,0b0101);                                                                  */                                    
  T3 = _mm_blend_epi32(B2,B3,0b0101);                        /*    T3 = _mm_blend_ps(B2,B3,0b0101);                                                                  */                                    
                                                             /*                                                                                                      */                                    
  B0 = _mm_shuffle_epi8(T0,pshufbcnst_0);                    /*    B0 = _mm_castsi128_ps(_mm_shuffle_epi8(_mm_castps_si128(T0),pshufbcnst_0));                       */                                    
  B1 = _mm_shuffle_epi8(T1,pshufbcnst_1);                    /*    B1 = _mm_castsi128_ps(_mm_shuffle_epi8(_mm_castps_si128(T1),pshufbcnst_1));                       */                                    
  B2 = _mm_shuffle_epi8(T2,pshufbcnst_2);                    /*    B2 = _mm_castsi128_ps(_mm_shuffle_epi8(_mm_castps_si128(T2),pshufbcnst_2));                       */                                    
  B3 = _mm_shuffle_epi8(T3,pshufbcnst_3);                    /*    B3 = _mm_castsi128_ps(_mm_shuffle_epi8(_mm_castps_si128(T3),pshufbcnst_3));                       */                                    
                                                             /*                                                                                                      */                                    
  T0 = _mm_blend_epi32(B0,B1,0b1010);                        /*    T0 = _mm_blend_ps(B0,B1,0b1010);                                                                  */                                    
  T1 = _mm_blend_epi32(B0,B1,0b0101);                        /*    T1 = _mm_blend_ps(B0,B1,0b0101);                                                                  */                                    
  T2 = _mm_blend_epi32(B2,B3,0b1010);                        /*    T2 = _mm_blend_ps(B2,B3,0b1010);                                                                  */                                    
  T3 = _mm_blend_epi32(B2,B3,0b0101);                        /*    T3 = _mm_blend_ps(B2,B3,0b0101);                                                                  */                                    
  T1 = _mm_shuffle_epi32(T1,0b10110001);                     /*    T1 = _mm_shuffle_ps(T1,T1,0b10110001);                                                            */                                    
  T3 = _mm_shuffle_epi32(T3,0b10110001);                     /*    T3 = _mm_shuffle_ps(T3,T3,0b10110001);                                                            */                                    
                                                             /*                                                                                                      */                                    
  _mm_storeu_si128((__m128i*)&B[ 0], T0);                    /*    _mm_storeu_ps((float*)&B[ 0], T0);                                                                */                                    
  _mm_storeu_si128((__m128i*)&B[16], T1);                    /*    _mm_storeu_ps((float*)&B[16], T1);                                                                */                                    
  _mm_storeu_si128((__m128i*)&B[32], T2);                    /*    _mm_storeu_ps((float*)&B[32], T2);                                                                */                                    
  _mm_storeu_si128((__m128i*)&B[48], T3);                    /*    _mm_storeu_ps((float*)&B[48], T3);                                                                */                                    
}                                                            /*  }                                                                                                   */                                    