How to optimize the SIMD transpose function (8x4 => 4x8)?

I need to optimize the transposition of 8x4 and 4x8 float arrays using AVX. I am using Agner Fog's vector class library (VCL).

The real task is to build a BVH and compute min-max summaries. Transposition is used at the final stage of each cycle (the cycles are also parallelized with multithreading, but there can be many tasks).

The code now looks like this:

void transpose(register Vec4f (&fin)[8], register Vec8f (&mat)[4]) {
    for (int i = 0;i < 8;i++) {
        fin[i] = lookup<28>(Vec4i(0, 8, 16, 24) + i, (float *)mat);
    }
}

I need optimization options. How can this function be optimized for SIMD?


I recently wrote my own transpose variants (4x8 and 8x4) with the vector class library. Version 1.0.

void transpose(register Vec4f(&fin)[8], register Vec8f(&mat)[4]) {
    register Vec8f a00 = blend8f<0, 8, 1, 9, 2, 10, 3, 11>(mat[0], mat[1]);
    register Vec8f a10 = blend8f<0, 8, 1, 9, 2, 10, 3, 11>(mat[2], mat[3]);
    register Vec8f a01 = blend8f<4, 12, 5, 13, 6, 14, 7, 15>(mat[0], mat[1]);
    register Vec8f a11 = blend8f<4, 12, 5, 13, 6, 14, 7, 15>(mat[2], mat[3]);

    register Vec8f v0_1 = blend8f<0, 1, 8, 9, 2, 3, 10, 11>(a00, a10);
    register Vec8f v2_3 = blend8f<4, 5, 12, 13, 6, 7, 14, 15>(a00, a10);
    register Vec8f v4_5 = blend8f<0, 1, 8, 9, 2, 3, 10, 11>(a01, a11);
    register Vec8f v6_7 = blend8f<4, 5, 12, 13, 6, 7, 14, 15>(a01, a11);

    fin[0] = v0_1.get_low();
    fin[1] = v0_1.get_high();
    fin[2] = v2_3.get_low();
    fin[3] = v2_3.get_high();
    fin[4] = v4_5.get_low();
    fin[5] = v4_5.get_high();
    fin[6] = v6_7.get_low();
    fin[7] = v6_7.get_high();
}

void transpose(register Vec8f(&fin)[4], register Vec4f(&mat)[8]) {
    register Vec8f a0_1 = Vec8f(mat[0], mat[1]);
    register Vec8f a2_3 = Vec8f(mat[2], mat[3]);
    register Vec8f a4_5 = Vec8f(mat[4], mat[5]);
    register Vec8f a6_7 = Vec8f(mat[6], mat[7]);

    register Vec8f a00 = blend8f<0, 4, 8 , 12, 1, 5, 9 , 13>(a0_1, a2_3);
    register Vec8f a10 = blend8f<0, 4, 8 , 12, 1, 5, 9 , 13>(a4_5, a6_7);
    register Vec8f a01 = blend8f<2, 6, 10, 14, 3, 7, 11, 15>(a0_1, a2_3);
    register Vec8f a11 = blend8f<2, 6, 10, 14, 3, 7, 11, 15>(a4_5, a6_7);

    fin[0] = blend8f<0, 1, 2, 3, 8, 9, 10, 11>(a00, a10);
    fin[1] = blend8f<4, 5, 6, 7, 12, 13, 14, 15>(a00, a10);
    fin[2] = blend8f<0, 1, 2, 3, 8, 9, 10, 11>(a01, a11);
    fin[3] = blend8f<4, 5, 6, 7, 12, 13, 14, 15>(a01, a11);
}

Version 2.0 required.

+4
source share
2

In vectorclass, lookup compiles to a slow gather-style sequence — it is the wrong tool for a transpose with a fixed, compile-time index pattern.

Raw SSE/AVX intrinsics mix freely with vectorclass: __m128 and __m256 convert implicitly to and from Vec4f and Vec8f, so intrinsic-based code drops straight into VCL code.


The SSE intrinsics header xmmintrin.h provides the _MM_TRANSPOSE4_PS macro, which transposes a 4x4 matrix held in four 128-bit registers. It requires only SSE (not AVX), so it is widely portable. Here is its definition:

// Standard in-place 4x4 transpose from xmmintrin.h, built from two passes of
// _mm_shuffle_ps.  Pass 1 (immediates 0x44/0xEE) interleaves the low/high
// element pairs of each row pair; pass 2 (0x88/0xDD) picks the even/odd
// elements to assemble the final columns.  (Comments are kept outside the
// macro: a `//` comment before a `\` continuation would swallow the next line.)
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) {    \
  __m128 tmp3, tmp2, tmp1, tmp0;                      \
  tmp0 = _mm_shuffle_ps(row0, row1, 0x44);            \
  tmp2 = _mm_shuffle_ps(row0, row1, 0xEE);            \
  tmp1 = _mm_shuffle_ps(row2, row3, 0x44);            \
  tmp3 = _mm_shuffle_ps(row2, row3, 0xEE);            \
  row0 = _mm_shuffle_ps(tmp0, tmp1, 0x88);            \
  row1 = _mm_shuffle_ps(tmp0, tmp1, 0xDD);            \
  row2 = _mm_shuffle_ps(tmp2, tmp3, 0x88);            \
  row3 = _mm_shuffle_ps(tmp2, tmp3, 0xDD);            \
}

AVX treats each 256-bit register as two independent 128-bit SSE lanes. _mm256_shuffle_ps applies the same per-lane operation as the _mm version, so because the _mm and _mm256 shuffles behave identically within each lane, the same 4x4 pattern carries over: one pass through the macro's shuffle sequence on 256-bit registers transposes two 4x4 blocks at once, one in each 128-bit half.

The result is compact: apart from the loads and stores, the whole transpose takes about 12 instructions.

// Transpose a 4x8 float matrix (four 256-bit rows) into 8x4 (eight 128-bit
// rows) with the classic _MM_TRANSPOSE4_PS shuffle sequence.
// _mm256_shuffle_ps operates on each 128-bit lane independently, so the eight
// shuffles below transpose TWO 4x4 blocks at once: the low lanes transpose
// columns 0-3, the high lanes columns 4-7.
void Transpose4x8(__m128 dst[8], __m256 src[4]) {
  __m256 row0 = src[0], row1 = src[1], row2 = src[2], row3 = src[3];
  __m256 tmp3, tmp2, tmp1, tmp0;
  // 0x44 = (1,0,1,0): per lane, take {a0,a1,b0,b1}; 0xEE = (3,2,3,2): {a2,a3,b2,b3}.
  tmp0 = _mm256_shuffle_ps(row0, row1, 0x44);
  tmp2 = _mm256_shuffle_ps(row0, row1, 0xEE);
  tmp1 = _mm256_shuffle_ps(row2, row3, 0x44);
  tmp3 = _mm256_shuffle_ps(row2, row3, 0xEE);
  // 0x88 = (2,0,2,0) and 0xDD = (3,1,3,1) finish the 4x4 transpose in each lane.
  row0 = _mm256_shuffle_ps(tmp0, tmp1, 0x88);
  row1 = _mm256_shuffle_ps(tmp0, tmp1, 0xDD);
  row2 = _mm256_shuffle_ps(tmp2, tmp3, 0x88);
  row3 = _mm256_shuffle_ps(tmp2, tmp3, 0xDD);
  // Low 128-bit halves are output rows 0-3 (cast is free)...
  dst[0] = _mm256_castps256_ps128(row0);
  dst[1] = _mm256_castps256_ps128(row1);
  dst[2] = _mm256_castps256_ps128(row2);
  dst[3] = _mm256_castps256_ps128(row3);
  // ...high halves are output rows 4-7.
  dst[4] = _mm256_extractf128_ps(row0, 1);
  dst[5] = _mm256_extractf128_ps(row1, 1);
  dst[6] = _mm256_extractf128_ps(row2, 1);
  dst[7] = _mm256_extractf128_ps(row3, 1);
}

UPDATE: the reverse direction, 8x4 => 4x8, works the same way:

// Transpose an 8x4 float matrix (eight 128-bit rows) into 4x8 (four 256-bit
// rows) — the inverse of Transpose4x8.  Because the shuffles are per-lane,
// input rows i and i+4 are packed into the two lanes of one 256-bit vector
// up front, so the same 4x4 shuffle sequence transposes both halves at once.
void Transpose8x4(__m256 dst[4], __m128 src[8]) {
  // _mm256_setr_m128(lo, hi): row k pairs input rows k (low lane) and k+4 (high lane).
  __m256 row0 = _mm256_setr_m128(src[0], src[4]);
  __m256 row1 = _mm256_setr_m128(src[1], src[5]);
  __m256 row2 = _mm256_setr_m128(src[2], src[6]);
  __m256 row3 = _mm256_setr_m128(src[3], src[7]);
  __m256 tmp3, tmp2, tmp1, tmp0;
  // Same two-pass 4x4 transpose as _MM_TRANSPOSE4_PS, applied to both lanes.
  tmp0 = _mm256_shuffle_ps(row0, row1, 0x44);
  tmp2 = _mm256_shuffle_ps(row0, row1, 0xEE);
  tmp1 = _mm256_shuffle_ps(row2, row3, 0x44);
  tmp3 = _mm256_shuffle_ps(row2, row3, 0xEE);
  row0 = _mm256_shuffle_ps(tmp0, tmp1, 0x88);
  row1 = _mm256_shuffle_ps(tmp0, tmp1, 0xDD);
  row2 = _mm256_shuffle_ps(tmp2, tmp3, 0x88);
  row3 = _mm256_shuffle_ps(tmp2, tmp3, 0xDD);
  dst[0] = row0; dst[1] = row1; dst[2] = row2; dst[3] = row3;
}
+4

Here is a version based on the vector class library (VCL), so no raw intrinsics are needed in the source.

Following Stgatilov's suggestion it uses VCL's blend8f with in-lane patterns, so the compiler emits plain vshufps instructions (the generated assembly is shown below). The code:

// Transpose a 4x8 row-major float matrix at `a` into an 8x4 row-major matrix
// at `b` using VCL blend8f.  All blend index patterns stay within 128-bit
// lane boundaries, so the compiler lowers them to in-lane vshufps only
// (see the assembly listing below) — no cross-lane permutes are generated.
void tran8x4_AVX(float *a, float *b) {
    Vec8f tmp0, tmp1, tmp2, tmp3;
    Vec8f row0, row1, row2, row3;

    // Each Vec8f holds one 8-wide row of the 4x8 input.
    row0 = Vec8f().load(&a[8*0]);
    row1 = Vec8f().load(&a[8*1]);
    row2 = Vec8f().load(&a[8*2]);
    row3 = Vec8f().load(&a[8*3]);    

    // Pass 1: per-lane pair interleave (compiles to vshufps $68 / $238).
    tmp0 = blend8f<0, 1,  8, 9,  4, 5, 12, 13>(row0, row1);
    tmp2 = blend8f<2, 3, 10, 11, 6, 7, 14, 15>(row0, row1);
    tmp1 = blend8f<0, 1,  8, 9,  4, 5, 12, 13>(row2, row3);
    tmp3 = blend8f<2, 3, 10, 11, 6, 7, 14, 15>(row2, row3);

    // Pass 2: even/odd selection (vshufps $136 / $221) completes the transpose.
    row0 = blend8f<0, 2, 8, 10, 4, 6, 12, 14>(tmp0, tmp1);
    row1 = blend8f<1, 3, 9, 11, 5, 7, 13, 15>(tmp0, tmp1);
    row2 = blend8f<0, 2, 8, 10, 4, 6, 12, 14>(tmp2, tmp3);
    row3 = blend8f<1, 3, 9, 11, 5, 7, 13, 15>(tmp2, tmp3);

    // Low 128-bit halves are output rows 0-3, high halves rows 4-7.
    row0.get_low().store(&b[  4*0]);
    row1.get_low().store(&b[  4*1]);
    row2.get_low().store(&b[  4*2]);
    row3.get_low().store(&b[  4*3]);
    row0.get_high().store(&b[ 4*4]);
    row1.get_high().store(&b[ 4*5]);
    row2.get_high().store(&b[ 4*6]);
    row3.get_high().store(&b[ 4*7]);
}

(g++ -S -O3 -mavx test.cpp)

    vmovups 32(%rdi), %ymm4
    vmovups 64(%rdi), %ymm3
    vmovups (%rdi), %ymm1
    vmovups 96(%rdi), %ymm0
    vshufps $68, %ymm4, %ymm1, %ymm2
    vshufps $68, %ymm0, %ymm3, %ymm5
    vshufps $238, %ymm4, %ymm1, %ymm1
    vshufps $238, %ymm0, %ymm3, %ymm0
    vshufps $136, %ymm5, %ymm2, %ymm4
    vshufps $221, %ymm5, %ymm2, %ymm2
    vshufps $136, %ymm0, %ymm1, %ymm3
    vshufps $221, %ymm0, %ymm1, %ymm0
    vmovups %xmm4, (%rsi)
    vextractf128    $0x1, %ymm4, %xmm4
    vmovups %xmm2, 16(%rsi)
    vextractf128    $0x1, %ymm2, %xmm2
    vmovups %xmm3, 32(%rsi)
    vextractf128    $0x1, %ymm3, %xmm3
    vmovups %xmm0, 48(%rsi)
    vextractf128    $0x1, %ymm0, %xmm0
    vmovups %xmm4, 64(%rsi)
    vmovups %xmm2, 80(%rsi)
    vmovups %xmm3, 96(%rsi)
    vmovups %xmm0, 112(%rsi)
    vzeroupper
    ret
    .cfi_endproc

#include <stdio.h>
#include "vectorclass.h"

// Scalar reference transpose: `a` is a 4x8 row-major matrix, `b` receives the
// 8x4 row-major result (b[j][i] = a[i][j]).  Used to validate the AVX version.
void tran8x4(float *a, float *b) {
    // Walk the output in row-major order; element (j, i) of b is (i, j) of a.
    float *dst = b;
    for (int j = 0; j < 8; ++j) {
        for (int i = 0; i < 4; ++i) {
            *dst++ = a[i * 8 + j];
        }
    }
}

// AVX transpose of a 4x8 row-major float matrix at `a` into an 8x4 row-major
// matrix at `b`, written with VCL blend8f.  Every blend pattern stays within
// 128-bit lane boundaries, so each blend compiles to a single in-lane vshufps.
void tran8x4_AVX(float *a, float *b) {
    Vec8f tmp0, tmp1, tmp2, tmp3;
    Vec8f row0, row1, row2, row3;

    // Load the four 8-wide input rows.
    row0 = Vec8f().load(&a[8*0]);
    row1 = Vec8f().load(&a[8*1]);
    row2 = Vec8f().load(&a[8*2]);
    row3 = Vec8f().load(&a[8*3]);


    // Pass 1: per-lane interleave of element pairs from row pairs.
    tmp0 = blend8f<0, 1, 8, 9, 4, 5, 12, 13>(row0, row1);
    tmp2 = blend8f<2, 3, 10, 11, 6, 7, 14, 15>(row0, row1);
    tmp1 = blend8f<0, 1, 8, 9, 4, 5, 12, 13>(row2, row3);
    tmp3 = blend8f<2, 3, 10, 11, 6, 7, 14, 15>(row2, row3);

    // Pass 2: even/odd selection completes the per-lane 4x4 transpose.
    row0 = blend8f<0, 2, 8, 10, 4, 6, 12, 14>(tmp0, tmp1);
    row1 = blend8f<1, 3, 9, 11, 5, 7, 13, 15>(tmp0, tmp1);
    row2 = blend8f<0, 2, 8, 10, 4, 6, 12, 14>(tmp2, tmp3);
    row3 = blend8f<1, 3, 9, 11, 5, 7, 13, 15>(tmp2, tmp3);

    // Low 128-bit halves hold output rows 0-3, high halves rows 4-7.
    row0.get_low().store(&b[  4*0]);
    row1.get_low().store(&b[  4*1]);
    row2.get_low().store(&b[  4*2]);
    row3.get_low().store(&b[  4*3]);
    row0.get_high().store(&b[ 4*4]);
    row1.get_high().store(&b[ 4*5]);
    row2.get_high().store(&b[ 4*6]);
    row3.get_high().store(&b[ 4*7]);

}


// Demo/verification driver: fills a 4x8 matrix with 0..31, transposes it with
// both the scalar and the AVX routine, and prints all three matrices so the
// two results can be compared by eye.
int main() {
    float a[32], b1[32], b2[32];
    for (int k = 0; k < 32; ++k) a[k] = (float)k;

    // Print the 4x8 source matrix.
    for (int r = 0; r < 4; ++r) {
        for (int c = 0; c < 8; ++c) {
            printf("%2.0f ", a[r * 8 + c]);
        }
        puts("");
    }

    tran8x4(a, b1);
    tran8x4_AVX(a, b2);

    // Print the scalar 8x4 result...
    puts("");
    for (int r = 0; r < 8; ++r) {
        for (int c = 0; c < 4; ++c) {
            printf("%2.0f ", b1[r * 4 + c]);
        }
        puts("");
    }
    // ...and the AVX 8x4 result; the two blocks should be identical.
    puts("");
    for (int r = 0; r < 8; ++r) {
        for (int c = 0; c < 4; ++c) {
            printf("%2.0f ", b2[r * 4 + c]);
        }
        puts("");
    }
}
+1

Source: https://habr.com/ru/post/1618816/


All Articles