SSE Intrinsics and Loop Unrolling

I'm trying to optimize some loops, and I did it, but I wonder if I only partially did it. Say, for example, that I have this loop:

for(i=0;i<n;i++){
b[i] = a[i]*2;
}

unrolling it by a factor of 4 (the body repeated 3 extra times), it turns into the following:

int unroll = (n/4)*4;
for(i=0;i<unroll;i+=4)
{
b[i] = a[i]*2;
b[i+1] = a[i+1]*2;
b[i+2] = a[i+2]*2;
b[i+3] = a[i+3]*2;
}
for(;i<n;i++)
{
b[i] = a[i]*2;
} 

Now, is the equivalent SSE translation this:

__m128 ai_v = _mm_loadu_ps(&a[i]);
__m128 two_v = _mm_set1_ps(2);
__m128 ai2_v = _mm_mul_ps(ai_v, two_v);
_mm_storeu_ps(&b[i], ai2_v);

or that:

__m128 ai_v = _mm_loadu_ps(&a[i]);
__m128 two_v = _mm_set1_ps(2);
__m128 ai2_v = _mm_mul_ps(ai_v, two_v);
_mm_storeu_ps(&b[i], ai2_v);

__m128 ai1_v = _mm_loadu_ps(&a[i+1]);
__m128 two1_v = _mm_set1_ps(2);
__m128 ai_1_2_v = _mm_mul_ps(ai1_v, two1_v);
_mm_storeu_ps(&b[i+1], ai_1_2_v);

__m128 ai2_v = _mm_loadu_ps(&a[i+2]);
__m128 two2_v = _mm_set1_ps(2);
__m128 ai_2_2_v = _mm_mul_ps(ai2_v, two2_v);
_mm_storeu_ps(&b[i+2], ai_2_2_v);

__m128 ai3_v = _mm_loadu_ps(&a[i+3]);
__m128 two3_v = _mm_set1_ps(2);
__m128 ai_3_2_v = _mm_mul_ps(ai3_v, two3_v);
_mm_storeu_ps(&b[i+3], ai_3_2_v);

I got a little confused in the code section:

for(;i<n;i++)
{
b[i] = a[i]*2;
}

what does it do? Is it just there to handle the leftover iterations, for example when the trip count is not divisible by the factor you unroll by? Thanks.

+2
source share
2 answers

The answer is the first block:

    __m128 ai_v = _mm_loadu_ps(&a[i]);
    __m128 two_v = _mm_set1_ps(2);
    __m128 ai2_v = _mm_mul_ps(ai_v,two_v);
    _mm_storeu_ps(&b[i],ai2_v);

It already processes four elements at a time.

Here is the complete program with the equivalent code section:

#include <xmmintrin.h>  // SSE intrinsics: _mm_loadu_ps, _mm_set1_ps, _mm_mul_ps, _mm_storeu_ps

#include <iostream>

int main()
{
    // Demonstrates that ONE SSE block already processes four floats per
    // iteration: _mm_loadu_ps/_mm_storeu_ps move 128 bits = 4 x float.
    int i{0};
    float a[10] ={1,2,3,4,5,6,7,8,9,10};
    float b[10] ={0,0,0,0,0,0,0,0,0,0};

    int n = 10;
    int unroll = (n/4)*4;               // largest multiple of 4 not exceeding n

    // Loop-invariant constant vector {2,2,2,2}: hoisted out of the loop
    // instead of being rebuilt every iteration.
    const __m128 two_v = _mm_set1_ps(2);

    for (i=0; i<unroll; i+=4) {
        // Vector equivalent of the scalar statements:
        //   b[i]   = a[i]*2;   b[i+1] = a[i+1]*2;
        //   b[i+2] = a[i+2]*2; b[i+3] = a[i+3]*2;
        __m128 ai_v  = _mm_loadu_ps(&a[i]);   // load a[i..i+3] (unaligned OK)
        __m128 ai2_v = _mm_mul_ps(ai_v, two_v);
        _mm_storeu_ps(&b[i], ai2_v);          // store b[i..i+3]
    }

    // Scalar epilogue: handles the 0..3 leftover elements when n % 4 != 0.
    for (; i<n; i++) {
        b[i] = a[i]*2;
    }

    for (auto i : a) { std::cout << i << "\t"; }
    std::cout << "\n";
    for (auto i : b) { std::cout << i << "\t"; }
    std::cout << "\n";

    return 0;
}

If you compare the generated assembly, the unaligned intrinsics (`_mm_loadu_ps`/`_mm_storeu_ps`) compile to `movups`, while the aligned ones (`_mm_load_ps`/`_mm_store_ps`) compile to `movaps`.

Here is the benchmark program I used to compare the variants:

#include <iostream>
//#define NO_UNROLL
//#define UNROLL
//#define SSE_UNROLL
#define SSE_UNROLL_ALIGNED

int main()
{
    // Benchmark harness: enable exactly one of the #defines above to pick a
    // variant.  NOTE(review): the _mm_* intrinsics additionally require
    // #include <xmmintrin.h> (or <immintrin.h>) next to <iostream>.
    const size_t array_size = 100003;   // deliberately NOT a multiple of 4,
                                        // so the scalar epilogue has work to do
#ifdef SSE_UNROLL_ALIGNED
    // alignas(16) is the standard C++11 spelling of MSVC's
    // __declspec(align(16)) and works on every major compiler; 16-byte
    // alignment is what _mm_load_ps / _mm_store_ps require.  (Aligning the
    // plain int i is harmless but unnecessary; kept to mirror the benchmark.)
    alignas(16) int i{0};
    alignas(16) float a[array_size] ={1,2,3,4,5,6,7,8,9,10};
    alignas(16) float b[array_size] ={0,0,0,0,0,0,0,0,0,0};
#endif
#ifndef SSE_UNROLL_ALIGNED
    int i{0};
    float a[array_size] ={1,2,3,4,5,6,7,8,9,10};
    float b[array_size] ={0,0,0,0,0,0,0,0,0,0};
#endif

    int n = array_size;
    int unroll = (n/4)*4;               // largest multiple of 4 <= n

    // Repeat the whole pass many times so the timing is measurable.
    for (size_t j{0}; j < 100000; ++j) {
#ifdef NO_UNROLL
        // Baseline: plain scalar loop; modern compilers auto-vectorize this.
        for (i=0; i<n; i++) {
            b[i] = a[i]*2;
        }
#endif
#ifdef UNROLL
        // Manual 4x unroll, still scalar.
        for (i=0; i<unroll; i+=4) {
            b[i] = a[i]*2;
            b[i+1] = a[i+1]*2;
            b[i+2] = a[i+2]*2;
            b[i+3] = a[i+3]*2;
        }
#endif
#ifdef SSE_UNROLL
        // SSE with unaligned loads/stores (compiles to movups).
        for (i=0; i<unroll; i+=4) {
            __m128 ai_v = _mm_loadu_ps(&a[i]);
            __m128 two_v = _mm_set1_ps(2);
            __m128 ai2_v = _mm_mul_ps(ai_v,two_v);
            _mm_storeu_ps(&b[i],ai2_v);
        }
#endif
#ifdef SSE_UNROLL_ALIGNED
        // SSE with aligned loads/stores (movaps) -- valid only because a and
        // b are alignas(16) above.
        for (i=0; i<unroll; i+=4) {
            __m128 ai_v = _mm_load_ps(&a[i]);
            __m128 two_v = _mm_set1_ps(2);
            __m128 ai2_v = _mm_mul_ps(ai_v,two_v);
            _mm_store_ps(&b[i],ai2_v);
        }
#endif
#ifndef NO_UNROLL
        // Scalar epilogue: the n % 4 leftover elements.
        for (; i<n; i++) {
            b[i] = a[i]*2;
        }
#endif
    }

    //for (auto i : a) { std::cout << i << "\t"; }
    //std::cout << "\n";
    //for (auto i : b) { std::cout << i << "\t"; }
    //std::cout << "\n";

    return 0;
}

Timing results on 32-bit (x86):

  • NO_UNROLL: 0.994 s — the compiler auto-vectorized the plain loop with SSE
  • UNROLL: 3.511 s — uses movups
  • SSE_UNROLL: 3.315 s — uses movups
  • SSE_UNROLL_ALIGNED: 3.276 s — uses movaps

So the plain, un-unrolled loop was by far the fastest: the compiler vectorized it on its own. The aligned movaps variant was only marginally faster than the unaligned one.

The 64-bit (x64) results were:

  • NO_UNROLL: 1.138 s — SSE used by the compiler
  • UNROLL: 1.409 s — SSE used by the compiler
  • SSE_UNROLL: 1.420 s — SSE, manual unrolling gains nothing!
  • SSE_UNROLL_ALIGNED: 1.476 s — SSE, manual unrolling gains nothing!

In other words, MSVC recognizes the pattern and vectorizes it itself — the hand-unrolled versions only get in the optimizer's way.

+2

In general, there is no need to unroll loops for SSE by hand — the compiler already does it for you. For example, here is the assembly a compiler emits for this loop with vectorization enabled:

# foo: the compiler's own vectorization of  for(i=0;i<n;i++) b[i]=a[i]*2;
# over global int arrays a[] and b[].
# %edi: element count n (first integer argument -- presumably SysV x86-64 ABI).
foo:
.LFB0:
    .cfi_startproc
    testl   %edi, %edi
    jle .L7                          # n <= 0: nothing to do
    movl    %edi, %esi
    shrl    $2, %esi                 # esi = n / 4  (vector iteration count)
    cmpl    $3, %edi
    leal    0(,%rsi,4), %eax         # eax = (n/4)*4 (elements the SIMD loop covers)
    jbe .L8                          # n <= 3: skip the vector loop entirely
    testl   %eax, %eax
    je  .L8
    vmovdqa .LC0(%rip), %xmm1        # xmm1 = constant at .LC0 (presumably {2,2,2,2})
    xorl    %edx, %edx               # rdx = byte offset into a/b
    xorl    %ecx, %ecx               # ecx = vector iteration counter
    .p2align 4,,10
    .p2align 3
.L6:                                 # --- vector loop: 4 ints per iteration ---
    addl    $1, %ecx
    vpmulld a(%rdx), %xmm1, %xmm0    # xmm0 = a[i..i+3] * xmm1 (packed 32-bit multiply)
    vmovdqa %xmm0, b(%rdx)           # b[i..i+3] = xmm0 (aligned store)
    addq    $16, %rdx                # advance 16 bytes = 4 ints
    cmpl    %esi, %ecx
    jb  .L6
    cmpl    %eax, %edi               # any leftover elements past the SIMD loop?
    je  .L7                          # no: n was a multiple of 4, done
    .p2align 4,,10
    .p2align 3
.L9:                                 # --- scalar epilogue: one element per iteration ---
    movslq  %eax, %rdx
    addl    $1, %eax
    movl    a(,%rdx,4), %ecx
    addl    %ecx, %ecx               # *2 computed as an add
    cmpl    %eax, %edi
    movl    %ecx, b(,%rdx,4)
    jg  .L9
.L7:
    rep
    ret                              # "rep ret": return idiom for older AMD branch predictors
.L8:
    xorl    %eax, %eax               # no vector iterations ran; epilogue starts at element 0
    jmp .L9
    .cfi_endproc

As you can see, the compiler both vectorized and unrolled the loop on its own — doing it manually adds nothing here.

Trust your compiler.

+3

Source: https://habr.com/ru/post/1662724/


All Articles