GCC code generation when passing by reference vs. by value

I have a simple function that calculates the product of two double arrays:

#include <stdlib.h>
#include <emmintrin.h>

// Three equally sized double arrays; f() computes z[i] = x[i] * y[i].
// Without __restrict__, the compiler must assume that a store through z
// could alias the S object itself (or the other pointers), which is what
// the codegen discussion below is about.
struct S {
    double *x;   // first input array  (16-byte aligned — see posix_memalign in main)
    double *y;   // second input array (16-byte aligned)
    double *z;   // output array       (16-byte aligned)
};

// Element-wise product: s.z[i] = s.x[i] * s.y[i], two doubles per iteration
// via SSE2. Uses aligned loads/stores, so all three arrays must be 16-byte
// aligned, and assumes n is even — TODO confirm at call sites.
// NOTE(review): i is int while n is size_t; the comparison i < n promotes i
// to an unsigned type, and i would overflow for n > INT_MAX.
void f(S& s, size_t n) {
    for (int i = 0; i < n; i += 2) {
        __m128d xs = _mm_load_pd(&s.x[i]);   // aligned load of x[i], x[i+1]
        __m128d ys = _mm_load_pd(&s.y[i]);   // aligned load of y[i], y[i+1]
        _mm_store_pd(&s.z[i], _mm_mul_pd(xs, ys) );  // z[i..i+1] = x*y
    }
    return;
}

// Demo driver: allocate three 16-byte-aligned arrays of `size` doubles,
// initialize the inputs, run the vectorized product, and release the memory.
// Returns 0 on success, 1 if any allocation fails.
int main(void) {
    S s;
    size_t size = 4;
    // posix_memalign returns 0 on success; the pointer is valid only then.
    if (posix_memalign((void **)&s.x, 16, sizeof(double) * size) != 0 ||
        posix_memalign((void **)&s.y, 16, sizeof(double) * size) != 0 ||
        posix_memalign((void **)&s.z, 16, sizeof(double) * size) != 0) {
        return 1;
    }
    // Initialize the inputs so f() does not read indeterminate values.
    for (size_t i = 0; i < size; ++i) {
        s.x[i] = (double)i;
        s.y[i] = 1.0;
    }
    f(s, size);
    free(s.x);
    free(s.y);
    free(s.z);
    return 0;
}

Note that the first argument to f is passed by reference. Let's look at the resulting assembly of f() (I deleted some irrelevant snippets, inserted comments, and used some shortcuts):

$ g++ -O3 -S asmtest.cpp 


        .globl      _Z1fR1Sm
_Z1fR1Sm:
        xorl        %eax, %eax
        testq       %rsi, %rsi
        je  .L1
.L5:
        movq        (%rdi), %r8             # array x   (1)
        movq        8(%rdi), %rcx           # array y   (2)
        movq        16(%rdi), %rdx          # array z   (3)
        movapd      (%r8,%rax,8), %xmm0     # load x[0]
        mulpd       (%rcx,%rax,8), %xmm0    # multiply x[0]*y[0]
        movaps      %xmm0, (%rdx,%rax,8)    # store to z
        addq        $2, %rax                # and loop
        cmpq        %rax, %rsi
        ja  .L5

Note that the addresses of the arrays x, y, z are loaded into universal registers at each iteration, see statements (1), (2), (3). Why doesn't gcc move these instructions outside the loop?

Now create a local copy (not a deep copy) of the structure:

// Same kernel, but first makes a shallow local copy of the struct.
// The loop then reads the array base addresses from the local s, whose
// pointer members are not reachable through &s.z[i], so the compiler can
// keep them in registers instead of reloading them each iteration.
void __attribute__((noinline)) f(S& args, size_t n) {
    // Shallow copy: copies the three pointers, not the arrays they point to.
    S s = args;
    for (int i = 0; i < n; i += 2) {
        __m128d xs = _mm_load_pd(&s.x[i]);   // aligned load of x[i], x[i+1]
        __m128d ys = _mm_load_pd(&s.y[i]);   // aligned load of y[i], y[i+1]
        _mm_store_pd(&s.z[i], _mm_mul_pd(xs, ys) );  // z[i..i+1] = x*y
    }
    return;
}

Assembly:

_Z1fR1Sm:
.LFB525:
        .cfi_startproc
        xorl        %eax, %eax
        testq       %rsi, %rsi
        movq        (%rdi), %r8     # (1)
        movq        8(%rdi), %rcx   # (2)
        movq        16(%rdi), %rdx  # (3)
        je  .L1
.L5:
        movapd      (%r8,%rax,8), %xmm0
        mulpd       (%rcx,%rax,8), %xmm0
        movaps      %xmm0, (%rdx,%rax,8)
        addq        $2, %rax
        cmpq        %rax, %rsi
        ja  .L5
.L1:
        rep ret

Please note that unlike the previous code, (1), (2), (3) are outside the loop.

I would appreciate an explanation of why these two assembly listings are different. Is memory aliasing relevant here? Thanks.

$gcc --version gcc (Debian 5.2.1-21) 5.2.1 20151003

+4
1

In the first version, gcc must reload s.x, s.y and s.z on every iteration because it cannot prove that the store through &s.z[i] does not modify the S object that the reference parameter of f(S&, size_t) refers to — the pointers might alias the struct itself.

With gcc 5.2.0, adding __restrict__ to S::z and to the S reference parameter of f() resolves the aliasing ambiguity:

// Same layout as before, but z is __restrict__-qualified: the compiler may
// assume that stores through z do not alias anything accessed through other
// pointers, so the pointer loads can be hoisted out of the loop.
struct S {
    double *x;                  // first input array
    double *y;                  // second input array
    double *__restrict__ z;     // output array; declared non-aliasing
};

// Same kernel as the original, but with the reference parameter (and S::z)
// __restrict__-qualified, so the compiler is told the store through s.z
// cannot modify the struct's pointer members and can hoist their loads
// out of the loop.
void f(S&__restrict__ s, size_t n) {
    for (int i = 0; i < n; i += 2) {
        __m128d xs = _mm_load_pd(&s.x[i]);   // aligned load of x[i], x[i+1]
        __m128d ys = _mm_load_pd(&s.y[i]);   // aligned load of y[i], y[i+1]
        _mm_store_pd(&s.z[i], _mm_mul_pd(xs, ys));  // z[i..i+1] = x*y
    }
    return;
}

With these changes gcc generates:

__Z1fR1Sm:
LFB518:
    testq   %rsi, %rsi
    je  L1
    movq    (%rdi), %r8
    xorl    %eax, %eax
    movq    8(%rdi), %rcx
    movq    16(%rdi), %rdx
    .align 4,0x90
L4:
    movapd  (%r8,%rax,8), %xmm0
    mulpd   (%rcx,%rax,8), %xmm0
    movaps  %xmm0, (%rdx,%rax,8)
    addq    $2, %rax
    cmpq    %rax, %rsi
    ja  L4
L1:
    ret

Apple Clang 700.1.76 hoists the loads with __restrict__ applied to S as above:

__Z1fR1Sm:                              ## @_Z1fR1Sm
    .cfi_startproc
## BB#0:
    pushq   %rbp
Ltmp0:
    .cfi_def_cfa_offset 16
Ltmp1:
    .cfi_offset %rbp, -16
    movq    %rsp, %rbp
Ltmp2:
    .cfi_def_cfa_register %rbp
    testq   %rsi, %rsi
    je  LBB0_3
## BB#1:                                ## %.lr.ph
    movq    (%rdi), %rax
    movq    8(%rdi), %rcx
    movq    16(%rdi), %rdx
    xorl    %edi, %edi
    .align  4, 0x90
LBB0_2:                                 ## =>This Inner Loop Header: Depth=1
    movapd  (%rax,%rdi,8), %xmm0
    mulpd   (%rcx,%rdi,8), %xmm0
    movapd  %xmm0, (%rdx,%rdi,8)
    addq    $2, %rdi
    cmpq    %rsi, %rdi
    jb  LBB0_2
LBB0_3:                                 ## %._crit_edge
    popq    %rbp
    retq
    .cfi_endproc
+2

Source: https://habr.com/ru/post/1612059/


All Articles