CudaMallocHost vs malloc for better performance shows no difference

I went through this site . From here I got this pinned memory using cudamallocHost, which gives better performance than cudamalloc. Then I use two different simple programs and checked the runtime as

using cudaMallocHost

#include <stdio.h>
#include <cuda.h>

// Kernel that executes on the CUDA device
__global__ void square_array(float *a, int N)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx<N) a[idx] = a[idx] * a[idx];
}

// main routine that executes on the host
int main(void)
{
    clock_t start;
    start=clock();/* Line 8 */
    clock_t finish;
  float *a_h, *a_d;  // Pointer to host & device arrays
  const int N = 100000;  // Number of elements in arrays
  size_t size = N * sizeof(float);
  cudaMallocHost((void **) &a_h, size);
  //a_h = (float *)malloc(size);        // Allocate array on host
  cudaMalloc((void **) &a_d, size);   // Allocate array on device
  // Initialize host array and copy it to CUDA device
  for (int i=0; i<N; i++) a_h[i] = (float)i;
  cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
  // Do calculation on device:
  int block_size = 4;
  int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
  square_array <<< n_blocks, block_size >>> (a_d, N);
  // Retrieve result from device and store it in host array
  cudaMemcpy(a_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
  // Print results
  for (int i=0; i<N; i++) printf("%d %f\n", i, a_h[i]);
  // Cleanup
  cudaFreeHost(a_h);
  cudaFree(a_d);
  finish = clock() - start;
      double interval = finish / (double)CLOCKS_PER_SEC; 
      printf("%f seconds elapsed", interval);
}

using malloc

#include <stdio.h>
#include <cuda.h>

// Kernel that executes on the CUDA device
__global__ void square_array(float *a, int N)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx<N) a[idx] = a[idx] * a[idx];
}

// main routine that executes on the host
int main(void)
{
    clock_t start;
    start=clock();/* Line 8 */
    clock_t finish;
  float *a_h, *a_d;  // Pointer to host & device arrays
  const int N = 100000;  // Number of elements in arrays
  size_t size = N * sizeof(float);
  a_h = (float *)malloc(size);        // Allocate array on host
  cudaMalloc((void **) &a_d, size);   // Allocate array on device
  // Initialize host array and copy it to CUDA device
  for (int i=0; i<N; i++) a_h[i] = (float)i;
  cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
  // Do calculation on device:
  int block_size = 4;
  int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
  square_array <<< n_blocks, block_size >>> (a_d, N);
  // Retrieve result from device and store it in host array
  cudaMemcpy(a_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
  // Print results
  for (int i=0; i<N; i++) printf("%d %f\n", i, a_h[i]);
  // Cleanup
  free(a_h); cudaFree(a_d);
  finish = clock() - start;
      double interval = finish / (double)CLOCKS_PER_SEC; 
      printf("%f seconds elapsed", interval);
}

here, during the execution of both programs, the execution time was almost the same. Is there something wrong with the implementation? What is the exact difference in performance in cudamalloc and cudamallochost?

, and also at each start execution time decreases

+4
source share
2 answers

, . , mememory. / , . , nvprof.

cudaMallocHost - malloc , "" , . , malloc. , ( "" ) .

, , , cudaMemcpy , cudaMallocHost , malloc.

, ?

  • (.. cudaMallocHost), . , , /.
  • (cudaMemcpyAsync) ( ) . .
+5

, cudaHostAlloc/cudaMallocHost . , nvprof -print-gpu-trace , memcpyHtoD memcpyDtoH. PCI2.0 6-8 /.

, cudaMemcpyAsync. , cudaMemcpyAsync, , . "" memcpys .

, , .

+2

Source: https://habr.com/ru/post/1536946/


All Articles