Why doesn't the numpy array look much faster than the standard python list?

From what I understand, numpy arrays can process operations faster than python lists, because they are processed in parallel rather than iterative order. I tried to test it out for fun, but I did not see much of a difference.

Was there something wrong with my test? Does the difference only matter with arrays much larger than the ones I used? I tried to create a python list and a numpy array in each function to undo the differences creating one and the other, but the time delta really seems insignificant. Here is my code:

My final outputs were numpy function: 6.534756324786595s, list function: 6.559365831783256s

import timeit
import numpy as np

a_setup = 'import timeit; import numpy as np'

std_fx = '''
def operate_on_std_array():
    std_arr = list(range(0,1000000))
    np_arr = np.asarray(std_arr)
    for index,elem in enumerate(std_arr):
        std_arr[index] = (elem**20)*63134
    return std_arr
'''
parallel_fx = '''
def operate_on_np_arr():
    std_arr = list(range(0,1000000))
    np_arr = np.asarray(std_arr)
    np_arr = (np_arr**20)*63134
    return np_arr
'''

def operate_on_std_array():
    std_arr = list(range(0,1000000))
    np_arr = np.asarray(std_arr)
    for index,elem in enumerate(std_arr):
        std_arr[index] = (elem**20)*63134
    return std_arr

def operate_on_np_arr():
    std_arr = list(range(0,1000000))
    np_arr = np.asarray(std_arr)
    np_arr = (np_arr**20)*63134
    return np_arr


print('std',timeit.timeit(setup = a_setup, stmt = std_fx, number = 80000000))
print('par',timeit.timeit(setup = a_setup, stmt = parallel_fx, number = 80000000))



#operate_on_np_arr()
#operate_on_std_array()
+4
source share
2

timeit docs , , , -, , . , 80000000 1- .

, :

  • np_arr = (np_arr**20)*63134 np_arr, Python .
  • Numpy Python. 100**20 Python , Python , Numpy C-, . ( , , C, Numpy, , .)

, , , 31 , :

import numpy as np
import timeit

std_arr = list(range(0,100000))
np_arr = np.array(std_arr)
np_arr_vec = np.vectorize(lambda n: (n * 31) / 31)

def operate_on_std_array():
    for index,elem in enumerate(std_arr):
        std_arr[index] = elem * 31
        std_arr[index] = elem / 31
    return std_arr

def operate_on_np_arr():
    np_arr_vec(np_arr)
    return np_arr


import time
def test_time(f):
    count = 100
    start = time.time()
    for i in range(count):
        f()
    dur = time.time() - start
    return dur

print(test_time(operate_on_std_array))
print(test_time(operate_on_np_arr))

:

3.0798873901367188 # standard array time
2.221336841583252 # np array time

: @user2357112, Numpy :

def operate_on_np_arr():
    global np_arr
    np_arr *= 31
    np_arr //= 31 # integer division, not double
    return np_arr

. 0.1248 .

+4

ipython / . :

In [103]: %%timeit alist = list(range(10000))
     ...: for i,e in enumerate(alist):
     ...:    alist[i] = (e*3)*20
     ...: 
4.13 ms ± 146 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)

In [104]: %%timeit arr = np.arange(10000)
     ...: z = (arr*3)*20
     ...: 
20.6 µs ± 439 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)

In [105]: %%timeit alist = list(range(10000))
     ...: z = [(e*3)*20 for e in alist]
     ...: 
     ...: 
1.71 ms ± 2.69 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)

:

In [106]: %%timeit alist = list(range(10000))
     ...: arr = np.array(alist)
     ...: z = (arr*3)*20
     ...: 
     ...: 
1.01 ms ± 43.6 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)

, . **3 , 2 . .

+1

Source: https://habr.com/ru/post/1696027/


All Articles