From transaction data to a list of collections in an efficient way

I have a csv file with transaction data of the following form

import pandas as pd
df = pd.DataFrame({'OrderID':[1,1,1,1,2,2], 'ItemID':[1,2,3,4,1,2]})
print(df)
   ItemID  OrderID
0       1        1
1       2        1
2       3        1
3       4        1
4       1        2
5       2        2

I want to get a list that contains, for each OrderID, the set of its ItemIDs.

This can be obtained using

df.groupby('OrderID').apply(lambda x: set(x['ItemID'])).tolist()
[{1, 2, 3, 4}, {1, 2}]

However, on a csv file with 9 million lines this takes some time, so I am wondering if there is a faster way. I am interested in any solution, whether it uses pandas or works directly on the .csv file.
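For reference, the direct-csv route I have in mind would be a single streaming pass of this kind (a minimal sketch with the standard csv module; the file name is hypothetical and the IDs come back as strings):

import csv
from collections import defaultdict

def sets_from_csv(path='transactions.csv'):  # hypothetical file name
    # One pass over the file: map each OrderID to the set of its ItemIDs.
    d = defaultdict(set)
    with open(path, newline='') as f:
        for row in csv.DictReader(f):  # assumes a header row with these column names
            d[row['OrderID']].add(row['ItemID'])
    return list(d.values())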


First of all, I want to thank you guys for your amazing contributions! I took a sample of 50,000 OrderIDs (and their corresponding items) from my real data and applied several of the proposed methods to it. Here are the results:

[Benchmark results chart: the solutions from pir and divakar came out fastest on my sample; the surviving timings read 5.05, 2.32 and 115.]


Approach #1: set as output -

import numpy as np

def divakar_v1(df):
    a = df.values
    sidx = a[:,1].argsort() # Use .argsort(kind='mergesort') to keep order
    cut_idx = np.nonzero(a[sidx[1:],1] > a[sidx[:-1],1])[0]+1
    out = np.split(a[sidx,0], cut_idx)
    return list(map(set,out))
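A note of my own on using these functions: they read OrderID from the second column of df.values, which matches the question's printout (an older pandas sorted the columns alphabetically, putting ItemID first). A newer pandas keeps the dict insertion order, so it is safest to pass the columns explicitly:

# Usage sketch: select the columns in the order the function expects,
# ItemID first and OrderID second.
print(divakar_v1(df[['ItemID', 'OrderID']]))
# [{1, 2, 3, 4}, {1, 2}]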

Approach #2: set as output -

def divakar_v2(df):
    data = df.values
    a = data[data[:,1].argsort()] # Use .argsort(kind='mergesort') to keep order
    stop = np.append(np.nonzero(a[1:,1] > a[:-1,1])[0]+1, len(a)) # group boundaries (len(a), not a.size: a is 2-D)
    start = np.append(0, stop[:-1])
    out_set = [set(a[start[i]:stop[i],0]) for i in range(len(start))]
    return out_set

If lists are acceptable as output instead of sets, the set() conversion for each 'OrderID'/'ItemID' group can be skipped, which saves some work. Two variants along those lines follow.

Approach #3: list of lists as output -

def divakar_v3(df):
    a = df.values
    sidx = a[:,1].argsort() # Use .argsort(kind='mergesort') to keep order
    cut_idx = np.nonzero(a[sidx[1:],1] > a[sidx[:-1],1])[0]+1
    out = np.split(a[sidx,0], cut_idx)
    return list(map(list,out))

Approach #4: list of lists as output -

def divakar_v4(df):
    data = df.values
    a = data[data[:,1].argsort()] # Use .argsort(kind='mergesort') to keep order
    stop = np.append(np.nonzero(a[1:,1] > a[:-1,1])[0]+1, len(a)) # group boundaries (len(a), not a.size: a is 2-D)
    start = np.append(0, stop[:-1])
    a0 = a[:,0].tolist() # convert to a Python list once; per-group slicing is then cheap
    return [a0[start[i]:stop[i]] for i in range(len(start))]

Runtime test -

In [145]: np.random.seed(123)
     ...: N = 100000
     ...: df = pd.DataFrame(np.random.randint(30,size=(N,2)))
     ...: df.columns = ['ItemID','OrderID']
     ...: 

In [146]: %timeit divakar_v1(df)
     ...: %timeit divakar_v2(df)
     ...: %timeit divakar_v3(df)
     ...: %timeit divakar_v4(df)
     ...: 
10 loops, best of 3: 21.1 ms per loop
10 loops, best of 3: 21.7 ms per loop
100 loops, best of 3: 16.7 ms per loop
100 loops, best of 3: 12.3 ms per loop


Using a defaultdict of sets:

from collections import defaultdict
import numpy as np

def pir(df):
    d = defaultdict(set)
    for n, g in df.groupby('OrderID').ItemID:
        d[n].update(g.values.tolist())

    return list(d.values())
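For comparison, the same defaultdict idea can be written without groupby at all, in one pass over the raw columns (a sketch of mine, not part of the timed runs; pir_zip is a hypothetical name):

def pir_zip(df):
    # One pass over the rows: add each ItemID to its OrderID's set.
    d = defaultdict(set)
    for order, item in zip(df['OrderID'].tolist(), df['ItemID'].tolist()):
        d[order].add(item)
    return list(d.values())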

df = pd.DataFrame(dict(OrderID=np.random.randint(0, 1000, 10000000),
                       ItemID=np.random.randint(0, 1000, 10000000)))

[benchmark plot]


Another option is np.unique with return_inverse=True:

uo, io = np.unique(df.OrderID.values, return_inverse=True)
ui, ii = np.unique(df.ItemID.values, return_inverse=True)

def gu(i):
    return set(ui[ii[io == i]].tolist())

[gu(i) for i in range(len(uo))]

[{1, 2, 3, 4}, {1, 2}]
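To make the mechanics concrete, here is a small illustration of the NumPy behaviour this relies on (my own example): return_inverse maps every row to the position of its value in the sorted unique array, so io == i is a boolean mask picking out the rows of group i.

oid = np.array([1, 1, 1, 1, 2, 2])
uo, io = np.unique(oid, return_inverse=True)
print(uo)  # [1 2]         -- the sorted unique OrderIDs
print(io)  # [0 0 0 0 1 1] -- index into uo for every row

Note that each gu(i) call scans the full io array, so the total cost grows with the number of groups times the number of rows.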


The functions compared in the benchmarks below:

def pir(df):
    uo, io = np.unique(df.OrderID.values, return_inverse=True)
    ui, ii = np.unique(df.ItemID.values, return_inverse=True)

    def gu(i):
        return set(ui[ii[io == i]].tolist())

    return [gu(i) for i in range(len(uo))]

def jez(df):
    arr = df.groupby('OrderID')['ItemID'].unique().values
    return [set(v) for v in arr]

def div(df):
    a = df.values
    sidx = a[:,1].argsort(kind='mergesort')
    cut_idx = np.nonzero(a[sidx[1:],1] > a[sidx[:-1],1])[0]+1
    out = np.split(a[sidx,0], cut_idx)
    return list(map(set,out))

def quik(df):
    return df.groupby('OrderID').apply(lambda x: set(x['ItemID'])).tolist()


[benchmark plot]

df = pd.DataFrame(dict(OrderID=np.random.randint(0, 10, 10000),
                       ItemID=np.random.randint(0, 10, 10000)))

[benchmark plot]

df = pd.DataFrame(dict(OrderID=np.random.randint(0, 10, 10000000),
                       ItemID=np.random.randint(0, 10, 10000000)))

[benchmark plot]


Use SeriesGroupBy.unique and convert each resulting numpy array to a set in a list comprehension:

arr = df.groupby('OrderID')['ItemID'].unique().values
print (arr)
[array([1, 2, 3, 4], dtype=int64) array([1, 2], dtype=int64)]

print ([set(v) for v in arr])
[{1, 2, 3, 4}, {1, 2}]

EDIT: It is faster to drop duplicates with unique inside the apply, so each set is built from fewer values:

print (df.groupby('OrderID').apply(lambda x: set(x['ItemID'].unique())).tolist())

np.random.seed(123)
N = 1000000
df = pd.DataFrame(np.random.randint(30,size=(N,2)))
df.columns = ['OrderID','ItemID'] # note: divakar() below groups on column 1, i.e. ItemID here; timings are unaffected since both columns share the same distribution

def pir(df):
    uo, io = np.unique(df.OrderID.values, return_inverse=True)
    ui, ii = np.unique(df.ItemID.values, return_inverse=True)
    def gu(i):
        return set(ui[ii[io == i]].tolist())
    return [gu(i) for i in range(len(uo))]

def divakar(df):
    a = df.values
    sidx = a[:,1].argsort(kind='mergesort')
    cut_idx = np.nonzero(a[sidx[1:],1] > a[sidx[:-1],1])[0]+1
    out = np.split(a[sidx,0], cut_idx)
    return list(map(set,out))
In [120]: %timeit (df.groupby('OrderID')
                     .apply(lambda x: set(x['ItemID'].unique())).tolist())
10 loops, best of 3: 92.7 ms per loop


In [121]: %timeit (df.groupby('OrderID').apply(lambda x: set(x['ItemID'])).tolist())
10 loops, best of 3: 168 ms per loop

In [122]: %timeit ([set(v) for v in df.groupby('OrderID')['ItemID'].unique().values])
10 loops, best of 3: 125 ms per loop

In [123]: %timeit (list(map(set,df.groupby('OrderID')['ItemID'].unique().values)))
10 loops, best of 3: 125 ms per loop

In [124]: %timeit (pir(df))
1 loop, best of 3: 276 ms per loop

In [125]: %timeit (divakar(df))
1 loop, best of 3: 190 ms per loop
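As a closing note of my own (untimed here): on reasonably recent pandas versions the same result can be obtained by aggregating each group straight to a set, since agg accepts any callable:

sets = df.groupby('OrderID')['ItemID'].agg(set).tolist()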

Source: https://habr.com/ru/post/1662603/

