Scipy stats.entropy 1D, , . broadcasting, .
docs -
scipy.stats.entropy(pk, qk = None, base = None)
pk, S = -sum (pk * log (pk), = 0).
qk None, Kullback-Leibler S = sum (pk * log (pk/qk), = 0).
, , . , (M,M), M - .
, stats.entropy() axis=0, distributions, rowth-dimension, axis=0 - (M,1) (1,M), (M,M) broadcasting.
, -
from scipy import stats
kld = stats.entropy(distributions.T[:,:,None], distributions.T[:,None,:])
-
In [15]: def entropy_loopy(distrib):
...: n = distrib.shape[0]
...: kld = np.zeros((n, n))
...: for i in range(0, n):
...: for j in range(0, n):
...: if(i != j):
...: kld[i, j] = stats.entropy(distrib[i, :], distrib[j, :])
...: return kld
...:
In [16]: distrib = np.random.randint(0,9,(100,100))
In [17]: out = stats.entropy(distrib.T[:,:,None], distrib.T[:,None,:])
In [18]: np.allclose(entropy_loopy(distrib),out)
Out[18]: True
In [19]: %timeit entropy_loopy(distrib)
1 loops, best of 3: 800 ms per loop
In [20]: %timeit stats.entropy(distrib.T[:,:,None], distrib.T[:,None,:])
10 loops, best of 3: 104 ms per loop