The numpy loss translates almost line-for-line into Keras backend operations. Here's the Keras version:
from keras import backend as K  # K.tf exposes TensorFlow under the TF backend

def l2_loss_keras(y_true, y_pred):
    # pixel coordinate grid, shape (im_height, im_width, 2)
    meshgrid = K.tf.meshgrid(K.arange(im_height), K.arange(im_width))
    meshgrid = K.cast(K.transpose(K.stack(meshgrid)), K.floatx())

    # align for broadcasting: grid -> (1, H, W, 1, 2), targets -> (batch, 1, 1, num_joints, 2)
    meshgrid_broadcast = K.expand_dims(K.expand_dims(meshgrid, 0), -2)
    y_true_broadcast = K.expand_dims(K.expand_dims(y_true, 1), 2)

    # Gaussian ground-truth heatmap centered at each joint coordinate
    diff = meshgrid_broadcast - y_true_broadcast
    ground = K.exp(-0.5 * K.sum(K.square(diff), axis=-1) / sigma ** 2)

    # squared error summed over the spatial dimensions, averaged over joints
    loss = K.sum(K.square(ground - y_pred), axis=[1, 2])
    return K.mean(loss, axis=-1)
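The two expand_dims calls just line the tensors up for broadcasting. A quick throwaway sketch (a symbolic placeholder standing in for a real batch, with sizes hard-coded to match the example below) traces the resulting shapes:

from keras import backend as K

im_height = im_width = 256
num_joints = 10

y_true_sym = K.placeholder(shape=(None, num_joints, 2))
grid = K.cast(K.transpose(K.stack(
    K.tf.meshgrid(K.arange(im_height), K.arange(im_width)))), K.floatx())

print(K.int_shape(K.expand_dims(K.expand_dims(grid, 0), -2)))
# (1, 256, 256, 1, 2)
print(K.int_shape(K.expand_dims(K.expand_dims(y_true_sym, 1), 2)))
# (None, 1, 1, 10, 2)

Subtracting the two therefore broadcasts to (batch, 256, 256, 10, 2), one difference vector per pixel per joint.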
For reference, the original numpy implementation:
import numpy as np

def l2_loss_numpy(y_true, y_pred):
    loss = 0
    n = y_true.shape[0]
    for j in range(n):
        for i in range(num_joints):
            # pixel coordinate grid, last axis holding the two coordinates
            yv, xv = np.meshgrid(np.arange(0, im_height), np.arange(0, im_width))
            z = np.stack([xv, yv]).transpose(1, 2, 0)
            # Gaussian ground-truth heatmap centered at joint i of sample j
            ground = np.exp(-0.5 * (((z - y_true[j, i, :]) ** 2).sum(axis=2)) / (sigma ** 2))
            loss = loss + np.sum((ground - y_pred[j, :, :, i]) ** 2)
    return loss / num_joints
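The easiest place for the two implementations to drift apart is the coordinate grid, since tf.meshgrid and np.meshgrid order their outputs the same way but the stack/transpose steps differ. A standalone sanity check (tiny square grid, matching the square images used below) confirms both constructions yield the same array:

import numpy as np
from keras import backend as K

h = w = 4  # square, as in the example below

grid_keras = K.eval(K.cast(K.transpose(K.stack(
    K.tf.meshgrid(K.arange(h), K.arange(w)))), K.floatx()))

yv, xv = np.meshgrid(np.arange(h), np.arange(w))
grid_numpy = np.stack([xv, yv]).transpose(1, 2, 0)

print(np.allclose(grid_keras, grid_numpy))  # True
print(grid_keras[2, 3])                     # [2. 3.] -> entry (i, j) holds (i, j)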
Verify that the two implementations agree on random data:

batch_size = 32
num_joints = 10
sigma = 5
im_width = 256
im_height = 256

y_true = 255 * np.random.rand(batch_size, num_joints, 2)
y_pred = 255 * np.random.rand(batch_size, im_height, im_width, num_joints)

print(l2_loss_numpy(y_true, y_pred))
# 45448272129.0

print(K.eval(l2_loss_keras(K.variable(y_true), K.variable(y_pred))).sum())
# 4.5448e+10
The values differ slightly because K.variable casts to Keras's default dtype, float32, while numpy computes in float64. With the Keras float type switched to float64, the two match exactly:
K.set_floatx('float64')  # compute the Keras loss in double precision

y_true = 255 * np.random.rand(batch_size, num_joints, 2)
y_pred = 255 * np.random.rand(batch_size, im_height, im_width, num_joints)

print(l2_loss_numpy(y_true, y_pred))
# 45460126940.6

print(K.eval(l2_loss_keras(K.variable(y_true), K.variable(y_pred))).sum())
# 45460126940.6
EDIT:
As it turns out, Keras creates the y_true placeholder with the same number of dimensions as y_pred, so feeding a 3-D target array to model.fit() fails. For example, with this test model:
from keras.models import Sequential
from keras.layers import Dense

X = np.random.rand(batch_size, 256, 256, 3)

model = Sequential([Dense(10, input_shape=(256, 256, 3))])
model.compile(loss=l2_loss_keras, optimizer='adam')
model.fit(X, y_true, batch_size=8)  # y_true is 3-D, but the target placeholder is 4-D
ValueError: Cannot feed value of shape (8, 10, 2) for Tensor 'dense_2_target:0', which has shape '(?, ?, ?, ?)'
To work around it, add a dummy dimension to y_true with np.expand_dims before fitting, and drop one of the expand_dims calls inside the loss:
def l2_loss_keras(y_true, y_pred):
    ...
    y_true_broadcast = K.expand_dims(y_true, 1)  # was K.expand_dims(K.expand_dims(y_true, 1), 2)
    ...

model.fit(X, np.expand_dims(y_true, axis=1), batch_size=8)
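For completeness, here is the adjusted loss with the ellipsis filled in from the version above; the only change is the single expand_dims on y_true, which now arrives with shape (batch, 1, num_joints, 2):

from keras import backend as K

def l2_loss_keras(y_true, y_pred):
    meshgrid = K.tf.meshgrid(K.arange(im_height), K.arange(im_width))
    meshgrid = K.cast(K.transpose(K.stack(meshgrid)), K.floatx())
    meshgrid_broadcast = K.expand_dims(K.expand_dims(meshgrid, 0), -2)

    # y_true is (batch, 1, num_joints, 2) after np.expand_dims, so one
    # expand_dims suffices to reach (batch, 1, 1, num_joints, 2)
    y_true_broadcast = K.expand_dims(y_true, 1)

    diff = meshgrid_broadcast - y_true_broadcast
    ground = K.exp(-0.5 * K.sum(K.square(diff), axis=-1) / sigma ** 2)
    loss = K.sum(K.square(ground - y_pred), axis=[1, 2])
    return K.mean(loss, axis=-1)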