Adding a preprocessing layer to a Keras model and setting tensor values

What is the best way to add a preprocessing layer (for example, subtract the mean and divide by the std) to a Keras model (v2.0.5), so that the model becomes fully self-contained for deployment (possibly in a C++ environment)? I tried:

    from keras.models import Sequential
    from keras.layers import Lambda
    from keras import backend as K

    def getmodel():
        model = Sequential()
        mean_tensor = K.placeholder(shape=(1,1,3), name="mean_tensor")
        std_tensor = K.placeholder(shape=(1,1,3), name="std_tensor")

        preproc_layer = Lambda(lambda x: (x - mean_tensor) / (std_tensor + K.epsilon()),
                               input_shape=im_shape)

        model.add(preproc_layer)

        # Build the rest of the model, perhaps set weights,
        ...

        return model

Then, somewhere else, set the mean/std values on the model. I found set_value, so I tried the following:

m = getmodel()
mean, std = get_mean_std(..)

graph = K.get_session().graph
mean_tensor = graph.get_tensor_by_name("mean_tensor:0")
std_tensor = graph.get_tensor_by_name("std_tensor:0")

K.set_value(mean_tensor, mean)
K.set_value(std_tensor, std)

However, set_value does not work; it fails with:

AttributeError: 'Tensor' object has no attribute 'assign'

So set_value does not work the way the (limited) docs suggest. What would be the right way to do this? Get the TF session, wrap all the training code in a with(session) block and use feed_dict? I would have thought there would be a native Keras way of setting tensor values.
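For reference, the feed_dict route mentioned above would look roughly like this under TF 1.x. This is only a sketch: m, mean_tensor, std_tensor, mean and std come from the snippets above, and images is a hypothetical batch of inputs.

    sess = K.get_session()
    preds = sess.run(m.output,
                     feed_dict={m.input: images,
                                # reshape to the placeholder shape (1, 1, 3) if needed
                                mean_tensor: mean.reshape(1, 1, 3),
                                std_tensor: std.reshape(1, 1, 3)})

It runs, but the placeholders have to be fed on every call, so the graph is not self-contained, which defeats the original goal of baking the preprocessing into the model.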

Instead of placeholders I also tried setting the mean/std at model construction time, using K.variable or K.constant:

mean_tensor = K.variable(mean, name="mean_tensor")
std_tensor = K.variable(std, name="std_tensor")

This avoids the set_value problem, and training works fine, but saving the trained model (via the ModelCheckpoint callback, or model.save) then fails with:

...
File "/Users/dgorissen/Library/Python/2.7/lib/python/site-packages/keras/models.py", line 102, in save_model
  'config': model.get_config()
File "/Users/dgorissen/Library/Python/2.7/lib/python/site-packages/keras/models.py", line 1193, in get_config
  return copy.deepcopy(config)
File "/usr/local/Cellar/python/2.7.12_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/copy.py", line 163, in deepcopy
  y = copier(x, memo)
...
File "/usr/local/Cellar/python/2.7.12_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/copy.py", line 190, in deepcopy
  y = _reconstruct(x, rv, 1, memo)
File "/usr/local/Cellar/python/2.7.12_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/copy.py", line 343, in _reconstruct
  y.__dict__.update(state)
AttributeError: 'NoneType' object has no attribute 'update'

Update 1:

I tried a different approach: define the regular model as usual, then prepend a separate preprocessing model to it:

# Regular model, trained as usual
model = ...

# Preprocessing model
preproc_model = Sequential()
mean_tensor = K.constant(mean, name="mean_tensor")
std_tensor = K.constant(std, name="std_tensor")
preproc_layer = Lambda(lambda x: (x - mean_tensor) / (std_tensor + K.epsilon()),
                       input_shape=im_shape, name="normalisation")
preproc_model.add(preproc_layer)

# Prepend the preprocessing model to the regular model
full_model = Model(inputs=[preproc_model.input],
                   outputs=[model(preproc_model.output)])

# Save the complete model to disk
full_model.save('full_model.hdf5')

This seems to work up until the call to save(), which fails with the same deepcopy error as above. Perhaps the Lambda layer is the problem, although judging by the existing issues it looks like it should serialise properly.

So, overall: how do I prepend a normalisation layer to a Keras model without losing the ability to serialise it (and export it to pb)?

I'm sure it could be made to work by dropping down to TF directly (e.g. with tf.Transform), but I would have thought this should be possible in Keras itself.
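As an aside, one way to sidestep the HDF5 serialisation issue entirely is to save only the weights and rebuild the architecture, including the Lambda, from code at load time. This is just a sketch; build_full_model is a hypothetical helper that repeats the construction code above:

    # Save only the weights; the architecture is recreated from code.
    full_model.save_weights('full_model_weights.h5')

    # Later, e.g. in the deployment/export script:
    full_model = build_full_model(mean, std)   # hypothetical helper
    full_model.load_weights('full_model_weights.h5')

This avoids calling get_config() on the Lambda layer entirely, at the cost of keeping the model definition in code.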

Update 2:

Digging further, it seems the root cause is the way the Lambda function refers to tensors created outside of it, e.g.

def foo(x):
    bar = K.variable(baz, name="baz")
    return x - bar

In that case bar is not part of the model and does not get serialised with it.

Trying to understand this better, I ended up going through the Keras source and GitHub issues; #5396 in particular looks related, but I did not find a clean solution there.

, , " 1" . Model is not compiled. , :

  • .
  • ,
  • pb
  • pb
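A rough sketch of the export-to-pb step, assuming TF 1.x / Keras 2.0.x; the output file name is arbitrary and full_model is the combined model from Update 1:

    import tensorflow as tf
    from keras import backend as K
    from tensorflow.python.framework import graph_util

    sess = K.get_session()
    # Freeze all variables into constants so the resulting graph is self-contained.
    output_names = [full_model.output.op.name]
    frozen_graph = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), output_names)

    with tf.gfile.GFile('full_model.pb', 'wb') as f:
        f.write(frozen_graph.SerializeToString())

The frozen graph can then be loaded from C++ through the TensorFlow C++ API.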

It does feel a bit roundabout to only assemble the full model right before exporting the pb. An alternative that keeps everything in a single model would be:

  • build the model with a no-op preprocessing layer from the start (mean = 0, std = 1),
  • train it, then overwrite the no-op values with the real mean/std,
  • and export to pb (a sketch of that variant follows this list).
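A minimal sketch of that variant; the variable names are made up for illustration, and im_shape, mean and std are the same as above. Unlike a placeholder, K.variable is backed by a tf.Variable, so K.set_value works on it:

    import numpy as np
    from keras import backend as K
    from keras.layers import Lambda
    from keras.models import Sequential

    mean_var = K.variable(np.zeros((1, 1, 3)), name="mean_var")  # no-op defaults
    std_var = K.variable(np.ones((1, 1, 3)), name="std_var")

    model = Sequential()
    model.add(Lambda(lambda x: (x - mean_var) / (std_var + K.epsilon()),
                     input_shape=im_shape))
    # ... add the rest of the model and train it ...

    # Once the dataset statistics are known, overwrite the no-op values in place:
    K.set_value(mean_var, mean.reshape(1, 1, 3))
    K.set_value(std_var, std.reshape(1, 1, 3))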

Either way gets the job done; for now I went with the first approach.

It would still be nicer if Keras had a native way to set tensor values (something like assign), but this works.

Thanks @Daniel for the help.

Answer:


When you create your variables outside the model, as in:

mean_tensor = K.variable(mean, name="mean_tensor")
std_tensor = K.variable(std, name="std_tensor")

they live outside the model: Keras does not treat them as part of any layer, only as external constants. That is (probably) why saving the model fails.

You can still use them directly in a Lambda function, but the function has to reshape x so it is compatible with those tensors. I don't know your im_shape, so I'll assume the images have 3 channels in the last dimension:

def myFunc(x):
    # reshape x so it is compatible with the mean and std tensors:
    x = K.reshape(x, (-1, 1, 1, 3))
        # -1 is like a wildcard: it takes whatever value matches the rest of the given shape.
        # I chose (1,1,3) because it is the same shape as mean_tensor and std_tensor.

    result = (x - mean_tensor) / (std_tensor + K.epsilon())

    # now shape it back to the shape it had before (which I don't know):
    return K.reshape(result, (-1, im_shape[0], im_shape[1], im_shape[2]))
        # -1 is still necessary, it is the batch size

Then add it in a Lambda layer, this time passing an output_shape as well (just to be safe):

model.add(Lambda(myFunc, input_shape=im_shape, output_shape=im_shape))

After that, just train the model as usual (with model.compile(...) and model.fit(...)).
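For completeness, a minimal training call; the optimizer, loss and the training arrays (x_train, y_train) are placeholders for whatever your task actually uses:

    model.compile(optimizer='adam', loss='categorical_crossentropy')
    model.fit(x_train, y_train, epochs=10, batch_size=32)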


If you want the mean and std to be computed from each batch inside the model itself, you could do it like this:

def myFunc(x):
    mean_tensor = K.mean(x, axis=[0, 1, 2])  # assuming shapes of (size, width, height, channels)
    std_tensor = K.std(x, axis=[0, 1, 2])

    x = K.reshape(x, (-1, 3))  # the shapes of mean and std are (3,) here
    result = (x - mean_tensor) / (std_tensor + K.epsilon())

    return K.reshape(result, (-1, width, height, 3))

Keep in mind that with this version the mean and std are computed per batch at run time rather than fixed from the whole training set, so the behaviour differs slightly from the original approach. Which of the two suits you better depends on your case (I can't say for sure).


Source: https://habr.com/ru/post/1680451/

