The Keras functional API's graph notation does exactly this. Essentially, you give each layer a unique handle, and then wire it to the previous layer by calling it with that handle in parentheses at the end:
layer_handle = Layer(params)(prev_layer_handle)
Please note that the first layer must be an `Input(shape=(x, y))` layer, which takes no prior connection.
Then, to build a model with multiple inputs and outputs, pass lists of the input and output layer handles:
model = Model(inputs=[in_layer1, in_layer2, ..], outputs=[out_layer1, out_layer2, ..])
And to train it, pass matching lists of input and target arrays:
model.fit([x_train1, x_train2, ..], [y_train1, y_train2, ..])
Here is a complete example — a convolutional branch and a dense branch, merged and classified together:
# Two-branch model built with the Keras functional API:
#  - branch 1: a small conv net over 28x28x1 images,
#  - branch 2: a dense net over flat 784-dim vectors.
# Their 128-dim feature vectors are concatenated and classified
# into 10 classes with a softmax head.
from keras.models import Model
from keras.layers import Input, Convolution2D, Flatten, Dense, Concatenate

# Branch 1: convolutional path over the image input.
in1 = Input(shape=(28, 28, 1))
model_one_conv_1 = Convolution2D(32, (3, 3), activation='relu')(in1)
model_one_flat_1 = Flatten()(model_one_conv_1)
model_one_dense_1 = Dense(128, activation='relu')(model_one_flat_1)

# Branch 2: fully-connected path over the flat vector input.
in2 = Input(shape=(784, ))
model_two_dense_1 = Dense(128, activation='relu')(in2)
model_two_dense_2 = Dense(128, activation='relu')(model_two_dense_1)

# Merge the two branches feature-wise, then classify.
model_final_concat = Concatenate(axis=-1)([model_one_dense_1, model_two_dense_2])
model_final_dense_1 = Dense(10, activation='softmax')(model_final_concat)

model = Model(inputs=[in1, in2], outputs=model_final_dense_1)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# NOTE: Keras 2 renamed the Keras 1.x `nb_epoch` keyword to `epochs`;
# `nb_epoch` raises a TypeError on modern Keras versions.
# X_train_one / X_train_two / Y_train are assumed to be defined by the caller.
model.fit([X_train_one, X_train_two], Y_train,
          batch_size=32, epochs=10, verbose=1)
More details on the functional API can be found in the official Keras documentation and the examples in the Keras repository.