I am working on a basic TensorFlow Serving example. I am following the MNIST example, except instead of classifying I want to use a numpy array to predict another numpy array.
For this, I first trained my neural network:
import tensorflow as tf

# n_input and n_hidden_1 .. n_hidden_3 are defined elsewhere in my code
x = tf.placeholder("float", [None, n_input], name="input_values")

weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'encoder_h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_2])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h3': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b2': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b3': tf.Variable(tf.random_normal([n_input])),
}

# Three tanh layers down to the bottleneck ...
def encoder(x):
    layer_1 = tf.nn.tanh(tf.matmul(x, weights['encoder_h1']) + biases['encoder_b1'])
    print(layer_1.shape)
    layer_2 = tf.nn.tanh(tf.matmul(layer_1, weights['encoder_h2']) + biases['encoder_b2'])
    print(layer_2.shape)
    layer_3 = tf.nn.tanh(tf.matmul(layer_2, weights['encoder_h3']) + biases['encoder_b3'])
    print(layer_3.shape)
    return layer_3

# ... and three tanh layers back up to the input dimension
def decoder(x):
    layer_1 = tf.nn.tanh(tf.matmul(x, weights['decoder_h1']) + biases['decoder_b1'])
    print(layer_1.shape)
    layer_2 = tf.nn.tanh(tf.matmul(layer_1, weights['decoder_h2']) + biases['decoder_b2'])
    layer_3 = tf.nn.tanh(tf.matmul(layer_2, weights['decoder_h3']) + biases['decoder_b3'])
    return layer_3

encoder_op = encoder(x)
decoder_op = decoder(encoder_op)

# y is the network's prediction, y_ the target it is trained against
y = decoder_op
y_ = tf.placeholder("float", [None, n_input], name="predict")
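The training itself works; roughly, it is the standard loop below (a simplified sketch: learning_rate, training_epochs, model_path and my_batches stand in for my actual setup):

cost = tf.reduce_mean(tf.pow(y_ - y, 2))  # mean squared reconstruction error
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        for batch_x, batch_y in my_batches():  # numpy arrays of shape [batch, n_input]
            _, c = sess.run([optimizer, cost],
                            feed_dict={x: batch_x, y_: batch_y})
    save_path = saver.save(sess, model_path)  # this checkpoint is restored during export below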
Then, as someone suggested here, I saved my network like this:
import os
import sys
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import utils
from tensorflow.python.saved_model import tag_constants, signature_constants
from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def
from tensorflow.contrib.session_bundle import exporter
with tf.Session() as sess:
    # Restore the variables trained above
    sess.run(init)
    saver.restore(sess, model_path)
    print("Model restored from file: %s" % model_path)

    export_path = '/tmp/AE_model/6'
    print('Exporting trained model to', export_path)
    builder = tf.saved_model.builder.SavedModelBuilder(export_path)

    # One 'predict' signature mapping the input placeholder x to the output y
    signature = predict_signature_def(inputs={'inputs': x},
                                      outputs={'outputs': y})

    builder.add_meta_graph_and_variables(sess=sess,
                                         tags=[tag_constants.SERVING],
                                         signature_def_map={'predict': signature})
    builder.save()
    print('Done exporting!')
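As a quick sanity check (not part of the serving pipeline), the export can be loaded back in a fresh session along these lines; the random test_input just stands in for a real [1, n_input] sample:

import numpy as np

with tf.Session(graph=tf.Graph()) as sess:
    # load() returns the MetaGraphDef, from which the signature can be read back
    meta_graph = tf.saved_model.loader.load(sess, [tag_constants.SERVING], '/tmp/AE_model/6')
    sig = meta_graph.signature_def['predict']
    input_name = sig.inputs['inputs'].name     # tensor name of x
    output_name = sig.outputs['outputs'].name  # tensor name of y
    test_input = np.random.rand(1, n_input)
    print(sess.run(output_name, feed_dict={input_name: test_input}))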
Next, I followed the instructions to build the model server:
bazel build //tensorflow_serving/model_servers:tensorflow_model_server
and then started it on localhost:9000:
bazel-bin/tensorflow_serving/model_servers/tensorflow_model_server --port=9000 --model_base_path=/tmp/AE_model/
PROBLEM
Now I want to write a program so that I can pass Mat vectors from a C++ program in Eclipse (I use a LOT of libraries) to my server, so that I can make some kind of predictions.
My first thought was to use inception_client.cc as a reference, but it seems that Bazel compiles it for me and I cannot find prediction_service.grpc.pb.h :(
So I tried a different route, a Python script, but that only gets me this error:
<grpc.beta._client_adaptations._Rendezvous object at 0x7f9bcf8cb850>
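For reference, the script is modeled on the MNIST client and looks roughly like this (two assumptions on my part: the model name 'default', since I did not pass --model_name to the server, and n_input as above):

import numpy as np
import tensorflow as tf
from grpc.beta import implementations
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2

channel = implementations.insecure_channel('localhost', 9000)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

request = predict_pb2.PredictRequest()
request.model_spec.name = 'default'            # assumption: the server's default model name
request.model_spec.signature_name = 'predict'  # the key used in signature_def_map above

data = np.random.rand(1, n_input).astype(np.float32)
request.inputs['inputs'].CopyFrom(
    tf.contrib.util.make_tensor_proto(data, shape=list(data.shape)))

result = stub.Predict(request, 10.0)  # 10 second timeout
print(result.outputs['outputs'])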
Any help on this topic would be appreciated. Thanks in advance.
EDIT:
I reinstalled protobuf and grpc and ran the commands as suggested. The command was a bit different from the suggested one and I had to change it (in any case, I am on Ubuntu 14.04):
sudo protoc -I=serving -I serving/tensorflow --grpc_out=. --plugin=protoc-gen-grpc=`which grpc_cpp_plugin` serving/tensorflow_serving/apis/*.proto
This generated the .grpc.pb.h files; I dragged them into the /apis/ folder and the number of errors went down. Now I get the error:
/tensorflow/third_party/eigen3/unsupported/Eigen/CXX11/Tensor:1:42: fatal error: unsupported/Eigen/CXX11/Tensor: No such file or directory
even though this file does exist. Any suggestions are appreciated.
Thank you @subzero!
EDIT 2:
I was able to solve the issue with Eigen by downloading the newest Eigen version and building it from source. Next I pointed the compiler to /usr/local/include/eigen3/.
Afterwards I had issues with the tensorflow libraries. These I resolved by generating the libtensorflow_cc.so library, following lababidi's suggestion:
https://github.com/tensorflow/tensorflow/issues/2412
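If I remember that issue correctly, the target I built (from the TensorFlow source tree root) was along the lines of:
bazel build -c opt //tensorflow:libtensorflow_cc.so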
I am down to one last issue. Everything seems to be fine, except I get the error:
undefined reference to `tensorflow::serving::PredictRequest::~PredictRequest()'
It seems that I am missing either a linker flag or a library. Does anybody know what I am missing?