How to pass base64 encoded image to Tensorflow prediction?

I have a google-cloud-ml model that I can call for predictions by passing a 3-dimensional array of float32 values ...

{ 'instances': [ { 'input': '[ [ [ 0.0 ], [ 0.5 ], [ 0.8 ] ]... ] ]' } ] }

However, this is an inefficient format for transferring images, so I would like to transfer base64-encoded PNG or JPEG instead. This document talks about how to do that, but it is not clear what the whole JSON object looks like. Does { 'b64': 'x0welkja...' } simply replace the '[ [ [ 0.0 ], [ 0.5 ], [ 0.8 ] ]... ] ]' string, leaving the enclosing 'instances' and 'input' structure the same? Or is some other structure needed? Or should the TensorFlow model itself be trained on base64?

+5
source share
4 answers

The model does not need to be trained on base64. Export a model whose serving input accepts the raw PNG or JPEG bytes (the client sends them base64-encoded in the request). The key detail is that the input name must end with the _bytes suffix — that tells CloudML Engine to base64-decode the payload before feeding it to the graph. For example:

from tensorflow.contrib.saved_model.python.saved_model import utils

# Serving input: a batch of raw JPEG byte strings.
# Shape of [None] means we can have a batch of images.
image = tf.placeholder(shape=[None], dtype=tf.string)
# tf.image.decode_jpeg expects a *scalar* string, so decode each element
# of the batch individually with tf.map_fn (decoding the batched tensor
# directly would fail at session run time).
decoded = tf.map_fn(
    lambda img_bytes: tf.image.decode_jpeg(img_bytes, channels=3),
    image, dtype=tf.uint8)
# Do the rest of the processing.
scores = build_model(decoded)

# The input name needs to have "_bytes" suffix so CloudML Engine
# base64-decodes the incoming {"b64": ...} payload.
inputs = {'image_bytes': image}
outputs = {'scores': scores}
utils.simple_save(session, export_dir, inputs, outputs)

The prediction request then looks like this:

{"instances": [{"b64": "x0welkja..."}]}
+2

An alternative (instead of embedding base64-encoded data in the request) is to upload the images to Google Cloud Storage and have the model read them from GCS. This avoids request-size limits and lets you manage and reuse the images through the GCS API.

TensorFlow's tf.read_file can read directly from GCS. In the serving input_fn, the incoming value is the image's URL (gs://bucket/some/path/to/image.jpg), which CMLE passes through to the model.

def read_and_preprocess(filename, augment=False):
    """Read the JPEG at `filename` and return a [1, HEIGHT, WIDTH, C] float batch.

    Pixel values end up in the [-1, 1] range.  The `augment` flag is
    accepted but not used in this version.
    """
    raw = tf.read_file(filename)
    img = tf.image.decode_jpeg(raw, channels=NUM_CHANNELS)
    # decode_jpeg yields uint8; convert to float32 in [0, 1]
    img = tf.image.convert_image_dtype(img, dtype=tf.float32)
    # resize_bilinear operates on batches, so add a leading batch dim
    batched = tf.expand_dims(img, 0)
    resized = tf.image.resize_bilinear(
        batched, [HEIGHT, WIDTH], align_corners=False)
    # rescale [0, 1] -> [-1, 1]
    return tf.multiply(tf.subtract(resized, 0.5), 2.0)

def serving_input_fn():
    """Serving input_fn: receives a single GCS image URL as a string."""
    url = tf.placeholder(tf.string, shape=())
    inputs = {'imageurl': url}
    # squeeze down to a scalar filename before reading the file
    scalar_url = tf.squeeze(inputs['imageurl'])
    img = read_and_preprocess(scalar_url)
    # declare the batch dimension as unknown rather than fixed at 1
    img = tf.placeholder_with_default(
        img, shape=[None, HEIGHT, WIDTH, NUM_CHANNELS])
    return tf.estimator.export.ServingInputReceiver({'image': img}, inputs)

This follows the approach in rhaertel80's answer. See https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive/08_image/flowersmodel/trainer/task.py#L27 for a complete working training/serving example.

+2

Building on the answer of @Lak (thanks, Lak): his code serves one image per JSON request, but I needed the model to handle a batch of multiple images in a single JSON request, so I modified his serving input function as follows:

def read_and_preprocess(filename):
    """Read one image file and return its pixels as float32 in [0, 1]."""
    raw = tf.read_file(filename)
    decoded = tf.image.decode_image(raw, channels=NUM_CHANNELS)
    return tf.image.convert_image_dtype(decoded, dtype=tf.float32)

def serving_input_fn():
    """Serving input_fn: receives a 1-D batch of GCS image URLs."""
    url_batch = tf.placeholder(tf.string, shape=(None))
    inputs = {'imageurl': url_batch}
    # preprocess each URL independently; map_fn handles the batch dimension
    images = tf.map_fn(read_and_preprocess, inputs['imageurl'],
                       dtype=tf.float32)
    # declare the batch dimension as unknown (and not 1)
    images = tf.placeholder_with_default(
        images, shape=[None, HEIGHT, WIDTH, NUM_CHANNELS])
    return tf.estimator.export.ServingInputReceiver({'image': images}, inputs)

The changes relative to Lak's code are: 1) there is no squeeze (since the JSON request now carries a list of URLs rather than a single one) and 2) tf.map_fn applies read_and_preprocess to each URL in the batch.

+1

I ran into the same problem serving a Keras model on Cloud ML Engine: predictions from a JSON request with raw float arrays worked, but I wanted to send JSON with base64-encoded image bytes instead. This is how I exported the model:

   # Serving input: a batch of image byte strings.  The "_bytes" suffix on
   # the input name makes CloudML Engine base64-decode the {"b64": ...}
   # payload before it reaches the graph.
   image = tf.placeholder(shape=[None], dtype=tf.string)
   export_path = 'path-name'

   builder = saved_model_builder.SavedModelBuilder(export_path)

   # NOTE(review): `model` is the Keras model built below — this snippet on
   # its own does not define it, nor does it decode the image bytes into the
   # float tensor the model was trained on (see the question at the end).
   signature = predict_signature_def(inputs={'image_bytes': image},
                          outputs={'scores': model.output})

   # Export the current Keras session graph as a SavedModel with a
   # 'predict' signature.
   with K.get_session() as sess:
     builder.add_meta_graph_and_variables(sess=sess,
                                 tags=[tag_constants.SERVING],
                                 signature_def_map={
                                     'predict': signature})

   builder.save()   

The Keras model itself:

    # Keras CNN: Conv -> ReLU -> BatchNorm -> MaxPool -> Dropout blocks.
    # NOTE(review): this is an excerpt — the final line below elides the
    # remaining layers and is not runnable as-is.
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding="same",input_shape=inputShape))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(MaxPooling2D(pool_size=(3, 3)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    ....... few more layers

But my model was trained on float32 arrays, not on strings. Do I need to decode and convert the base64 input to floats somewhere in the exported graph, and if so, where?

In the answers above, Lak and Milad Shahidi mentioned the use of serving_input_fn(). I am not sure how to include this in my code. Do I need to add it when building the model, or during the export to a SavedModel? Please provide some code snippets showing how to wire it in.

thank

0
source

Source: https://habr.com/ru/post/1691431/


All Articles