Tensorflow shuffle_batch speed

I noticed a big difference in speed between loading my training data into memory and feeding it into the graph as a numpy array, versus using a shuffle batch of the same batch size. My data set has ~1000 instances.

Running 1000 iterations with the in-memory array takes less than a few seconds, but with the shuffle batch it takes almost 10 minutes. I understand the shuffle batch should be somewhat slower, but this seems far too slow. Why is this?

Added a bounty. Any suggestions on how to make shuffled mini-batches faster?

Here is the training data: Link to bounty_training.csv (pastebin)

Here is my code:

shuffle_batch

import numpy as np
import tensorflow as tf

data = np.loadtxt('bounty_training.csv',
    delimiter=',',skiprows=1,usecols = (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))

filename = "test.tfrecords"

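# Serialize each CSV row as a tf.train.Example holding a 14-float
# feature vector and a scalar float label.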
with tf.python_io.TFRecordWriter(filename) as writer:
    for row in data:
        features, label = row[:-1], row[-1]
        example = tf.train.Example()
        example.features.feature['features'].float_list.value.extend(features)
        example.features.feature['label'].float_list.value.append(label)
        writer.write(example.SerializeToString())

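# Build ops that read and parse ONE serialized example per execution;
# the shuffle_batch queue runner below runs them once per enqueued example.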
def read_and_decode_single_example(filename):
    filename_queue = tf.train.string_input_producer([filename],
                                                   num_epochs=None)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], np.float32),
            'features': tf.FixedLenFeature([14], np.float32)})

    pdiff = features['label']
    avgs = features['features']

    return avgs, pdiff

avgs, pdiff = read_and_decode_single_example(filename)


n_features = 14
batch_size = 1000
hidden_units = 7
lr = .001

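# capacity bounds the size of the shuffle queue; min_after_dequeue is the
# minimum number of elements left after a dequeue and controls how well
# the examples are mixed.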
avgs_batch, pdiff_batch = tf.train.shuffle_batch(
    [avgs, pdiff], batch_size=batch_size,
    capacity=5000,
    min_after_dequeue=2000)

X = tf.placeholder(tf.float32,[None,n_features])
Y = tf.placeholder(tf.float32,[None,1])

W = tf.Variable(tf.truncated_normal([n_features,hidden_units]))
b = tf.Variable(tf.zeros([hidden_units]))

Wout = tf.Variable(tf.truncated_normal([hidden_units,1]))
bout = tf.Variable(tf.zeros([1]))

hidden1 = tf.matmul(X,W) + b
pred = tf.matmul(hidden1,Wout) + bout

loss = tf.reduce_mean(tf.squared_difference(pred,Y))

optimizer = tf.train.AdamOptimizer(lr).minimize(loss)

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for step in range(1000):
        x_, y_ = sess.run([avgs_batch,pdiff_batch])

        _, loss_val = sess.run([optimizer,loss],
              feed_dict={X: x_, Y: y_.reshape(batch_size,1)} )

        if step % 100 == 0:
            print(loss_val)


    coord.request_stop()
    coord.join(threads)

Full batch through numpy array

"""
avgs and pdiff loaded into numpy arrays first...
Same model as above
"""
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)

    for step in range(1000):
        _, loss_value = sess.run([optimizer,loss],
                feed_dict={X: avgs, Y: pdiff.reshape(n_instances,1)} )

The trick is to feed shuffle_batch whole batches of serialized examples ((n+1)-dimensional input) with enqueue_many=True, instead of one example at a time.

The TFRecordReader stays the same, but a batch of serialized records is collected for each enqueue:

def get_batch(batch_size):
    reader = tf.TFRecordReader()

    batch_list = []
    # Call reader.read inside the loop so that each list element is a
    # distinct read op (appending the output of a single read would
    # enqueue batch_size copies of the same record).
    for i in range(batch_size):
        _, serialized_example = reader.read(filename_queue)
        batch_list.append(serialized_example)

    return [batch_list]

batch_serialized_example = tf.train.shuffle_batch(
    get_batch(batch_size), batch_size=batch_size,
    capacity=100*batch_size,
    min_after_dequeue=batch_size*10,
    num_threads=1,
    enqueue_many=True)

features = tf.parse_example(
    batch_serialized_example,
    features={
        'label': tf.FixedLenFeature([], np.float32),
        'features': tf.FixedLenFeature([14], np.float32)})

batch_pdiff = features['label']
batch_avgs = features['features']

...
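A possibly simpler variant of the same idea (a sketch, assuming a TF 1.x version where reader.read_up_to is available) reads up to batch_size records with a single op instead of building a Python list of read ops; the tf.parse_example call stays the same:

reader = tf.TFRecordReader()
# read_up_to returns a 1-D string tensor of up to batch_size serialized records.
_, serialized_batch = reader.read_up_to(filename_queue, batch_size)

batch_serialized_example = tf.train.shuffle_batch(
    [serialized_batch], batch_size=batch_size,
    capacity=100*batch_size,
    min_after_dequeue=batch_size*10,
    enqueue_many=True)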

Note that every evaluation of the batch tensors (avgs_batch.eval, pdiff_batch.eval, or a sess.run) dequeues a fresh batch from the queue. If you evaluate the features and the labels in separate calls, they come from different batches and no longer match each other. So instead of separate eval calls, fetch both in a single sess.run.
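For example (a sketch; assumes the session and queue runners from the question are running):

# Wrong: two evaluations dequeue two different batches, so these
# features and labels do not belong together.
x_bad = avgs_batch.eval(session=sess)
y_bad = pdiff_batch.eval(session=sess)

# Right: one run dequeues a single batch, so the pair stays consistent.
x_, y_ = sess.run([avgs_batch, pdiff_batch])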

Here the slow part is the TFRecordReader, which parses one record at a time. Since your data fits in memory, you can skip it entirely:

  • load the data into numpy arrays, as you already do for the full-batch version;
  • convert the arrays to tensors with tensorflow.python.framework.ops.convert_to_tensor;
  • use tf.train.slice_input_producer to get one (features, label) slice at a time;
  • the slices come out already shuffled, so no shuffle queue is needed;
  • group the slices into mini-batches with tf.train.batch, as in the sketch after this list.
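A minimal sketch of that pipeline (assuming avgs and pdiff are the numpy arrays from the full-batch version, and reusing batch_size from the question):

import tensorflow as tf

# avgs has shape [n_instances, 14]; pdiff has shape [n_instances].
avgs_t = tf.convert_to_tensor(avgs, dtype=tf.float32)
pdiff_t = tf.convert_to_tensor(pdiff, dtype=tf.float32)

# One (features, label) pair per dequeue; shuffle=True (the default)
# reshuffles the whole data set every epoch.
single_avg, single_pdiff = tf.train.slice_input_producer(
    [avgs_t, pdiff_t], shuffle=True)

# The slices are already shuffled, so plain tf.train.batch is enough.
avgs_batch, pdiff_batch = tf.train.batch(
    [single_avg, single_pdiff], batch_size=batch_size)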

When using queues to feed data, you should not use feed_dict. Instead, make your graph depend directly on the queue output, namely:

  • remove the X and Y placeholders
  • use the batch of features (avgs_batch) directly

    hidden1 = tf.matmul(avgs_batch,W) + b
    
  • similarly, use the batch of labels (pdiff_batch) instead of Y when computing the loss

  • finally, drop the first sess.run and keep only the second one, which now computes the loss directly without a feed_dict

    # x_, y_ = sess.run([avgs_batch,pdiff_batch])
    # _, loss_val = sess.run([optimizer,loss],
    #       feed_dict={X: x_, Y: y_.reshape(batch_size,1)} )

    _, loss_val = sess.run([optimizer,loss])
    
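Putting the three changes together, the model section of the question's script would look roughly like this (a sketch; pdiff_batch is reshaped to match pred, just as the feed_dict version reshaped y_):

# Same model as before, wired straight to the queue output.
hidden1 = tf.matmul(avgs_batch, W) + b
pred = tf.matmul(hidden1, Wout) + bout

loss = tf.reduce_mean(tf.squared_difference(
    pred, tf.reshape(pdiff_batch, [batch_size, 1])))
optimizer = tf.train.AdamOptimizer(lr).minimize(loss)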
