How to control the output size of an LSTM cell in TensorFlow

I am trying to build a simple example that uses an LSTM RNN in TensorFlow to predict the values of a target time series, given a known input time series.

Link to problem example

Here is what I'm trying to do, formally.

In essence, I think the output of the LSTM cell and the matrix multiplication that follows it should behave like this:

import numpy as np
import tensorflow as tf

# 40 "time steps": a 2x1 input column multiplied by a 1x2 matrix, plus a bias
X = np.zeros([40, 2, 1])
A = np.zeros([40, 1, 2])
b = np.arange(0, 2, dtype=np.float64)  # float, so it can be added to the float64 matmul result

X = tf.convert_to_tensor(X)
A = tf.convert_to_tensor(A)
b = tf.convert_to_tensor(b)

Y = tf.matmul(X, A) + b
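
With these shapes, tf.matmul treats the leading dimension of 40 as a batch dimension, so Y comes out as [40, 2, 2], with the bias broadcast over the last axis (a quick shape check, assuming the snippet above):

print(Y.get_shape())  # (40, 2, 2): one 2x2 product per step, bias broadcast along the last axis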

The TensorFlow code below is set up just to inspect output shapes, rather than building a tf.Graph and running a session:

import numpy as np
import tkinter
import matplotlib.pyplot as plt
import tensorflow as tf
n=40
x = np.linspace(0,10,n)
y1 = np.sin(x)
y2 = np.cos(x)

x1=np.random.normal(0,y1**2,n)
x2=np.random.normal(0,y2**2,n)

y1=(y1**2>0.4)*1
y2=(y2**2>0.4)*1

ys = np.vstack((y1,y2))
xs = np.vstack((x1,x2))

def plot_results_multiple(xs, ys):
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    for i, data in enumerate(xs):
        plt.plot(data, label='x' + str(i))
    for i, data in enumerate(ys):
        plt.plot(data, label='y' + str(i))
    plt.legend()
    plt.show()

plot_results_multiple(xs,ys)

xs = xs.T
ys = ys.T

print("Shape of arrays " +str(xs.shape) + " " +str(ys.shape))


batch_size = 1
lstm_size = 1
nseries = 2
time_steps = 40
nclasses = 2

lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size,state_is_tuple=True)
stacked_lstm = tf.contrib.rnn.MultiRNNCell([lstm] * 2, state_is_tuple=True)

state = lstm.zero_state(batch_size, tf.float32)
inputs = tf.unstack(xs, num=40, axis=0)

outputs = []

with tf.variable_scope("RNN"):
    for timestep in range(time_steps):
        if timestep > 0: tf.get_variable_scope().reuse_variables()
        output, state = lstm(tf.cast(tf.reshape(inputs[timestep],[1,nseries]),tf.float32), state)
        print(tf.convert_to_tensor(output).get_shape())
        outputs.append(output)

print(tf.convert_to_tensor(outputs).get_shape())
output = tf.reshape(tf.concat(outputs, 1), [-1, lstm_size])
softmax_w = tf.get_variable(
    "softmax_w", [time_steps, 1, nclasses], dtype=tf.float32)
print(softmax_w.get_shape())
softmax_b = tf.get_variable("softmax_b", [nseries], dtype=tf.float32)
print(softmax_b.get_shape())
logits = tf.matmul(output, softmax_w) + softmax_b

print(logits.get_shape())

I think the problem I am facing is how to configure the LSTM cell: it currently produces a 1x1 output from a 2x1 input, whereas I expect a 2x1 output. Any help is greatly appreciated.
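
For reference, that shape behaviour can be reproduced in isolation (a minimal sketch, assuming the same TF 1.x / tf.contrib API and imports as the code above):

repro_cell = tf.contrib.rnn.BasicLSTMCell(1, state_is_tuple=True)  # lstm_size = 1, as above
x_t = tf.placeholder(tf.float32, [1, 2])                           # one time step: batch of 1, 2 input series
out, _ = repro_cell(x_t, repro_cell.zero_state(1, tf.float32))
print(out.get_shape())                                             # (1, 1): the width follows num_units, not the input width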

The output size of tf.contrib.rnn.BasicLSTMCell(lstm_size, state_is_tuple) is determined by its num_units argument. You can see this in the source: https://github.com/tensorflow/tensorflow/blob/b0ecc7d2c1486367ec65d297e372f8935ee3ddfe/tensorflow/python/ops/rnn_cell_impl.py#254

@property
  def output_size(self):
    return self._num_units

So, to change the output size, change the num_units you pass to tf.contrib.rnn.BasicLSTMCell.
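
For instance, under the question's shapes (batch of 1, two input series, two target classes), here is a minimal sketch of two ways to get a [1, 2] output per step; the second option uses tf.contrib.rnn.OutputProjectionWrapper, which is not mentioned in the original answer and is only one possible alternative:

import tensorflow as tf

batch_size = 1
nclasses = 2
x_t = tf.placeholder(tf.float32, [batch_size, 2])  # one time step of the two input series

# Option 1: make the cell itself nclasses units wide, so every step emits a [1, 2] output.
cell_a = tf.contrib.rnn.BasicLSTMCell(nclasses, state_is_tuple=True)

# Option 2: keep a wider hidden state and project each step's output down to nclasses.
cell_b = tf.contrib.rnn.OutputProjectionWrapper(
    tf.contrib.rnn.BasicLSTMCell(16, state_is_tuple=True), output_size=nclasses)

with tf.variable_scope("option1"):
    out_a, _ = cell_a(x_t, cell_a.zero_state(batch_size, tf.float32))
with tf.variable_scope("option2"):
    out_b, _ = cell_b(x_t, cell_b.zero_state(batch_size, tf.float32))

print(out_a.get_shape())  # (1, 2)
print(out_b.get_shape())  # (1, 2)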


Source: https://habr.com/ru/post/1672369/

