# RNN中的时间序列详细示例解释

RNN 时间序列的数据准备有些棘手。目标是预测序列的后续值：我们将利用过去的信息来估计 t+1 时刻的值。标签（目标值）就是把输入序列向前移动一个周期得到的序列。

``````# To plotting amazing figure
%matplotlib inline
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
def create_ts(start = '2001', n = 201, freq = 'M'):
ring = pd.date_range(start=start, periods=n, freq=freq)
ts =pd.Series(np.random.uniform(-18, 18, size=len(rng)), ring).cumsum()
return ts
ts= create_ts(start = '2001', n = 192, freq = 'M')
ts.tail(5)``````

``````2016-08-31    -93.459631
2016-09-30    -95.264791
2016-10-31    -95.551935
2016-11-30   -105.879611
2016-12-31   -123.729319
Freq: M, dtype: float64``````

``````ts = create_ts(start = '2001', n = 222)``````
``````# Left plotting diagram
plt.figure(figsize=(11, 4))
plt.subplot(121)
plt.plot(ts.index, ts)
plt.plot(ts.index[90:100], ts[90:100], "b-", linewidth=3, label="A train illustration in the plotting area")
plt.title("A time series (generated)", fontsize=14)

## Right side plotted Diagram
plt.subplot(122)
plt.title("A training instance", fontsize=14)
plt.plot(ts.index[90:100], ts[90:100], "b-", markersize=8, label="instance")
plt.plot(ts.index[91:101], ts[91:101], "bo", markersize=10, label="target", markerfacecolor='red')
plt.legend(loc="upper left")
plt.xlabel("Time")
plt.show()``````

## 建立RNN以分析TensorFlow中的时间序列

• 输入数量：1
• 时间步长(时间序列中的窗口)：20（与下文代码中的 n_windows = 20 一致）
• 神经元数量：120
• 输出数量：1

### 步骤1) 创建训练集和测试集

``````series = np.array(ts)  # the series values as a plain 1-D NumPy array
n_windows = 20   # window length: time steps per training instance
n_input =  1     # features per time step
n_output = 1     # predicted values per time step
size_train = 201 # number of points reserved for training``````

``````# Split data
train = series[:size_train]
test = series[size_train:]
print(train.shape, test.shape)
(201) (21)``````

### 步骤2) 创建返回 X_batches 和 y_batches 的函数

``````x_data = train[:size_train-1]: Select the training instance.
X_batches = x_data.reshape(-1, Windows, input): creating the right shape for the batch.
def create_batches(df, Windows, input, output):
## Create X
x_data = train[:size_train-1] # Select the data
X_batches = x_data.reshape(-1, windows, input)  # Reshaping the data in this line of code
## Create y
y_data = train[n_output:size_train]
y_batches = y_data.reshape(-1, Windows, output)
return X_batches, y_batches #return the function``````

``````Windows = n_
Windows, # Creating windows
input = n_input, output = n_output)``````

``````print(X_batches.shape, y_batches.shape)
(10, 20, 1) (10, 20, 1)``````

``````X_test, y_test = create_batches(df = test, windows = 20, input = 1, output = 1)
print(X_test.shape, y_test.shape)
(10, 20, 1) (10, 20, 1)``````

### 步骤3)建立模型

1. 带张量的变量
2. RNN
3. 损失与优化

1.变量

• None：批次大小（batch size）事先未知，用 None 占位
• n_windows：窗口的长度
• n_input：输入数

``````tf.placeholder(tf.float32, [None, n_windows, n_input])
## 1. Construct the tensors
X = tf.placeholder(tf.float32, [None, n_windows, n_input])
y = tf.placeholder(tf.float32, [None, n_windows, n_output])``````

2.创建RNN

``````## 2. create the model
# One recurrent layer with r_neuron units and ReLU activation.
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=r_neuron, activation=tf.nn.relu)
# dynamic_rnn unrolls the cell over the n_windows time steps of X.
rnn_output, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)``````

``````# Flatten the time dimension so one dense layer maps every step's
# r_neuron-dim state down to n_output values...
stacked_rnn_output = tf.reshape(rnn_output, [-1, r_neuron])
stacked_outputs = tf.layers.dense(stacked_rnn_output, n_output)
# ...then restore the (batch, n_windows, n_output) shape to match y.
outputs = tf.reshape(stacked_outputs, [-1, n_windows, n_output])``````

3. 定义损失函数与优化器

``tf.reduce_sum(tf.square(outputs - y))``

``````tf.train.AdamOptimizer(learning_rate=learning_rate)
optimizer.minimize(loss)``````

``````tf.reset_default_graph()
r_neuron = 120

## 1. Constructing the tensors
X = tf.placeholder(tf.float32, [None, n_windows, n_input])
y = tf.placeholder(tf.float32, [None, n_windows, n_output])``````
``````## 2. creating our models
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=r_neuron, activation=tf.nn.relu)
rnn_output, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)

stacked_rnn_output = tf.reshape(rnn_output, [-1, r_neuron])
stacked_outputs = tf.layers.dense(stacked_rnn_output, n_output)
outputs = tf.reshape(stacked_outputs, [-1, n_windows, n_output])

## 3. Loss optimization of RNN
learning_rate = 0.001

loss = tf.reduce_sum(tf.square(outputs - y))
training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()``````

``````iteration = 1500
with tf.Session() as sess:
init.run()
for iters in range(iteration):
sess.run(training_op, feed_dict={X: X_batches, y: y_batches})
if iters % 150 == 0:
mse = loss.eval(feed_dict={X: X_batches, y: y_batches})
print(iters, "\tMSE:", mse)
y_pred = sess.run(outputs, feed_dict={X: X_test})
"0 	MSE: 502893.34
150 	MSE: 13839.129
300 	MSE: 3964.835
450 	MSE: 2619.885
600 	MSE: 2418.772
750 	MSE: 2110.5923
900 	MSE: 1887.9644
1050 	MSE: 1747.1377
1200 	MSE: 1556.3398
1350  MSE: 1384.6113"``````

``````# Compare the model's one-step-ahead forecast with the held-out test values.
plt.title("Forecast vs Actual", fontsize=14)
# NOTE(review): the color= kwarg overrides the colors embedded in the
# "bo"/"r." format strings, so the dots are drawn green and red.
plt.plot(pd.Series(np.ravel(y_test)), "bo", markersize=8, label="actual", color='green')
plt.plot(pd.Series(np.ravel(y_pred)), "r.", markersize=8, label="forecast", color='red')
plt.legend(loc="lower left")
plt.xlabel("Time")
plt.show()``````

``````n_windows = 20
n_input =  1
n_output = 1
size_train = 201``````

``````X = tf.placeholder(tf.float32, [none, n_windows, n_input])
y = tf.placeholder(tf.float32, [none, n_windows, n_output])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=r_neuron, activation=tf.nn.relu)
rnn_output, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)
stacked_rnn_output = tf.reshape(rnn_output, [-1, r_neuron])
stacked_outputs = tf.layers.dense(stacked_rnn_output, n_output)
outputs = tf.reshape(stacked_outputs, [-1, n_windows, n_output])``````

``````learning_rate = 0.001
loss = tf.reduce_sum(tf.square(outputs - y))
training_op = optimizer.minimize(loss)``````

``````init = tf.global_variables_initializer()
iteration = 1500

with tf.Session() as sess:
init.run()
for iters in range(iteration):
sess.run(training_op, feed_dict={X: X_batches, y: y_batches})
if iters % 150 == 0:
mse = loss.eval(feed_dict={X: X_batches, y: y_batches})
print(iters, "\tMSE:", mse)
y_pred = sess.run(outputs, feed_dict={X: X_test})``````

• 回顶