
I want to try bidirectional_rnn to predict a time series, but I get the error "bidirectional_rnn: inputs must be a sequence". The code is (BiRNN_model.py):

import tensorflow as tf 

class BiRNN(object):
    """
    A bidirectional RNN.
    """

    def __init__(self, in_size, out_size, num_steps=20, cell_size=20, batch_size=50,
                 num_layers=2, keep_prob=0.5, is_training=True):
        """
        :param in_size: int, the dimension of the input
        :param out_size: int, the dimension of the output
        :param num_steps: int, the number of time steps
        :param cell_size: int, the size of the lstm cell
        :param batch_size: int, the size of a mini batch
        :param num_layers: int, the number of cells
        :param keep_prob: float, the keep probability of the dropout layer
        :param is_training: bool, set True for the training model, False for the test model
        """
        self.in_size = in_size
        self.out_size = out_size
        self.num_steps = num_steps
        self.cell_size = cell_size
        self.batch_size = batch_size
        self.num_layers = num_layers
        self.keep_prob = keep_prob
        self.is_training = is_training
        self.__build_model__()

    def __build_model__(self):
        """
        The inner method to construct the BiRNN model.
        """
        # Input and output placeholders
        self.x = tf.placeholder(tf.float32, shape=[None, self.num_steps, self.in_size])
        self.y = tf.placeholder(tf.float32, shape=[None, self.num_steps, self.out_size])

        # Add the first input layer
        with tf.variable_scope("input"):
            # Reshape x to a 2-D tensor
            inputs = tf.reshape(self.x, shape=[-1, self.in_size])  # [batch_size*num_steps, in_size]
            W, b = self._get_weight_bias(self.in_size, self.cell_size)
            inputs = tf.nn.xw_plus_b(inputs, W, b, name="input_xW_plus_b")

        # Reshape to a 3-D tensor
        # inputs = tf.reshape(inputs, shape=[-1, self.num_steps, self.cell_size])  # [batch_size, num_steps, cell_size]
        inputs = tf.reshape(inputs, shape=[-1, self.in_size])

        # Dropout on the inputs
        if self.is_training and self.keep_prob < 1.0:
            inputs = tf.nn.dropout(inputs, keep_prob=self.keep_prob)

        # Construct the BiRNN cells
        biRNN_fw_cell = tf.contrib.rnn.BasicRNNCell(num_units=self.cell_size)
        biRNN_bw_cell = tf.contrib.rnn.BasicRNNCell(num_units=self.cell_size)
        fw_cell, bw_cell = biRNN_fw_cell, biRNN_bw_cell
        if self.is_training and self.keep_prob < 1.0:
            fw_cell = tf.contrib.rnn.DropoutWrapper(biRNN_fw_cell, output_keep_prob=self.keep_prob)
            bw_cell = tf.contrib.rnn.DropoutWrapper(biRNN_bw_cell, output_keep_prob=self.keep_prob)
        cell_Fw = tf.contrib.rnn.MultiRNNCell([fw_cell] * self.num_layers)
        cell_Bw = tf.contrib.rnn.MultiRNNCell([bw_cell] * self.num_layers)

        # The initial states
        self.init_state_fw = cell_Fw.zero_state(self.batch_size, dtype=tf.float32)
        self.init_state_bw = cell_Bw.zero_state(self.batch_size, dtype=tf.float32)

        # Add the BiRNN layer
        with tf.variable_scope("BRNN"):
            outputs, final_state_fw, final_state_bw = tf.contrib.rnn.static_bidirectional_rnn(
                cell_Fw, cell_Bw, inputs,
                initial_state_fw=self.init_state_fw,
                initial_state_bw=self.init_state_bw)
        self.final_state_fw = final_state_fw
        self.final_state_bw = final_state_bw

        # Add the output layer
        with tf.variable_scope("output"):
            output = tf.reshape(outputs, shape=[-1, self.cell_size])
            W, b = self._get_weight_bias(self.cell_size, self.out_size)
            output = tf.nn.xw_plus_b(output, W, b, name="output")

        self.pred = output
        losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [tf.reshape(self.pred, [-1])],
            [tf.reshape(self.y, [-1])],
            [tf.ones([self.batch_size * self.num_steps])],
            average_across_timesteps=True,
            softmax_loss_function=self._ms_cost)
        self.cost = tf.reduce_sum(losses) / tf.to_float(self.batch_size)

    def _ms_cost(self, y_pred, y_target):
        """The quadratic cost function."""
        return 0.5 * tf.square(y_pred - y_target)

    def _get_weight_bias(self, in_size, out_size):
        """
        Create weight and bias variables.
        """
        weights = tf.get_variable("weight", shape=[in_size, out_size],
                                  initializer=tf.random_normal_initializer(mean=0.0, stddev=1.0))
        biases = tf.get_variable("bias", shape=[out_size], initializer=tf.constant_initializer(0.1))
        return weights, biases

However, when I run the code, I get this error:

File "../model/BiRNN_model.py", line 70, in __build_model__
    initial_state_bw = self.init_state_bw
File "/home/lucas/.local/lib/python3.5/site-packages/tensorflow/contrib/rnn/python/ops/core_rnn.py", line 328, in static_bidirectional_rnn
    raise TypeError("inputs must be a sequence")
TypeError: inputs must be a sequence

So the inputs argument of static_bidirectional_rnn is not a sequence. I am new to TensorFlow and deep learning, and I have spent many days trying to fix this error, but I failed. Can someone help me? Thank you.

Answer


I assume you want to create a fully unrolled bidirectional recurrent neural network, since the function 'static_bidirectional_rnn' is used. That function expects a sequence of tensors, so the input should be unstacked along the time-step dimension ('unstack' in recent tf versions).
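To make the expected format concrete, here is a small shape sketch of my own (assuming the TF 1.x API; batch_size=50 and num_steps=20 follow your constructor defaults, and in_size=1 is just a hypothetical value):

import tensorflow as tf

# A 3-D batch of inputs: [batch_size=50, num_steps=20, in_size=1]
x = tf.placeholder(tf.float32, shape=[50, 20, 1])
# Unstack along axis 1 -> a Python list of 20 tensors, each of shape [50, 1];
# this list is the "sequence" that static_bidirectional_rnn expects
seq = tf.unstack(x, num=20, axis=1)
print(len(seq), seq[0].get_shape())  # 20 (50, 1)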

The error is in this line, because it does not separate the time steps:

inputs = tf.reshape(inputs, shape=[-1, self.in_size]) 

It should be something like the following:

inputs = tf.unstack(inputs, self.num_steps, 1) # Unstack to get a list of 'self.num_steps' tensors of shape (batch_size, in_size) 
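Since inputs is 2-D at that point in your code (it has just been through the input projection), you would first reshape it back to 3-D, essentially restoring your commented-out reshape, and then unstack. A sketch of the corrected input section, based on my reading of your code (untested):

with tf.variable_scope("input"):
    # Project each time step: [batch_size*num_steps, cell_size]
    inputs = tf.reshape(self.x, shape=[-1, self.in_size])
    W, b = self._get_weight_bias(self.in_size, self.cell_size)
    inputs = tf.nn.xw_plus_b(inputs, W, b, name="input_xW_plus_b")

# Restore the time dimension: [batch_size, num_steps, cell_size]
inputs = tf.reshape(inputs, shape=[-1, self.num_steps, self.cell_size])
# Unstack into a list of num_steps tensors of shape [batch_size, cell_size],
# the sequence format static_bidirectional_rnn expects
inputs = tf.unstack(inputs, self.num_steps, 1)

Note also that static_bidirectional_rnn concatenates the forward and backward outputs, so each per-step output has 2 * cell_size features; the reshape and weight matrix in your output layer would need to account for that.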