Python tensorflow.python.ops.rnn_cell module: LSTMCell() example source code

We extracted the following 15 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.rnn_cell.LSTMCell().
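The excerpts below omit their imports. Here is a minimal, self-contained sketch of the pattern they all share, written against the pre-1.0 TensorFlow API these projects target (the shapes and names like n_hidden are illustrative assumptions):

import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell

batch_size, n_steps, n_input, n_hidden = 32, 10, 8, 64

# rnn.rnn expects a time-major list of [batch, n_input] tensors
x = tf.placeholder(tf.float32, [batch_size, n_steps, n_input])
inputs = tf.unpack(tf.transpose(x, [1, 0, 2]))

cell = rnn_cell.LSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
outputs, final_state = rnn.rnn(cell, inputs, dtype=tf.float32)
# outputs: list of n_steps tensors, each [batch, n_hidden]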

Project: albemarle    Author: SeanTater    | Project source | File source
def RNN(tensor, lens, n_hidden, n_summary, name, reuse):
    with tf.variable_scope(name, reuse=reuse) as scope:
        # Define weights
        weights = {
            'out': tf.Variable(tf.random_normal([n_hidden, n_summary]), name=name+"_weights")
        }
        biases = {
            'out': tf.Variable(tf.random_normal([n_summary]), name=name+"_biases")
        }

        # Define an LSTM cell
        lstm_cell = rnn_cell.LSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
        # Run the static RNN over the input sequence
        outputs, states = rnn.rnn(lstm_cell, tensor, sequence_length=lens, dtype=tf.float32, scope=scope)
        # Linear projection of the last timestep's output
        return tf.matmul(outputs[-1], weights['out']) + biases['out']
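A hedged sketch of how this helper might be called (the placeholder names and shapes are illustrative assumptions, not from the project). Note that rnn.rnn zeros the outputs past each sequence's end when sequence_length is given, so outputs[-1] is only meaningful for sequences that span the full window:

# hypothetical shapes; N_STEPS and N_INPUT are assumed constants
x = tf.placeholder(tf.float32, [None, N_STEPS, N_INPUT], name='x')
lens = tf.placeholder(tf.int32, [None], name='lens')
# rnn.rnn wants a time-major list of [batch, N_INPUT] tensors
inputs = tf.unpack(tf.transpose(x, [1, 0, 2]), num=N_STEPS)
summary = RNN(inputs, lens, n_hidden=128, n_summary=32, name='encoder', reuse=False)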


Project: albemarle    Author: SeanTater    | Project source | File source
def RNN(tensor, n_hidden, n_summary, name, reuse):
    with tf.variable_scope(name, reuse=reuse) as scope:
        # Define weights
        weights = {
            'out': tf.Variable(tf.random_normal([n_hidden, n_summary]), name=name+"_weights")
        }
        biases = {
            'out': tf.Variable(tf.random_normal([n_summary]), name=name+"_biases")
        }

        # Define an LSTM cell
        lstm_cell = rnn_cell.LSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
        # Run the static RNN over the input sequence (no explicit lengths here)
        outputs, states = rnn.rnn(lstm_cell, tensor, dtype=tf.float32, scope=scope)
        # Linear projection of the last timestep's output
        return tf.matmul(outputs[-1], weights['out']) + biases['out']


Project: RecursiveNN    Author: sapruash    | Project source | File source
def compute_states(self, emb):

        def unpack_sequence(tensor):
            # convert [batch, time, dim] into a time-major list of [batch, dim] tensors
            return tf.unpack(tf.transpose(tensor, perm=[1, 0, 2]))

        with tf.variable_scope("Composition",
                               initializer=tf.contrib.layers.xavier_initializer(),
                               regularizer=tf.contrib.layers.l2_regularizer(self.reg)):
            cell = rnn_cell.LSTMCell(self.hidden_dim)
            cell = rnn_cell.DropoutWrapper(cell,
                                           output_keep_prob=self.dropout,
                                           input_keep_prob=self.dropout)
            outputs, _ = rnn.rnn(cell, unpack_sequence(emb),
                                 sequence_length=self.lngths, dtype=tf.float32)

        # rnn.rnn zeros outputs past each sequence's end, so summing over time and
        # dividing by the true length gives a mean over the valid steps only
        sum_out = tf.reduce_sum(tf.pack(outputs), [0])
        sent_rep = tf.div(sum_out, tf.expand_dims(tf.to_float(self.lngths), 1))
        final_state = sent_rep
        return final_state
Project: pred_finance    Author: jjasonn0717    | Project source | File source
def createRNN(self):
        with self.sess.graph.as_default():
            self.prob = tf.placeholder("float", name="keep_prob")
            # input layer #
            with tf.name_scope("input"):
                self.s = tf.placeholder("float", [None, DAYS_RANGE, INPUT_DIM], name='input_state')
                s_tran = tf.transpose(self.s, [1, 0, 2])
                s_re = tf.reshape(s_tran, [-1, INPUT_DIM])
                s_list = tf.split(0, DAYS_RANGE, s_re)  # split into DAYS_RANGE tensors of shape [batch, INPUT_DIM]

            lstm_cell = rnn_cell.LSTMCell(1024, use_peepholes=True, forget_bias=1.0, state_is_tuple=True)
            lstm_drop = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=self.prob)
            # stack the dropout-wrapped cell so dropout is applied between layers
            lstm_stack = rnn_cell.MultiRNNCell([lstm_drop] * 3, state_is_tuple=True)

            lstm_output, hidden_states = rnn.rnn(lstm_stack, s_list, dtype='float', scope='LSTMStack') # out: [timestep, batch, hidden], state: [cell, c+h, batch, hidden]

            h_fc1 = self.FC_layer(lstm_output[-1], [1024, 1024], name='h_fc1', activate=True)
            h_fc1_d = tf.nn.dropout(h_fc1, keep_prob=self.prob, name='h_fc1_drop')
            h_fc2 = self.FC_layer(h_fc1_d, [1024, ACTIONS], name='h_fc2', activate=False)

            # output layer #
            self.pred_action = tf.nn.softmax(h_fc2)
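Because keep_prob is a placeholder, the dropout rate is chosen per session run; a minimal sketch (state_batch and the 0.8 keep rate are assumptions for illustration):

# training: keep 80% of LSTM activations
probs = self.sess.run(self.pred_action, feed_dict={self.s: state_batch, self.prob: 0.8})
# inference: disable dropout
probs = self.sess.run(self.pred_action, feed_dict={self.s: state_batch, self.prob: 1.0})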
Project: rnnprop    Author: vfleaking    | Project source | File source
def _build_pre(self):
        self.dimH = 20
        self.cellH = MultiRNNCell([LSTMCell(self.dimH)] * 2)
        self.lr = 0.1
Project: rnnprop    Author: vfleaking    | Project source | File source
def _build_pre(self):
        self.dimA = 20
        self.cellA = MultiRNNCell([LSTMCell(self.dimA)] * 2)
        self.b1 = 0.95
        self.b2 = 0.95
        self.lr = 0.1
        self.eps = 1e-8
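Both of these two-layer stacks expose the standard RNNCell interface; a minimal sketch of driving one step by hand (batch_size and input_dim are assumed values, not from the project):

x_t = tf.placeholder(tf.float32, [batch_size, input_dim])  # one timestep of input
state = self.cellA.zero_state(batch_size, tf.float32)
output, state = self.cellA(x_t, state)  # one recurrent step through both layers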
Project: SLAM    Author: sanjeevkumar42    | Project source | File source
def build_graph(self):
        with tf.variable_scope('lstm'):
            lstm_cell = LSTMCell(self.layer_size)
            stacked_cell = MultiRNNCell([lstm_cell] * self.layers)
            cell_output, self.init_state = stacked_cell(self.model_input, self.init_state)
            print("%i layers created" % self.layers)
            self.output_layer = self.__add_output_layer("fc_out", cell_output, self.layer_size, self.output_dim)

            self.output_layer = tf.Print(self.output_layer, [self.output_layer, tf.convert_to_tensor(self.ground_truth)],
                                          'Value of output layer and ground truth:', summarize=6)

            tf.histogram_summary('lstm_output', self.output_layer)

            return self.output_layer
Project: lsdc    Author: febert    | Project source | File source
def __init__(self, num_units, use_peepholes=False, forget_bias=1.0):
    super(Grid1LSTMCell, self).__init__(
        num_units=num_units, num_dims=1,
        input_dims=0, output_dims=0, priority_dims=0,
        cell_fn=lambda n, i: rnn_cell.LSTMCell(
            num_units=n, input_size=i, use_peepholes=use_peepholes,
            forget_bias=forget_bias, state_is_tuple=False))
Project: lsdc    Author: febert    | Project source | File source
def __init__(self,
               num_units,
               tied=False,
               non_recurrent_fn=None,
               use_peepholes=False,
               forget_bias=1.0):
    super(Grid2LSTMCell, self).__init__(
        num_units=num_units, num_dims=2,
        input_dims=0, output_dims=0, priority_dims=0, tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=lambda n, i: rnn_cell.LSTMCell(
            num_units=n, input_size=i, forget_bias=forget_bias,
            use_peepholes=use_peepholes, state_is_tuple=False),
        non_recurrent_fn=non_recurrent_fn)
Project: lsdc    Author: febert    | Project source | File source
def __init__(self,
               num_units,
               tied=False,
               non_recurrent_fn=None,
               use_peepholes=False,
               forget_bias=1.0):
    super(Grid3LSTMCell, self).__init__(
        num_units=num_units, num_dims=3,
        input_dims=0, output_dims=0, priority_dims=0, tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=lambda n, i: rnn_cell.LSTMCell(
            num_units=n, input_size=i, forget_bias=forget_bias,
            use_peepholes=use_peepholes, state_is_tuple=False),
        non_recurrent_fn=non_recurrent_fn)
Project: lsdc    Author: febert    | Project source | File source
def __init__(self,
               num_units,
               forget_bias=1.0,
               use_peephole=False,
               use_compatible_names=False):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      use_peephole: Whether to use peephole connections or not.
      use_compatible_names: If True, use the same variable naming as
        rnn_cell.LSTMCell
    """
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._use_peephole = use_peephole
    if use_compatible_names:
      self._names = {
          "W": "W_0",
          "b": "B",
          "wci": "W_I_diag",
          "wco": "W_O_diag",
          "wcf": "W_F_diag",
          "scope": "LSTMCell"
      }
    else:
      self._names = {
          "W": "W",
          "b": "b",
          "wci": "wci",
          "wco": "wco",
          "wcf": "wcf",
          "scope": "LSTMBlockCell"
      }
Project: RecursiveNN    Author: sapruash    | Project source | File source
def __init__(self, config):
        self.emb_dim = config.emb_dim
        self.hidden_dim = config.hidden_dim
        self.num_emb = config.num_emb
        self.output_dim = config.output_dim
        self.config=config
        self.batch_size=config.batch_size
        self.reg=self.config.reg
        self.internal = 4  # parameter for sampling sequences corresponding to subtrees
        assert self.emb_dim > 1 and self.hidden_dim > 1

        self.add_placeholders()

        emb_input = self.add_embedding()

        output_states = self.compute_states(emb_input)

        logits = self.create_output(output_states)

        self.pred = tf.nn.softmax(logits)

        self.loss, self.total_loss = self.calc_loss(logits)

        self.train_op1, self.train_op2 = self.add_training_op()
Project: RecursiveNN    Author: sapruash    | Project source | File source
def compute_states(self, emb):
        def unpack_sequence(tensor):
            # convert [batch, time, dim] into a time-major list of [batch, dim] tensors
            return tf.unpack(tf.transpose(tensor, perm=[1, 0, 2]))

        with tf.variable_scope("Composition",
                               initializer=tf.contrib.layers.xavier_initializer(),
                               regularizer=tf.contrib.layers.l2_regularizer(self.reg)):
            cell_fw = rnn_cell.LSTMCell(self.hidden_dim)
            cell_bw = rnn_cell.LSTMCell(self.hidden_dim)
            cell_fw = rnn_cell.DropoutWrapper(cell_fw,
                                              output_keep_prob=self.dropout,
                                              input_keep_prob=self.dropout)
            cell_bw = rnn_cell.DropoutWrapper(cell_bw,
                                              output_keep_prob=self.dropout,
                                              input_keep_prob=self.dropout)
            outputs, _, _ = rnn.bidirectional_rnn(cell_fw, cell_bw, unpack_sequence(emb),
                                                  sequence_length=self.lngths, dtype=tf.float32)

        # sum the zero-padded outputs over time and divide by the true lengths (masked mean)
        sum_out = tf.reduce_sum(tf.pack(outputs), [0])
        sent_rep = tf.div(sum_out, tf.expand_dims(tf.to_float(self.lngths), 1))
        final_state = sent_rep
        return final_state
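Note that bidirectional_rnn concatenates the forward and backward outputs at each timestep, so every element of outputs, and therefore sent_rep, has width 2 * hidden_dim; a quick shape check (a sketch, not from the project):

print(outputs[0].get_shape())  # expect (batch, 2 * hidden_dim): fw and bw halves concatenated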
Project: lsdc    Author: febert    | Project source | File source
def __init__(self,
               num_units,
               num_dims=1,
               input_dims=None,
               output_dims=None,
               priority_dims=None,
               non_recurrent_dims=None,
               tied=False,
               cell_fn=None,
               non_recurrent_fn=None):
    """Initialize the parameters of a Grid RNN cell

    Args:
      num_units: int, The number of units in all dimensions of this GridRNN
        cell.
      num_dims: int, Number of dimensions of this grid.
      input_dims: int or list, List of dimensions which will receive input
        data.
      output_dims: int or list, List of dimensions from which the output will
        be recorded.
      priority_dims: int or list, List of dimensions to be considered as
        priority dimensions. If None, no dimension is prioritized.
      non_recurrent_dims: int or list, List of dimensions that are not
        recurrent. The transfer function for non-recurrent dimensions is
        specified via `non_recurrent_fn`, which defaults to
        `tensorflow.nn.relu`.
      tied: bool, Whether to share the weights among the dimensions of this
        GridRNN cell. If there are non-recurrent dimensions in the grid,
        weights are shared between each group of recurrent and non-recurrent
        dimensions.
      cell_fn: function, a function which returns the recurrent cell object.
        Has to have the following signature:

          def cell_func(num_units, input_size):
            # ...

        and returns an object of type `RNNCell`. If None, LSTMCell with
        default parameters will be used.
      non_recurrent_fn: a tensorflow Op that will be the transfer function of
        the non-recurrent dimensions.
    """
    if num_dims < 1:
      raise ValueError('dims must be >= 1: {}'.format(num_dims))

    self._config = _parse_rnn_config(num_dims, input_dims, output_dims,
                                     priority_dims, non_recurrent_dims,
                                     non_recurrent_fn or nn.relu, tied,
                                     num_units)

    cell_input_size = (self._config.num_dims - 1) * num_units
    if cell_fn is None:
      self._cell = rnn_cell.LSTMCell(
          num_units=num_units, input_size=cell_input_size, state_is_tuple=False)
    else:
      self._cell = cell_fn(num_units, cell_input_size)
      if not isinstance(self._cell, rnn_cell.RNNCell):
        raise ValueError('cell_fn must return an object of type RNNCell')
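A hedged sketch of supplying a custom cell_fn matching the (num_units, input_size) contract above; the class name GridRNNCell and the peephole choice are assumptions for illustration:

grid = GridRNNCell(num_units=256, num_dims=2,
                   cell_fn=lambda n, i: rnn_cell.LSTMCell(
                       num_units=n, input_size=i,
                       use_peepholes=True, state_is_tuple=False))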
Project: pred_finance    Author: jjasonn0717    | Project source | File source
def createMultiRNN(self, n_layer, n_hidden):

        with self.sess.graph.as_default():
            self.prob = tf.placeholder("float", name="keep_prob")
            # input #
            with tf.name_scope('input'):
                self.s = tf.placeholder('float', shape=[None, INPUT_DIM, DAYS_RANGE], name='input_state')
                input_trans = tf.transpose(self.s, [2, 0, 1]) # [DAYS_RANGE, None, INPUT_DIM]
                input_reshape = tf.reshape(input_trans, [-1, INPUT_DIM])
                input_list = tf.split(0, DAYS_RANGE, input_reshape)  # split into DAYS_RANGE tensors of [batch, INPUT_DIM]

            with tf.name_scope('tg_input'):
                self.target_s = tf.placeholder('float', shape=[None, INPUT_DIM, DAYS_RANGE], name='input_state')
                tg_input_trans = tf.transpose(self.target_s, [2, 0, 1]) # [DAYS_RANGE, None, INPUT_DIM]
                tg_input_reshape = tf.reshape(tg_input_trans, [-1, INPUT_DIM])
                tg_input_list = tf.split(0, DAYS_RANGE, tg_input_reshape)  # split into DAYS_RANGE tensors of [batch, INPUT_DIM]

            # multi LSTM #
            lstm_cell = rnn_cell.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0, state_is_tuple=True)
            lstm_drop = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=self.prob)
            lstm_stack = rnn_cell.MultiRNNCell([lstm_drop] * n_layer, state_is_tuple=True)

            tg_lstm_cell = rnn_cell.LSTMCell(n_hidden, use_peepholes=True, forget_bias=1.0, state_is_tuple=True)
            tg_lstm_drop = rnn_cell.DropoutWrapper(tg_lstm_cell, output_keep_prob=self.prob)
            tg_lstm_stack = rnn_cell.MultiRNNCell([tg_lstm_drop] * n_layer, state_is_tuple=True)

            lstm_output, hidden_states = rnn.rnn(lstm_stack,
                                                 input_list,
                                                 dtype='float',
                                                 scope='LSTMStack') # out: [timestep, batch, hidden], state: [cell, 2(for c, h), batch, hidden]
            tg_lstm_output, tg_hidden_states = rnn.rnn(tg_lstm_stack, tg_input_list, dtype='float', scope='tg_LSTMStack')

            for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="LSTMStack"):
                tf.add_to_collection("L2_VARIABLES", var)

            h_fc1 = self.FC_layer(lstm_output[-1], tg_lstm_output[-1], [n_hidden, 1024], name='h_fc1', activate=True)
            h_fc2 = self.FC_layer(h_fc1[0], h_fc1[1], [1024, ACTIONS], name='h_fc2', activate=False)

            key = tf.GraphKeys.TRAINABLE_VARIABLES
            update_pair = zip(tf.get_collection(key, scope="LSTMStack"), tf.get_collection(key, scope="tg_LSTMStack"))
            for var, tg_var in update_pair:
                self.update_list.append(tg_var.assign(var))

            # readout layer
            self.readout = h_fc2[0]
            self.target_readout = h_fc2[1]
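The assign ops collected in update_list implement the usual target-network synchronization; a minimal sketch of a training loop that runs them periodically (MAX_STEPS and SYNC_INTERVAL are assumed constants):

for step in range(MAX_STEPS):
    # ... gradient update on the online network using self.readout ...
    if step % SYNC_INTERVAL == 0:
        self.sess.run(self.update_list)  # copy online LSTM weights into the target net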