Python tensorflow.python.ops.rnn_cell module: LSTMStateTuple() example source code

The following 15 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.rnn_cell.LSTMStateTuple().
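
A minimal sketch first (assuming an older TensorFlow release where this internal module path still resolves; in TensorFlow 1.x the same class is exposed as tf.nn.rnn_cell.LSTMStateTuple): LSTMStateTuple is just a namedtuple that bundles the cell state c and the hidden state h, so it unpacks like a plain tuple while keeping named fields.

import tensorflow as tf
from tensorflow.python.ops.rnn_cell import LSTMStateTuple  # older internal path used by the examples below

# c: cell state, h: hidden/output state -- both [batch_size, num_units]
c = tf.zeros([32, 128])
h = tf.zeros([32, 128])

state = LSTMStateTuple(c, h)
print(state.c, state.h)   # named field access
new_c, new_h = state      # tuple-style unpacking, as in the __call__ methods below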

Project: Tensorflow-SegNet    Author: tkuanlun350
def __call__(self, inputs, state, scope=None):
    """Convolutional Long short-term memory cell (ConvLSTM)."""
    with vs.variable_scope(scope or type(self).__name__): # "ConvLSTMCell"
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(3, 2, state)

      # batch_size * height * width * channel
      concat = _conv([inputs, h], 4 * self._num_units, self._k_size, True, initializer=self._initializer)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(3, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(3, [new_c, new_h])
      return new_h, new_state
Project: BDD_Driving_Model    Author: gy20073
def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            if self._state_is_tuple:
                c, h = state
            else:
                c, h = tf.split(3, 2, state)
            concat = _conv_linear([inputs, h], self.filter_size, self.num_features * 4, True)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = tf.split(3, 4, concat)

            new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) *
                     self._activation(j))
            new_h = self._activation(new_c) * tf.nn.sigmoid(o)

            if self._state_is_tuple:
                new_state = LSTMStateTuple(new_c, new_h)
            else:
                new_state = tf.concat(3, [new_c, new_h])
            return new_h, new_state
Project: Conv3D_CLSTM    Author: GuangmingZhu
def __call__(self, inputs, state, k_size=3, scope=None):
    """Convolutional Long short-term memory cell (ConvLSTM)."""
    with vs.variable_scope(scope or type(self).__name__): # "ConvLSTMCell"
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(3, 2, state)

      # batch_size * height * width * channel
      concat = _conv([inputs, h], 4 * self._num_units, k_size, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(3, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(3, [new_c, new_h])
      return new_h, new_state
Project: lsdc    Author: febert
def state_size(self):
    return rnn_cell.LSTMStateTuple(self._num_units, self._num_units)
Project: lsdc    Author: febert
def __call__(self, inputs, state, scope=None):
    """LSTM cell with layer normalization and recurrent dropout."""

    with vs.variable_scope(scope or type(self).__name__) as scope:  # LayerNormBasicLSTMCell  # pylint: disable=unused-variables
      c, h = state
      args = array_ops.concat(1, [inputs, h])
      concat = self._linear(args)

      i, j, f, o = array_ops.split(1, 4, concat)
      if self._layer_norm:
        i = self._norm(i, "input")
        j = self._norm(j, "transform")
        f = self._norm(f, "forget")
        o = self._norm(o, "output")

      g = self._activation(j)
      if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
        g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

      new_c = (c * math_ops.sigmoid(f + self._forget_bias)
               + math_ops.sigmoid(i) * g)
      if self._layer_norm:
        new_c = self._norm(new_c, "state")
      new_h = self._activation(new_c) * math_ops.sigmoid(o)

      new_state = rnn_cell.LSTMStateTuple(new_c, new_h)
      return new_h, new_state
Project: lsdc    Author: febert
def state_size(self):
    return rnn_cell.LSTMStateTuple(self._num_units, self._num_units)
Project: Tensorflow-SegNet    Author: tkuanlun350
def state_size(self):
    return (LSTMStateTuple(self._num_units, self._num_units)
            if self._state_is_tuple else 2 * self._num_units)
Project: Tensorflow-SegNet    Author: tkuanlun350
def state_size(self):
    return (LSTMStateTuple(self._num_units, self._num_units)
            if self._state_is_tuple else 2 * self._num_units)
Project: Tensorflow-SegNet    Author: tkuanlun350
def __call__(self, inputs, state, scope=None):
    """Convolutional Long short-term memory cell (ConvLSTM)."""
    with vs.variable_scope(scope or type(self).__name__): # "ConvLSTMCell"
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(3, 2, state)
      s1 = vs.get_variable("s1", initializer=tf.ones([self._height, self._width, 4 * self._num_units]), dtype=tf.float32)
      s2 = vs.get_variable("s2", initializer=tf.ones([self._height, self._width, 4 * self._num_units]), dtype=tf.float32)
      # s3 = vs.get_variable("s3", initializer=tf.ones([self._batch_size, self._num_units]), dtype=tf.float32)

      b1 = vs.get_variable("b1", initializer=tf.zeros([self._height, self._width, 4 * self._num_units]), dtype=tf.float32)
      b2 = vs.get_variable("b2", initializer=tf.zeros([self._height, self._width, 4 * self._num_units]), dtype=tf.float32)
      # b3 = vs.get_variable("b3", initializer=tf.zeros([self._batch_size, self._num_units]), dtype=tf.float32)
      input_below_ = _conv([inputs], 4 * self._num_units, self._k_size, False, initializer=self._initializer, scope="out_1")
      input_below_ = ln(input_below_, s1, b1)
      state_below_ = _conv([h], 4 * self._num_units, self._k_size, False, initializer=self._initializer, scope="out_2")
      state_below_ = ln(state_below_, s2, b2)
      lstm_matrix = tf.add(input_below_, state_below_)

      i, j, f, o = array_ops.split(3, 4, lstm_matrix)

      # batch_size * height * width * channel
      # concat = _conv([inputs, h], 4 * self._num_units, self._k_size, True, initializer=self._initializer)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      # i, j, f, o = array_ops.split(3, 4, lstm_matrix)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(3, [new_c, new_h])
      return new_h, new_state
Project: BDD_Driving_Model    Author: gy20073
def state_size(self):
        return (LSTMStateTuple(self.num_features, self.num_features)
                if self._state_is_tuple else 2 * self.num_features)
Project: Conv3D_CLSTM    Author: GuangmingZhu
def state_size(self):
    return (LSTMStateTuple(self._num_units, self._num_units)
            if self._state_is_tuple else 2 * self._num_units)
Project: lsdc    Author: febert
def __init__(self, num_units, use_peepholes=False,
               initializer=None, num_proj=None, proj_clip=None,
               num_unit_shards=1, num_proj_shards=1,
               forget_bias=1.0, state_is_tuple=False,
               activation=math_ops.tanh):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      proj_clip: (optional) A float value.  If `num_proj > 0` and `proj_clip` is
        provided, then the projected values are clipped elementwise to within
        `[-proj_clip, proj_clip]`.
      num_unit_shards: How to split the weight matrix.  If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix.  If >1, the
        projection matrix is stored across num_proj_shards.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  By default (False), they are concatenated
        along the column axis.  This default behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated.  Use state_is_tuple=True." % self)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._initializer = initializer
    self._num_proj = num_proj
    self._proj_clip = proj_clip
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation

    if num_proj:
      self._state_size = (
          rnn_cell.LSTMStateTuple(num_units, num_proj)
          if state_is_tuple else num_units + num_proj)
      self._output_size = num_proj
    else:
      self._state_size = (
          rnn_cell.LSTMStateTuple(num_units, num_units)
          if state_is_tuple else 2 * num_units)
      self._output_size = num_units
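
For comparison, the public LSTMCell exposes the same num_proj behaviour; the sketch below (an illustration using tf.nn.rnn_cell.LSTMCell from TensorFlow 1.x, not the lsdc fork above) shows how state_size becomes LSTMStateTuple(num_units, num_proj) once a projection layer is enabled.

import tensorflow as tf

# No projection: both parts of the state have size num_units.
cell = tf.nn.rnn_cell.LSTMCell(num_units=128, state_is_tuple=True)
print(cell.state_size)        # LSTMStateTuple(c=128, h=128)

# With projection: h (and the cell output) is projected down to num_proj.
proj_cell = tf.nn.rnn_cell.LSTMCell(num_units=128, num_proj=64, state_is_tuple=True)
print(proj_cell.state_size)   # LSTMStateTuple(c=128, h=64)
print(proj_cell.output_size)  # 64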
Project: lsdc    Author: febert
def __init__(self, num_units, use_peepholes=False,
               initializer=None, num_proj=None, proj_clip=None,
               num_unit_shards=1, num_proj_shards=1,
               forget_bias=1.0, state_is_tuple=False,
               activation=math_ops.tanh):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      proj_clip: (optional) A float value.  If `num_proj > 0` and `proj_clip` is
        provided, then the projected values are clipped elementwise to within
        `[-proj_clip, proj_clip]`.
      num_unit_shards: How to split the weight matrix.  If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix.  If >1, the
        projection matrix is stored across num_proj_shards.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  By default (False), they are concatenated
        along the column axis.  This default behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated.  Use state_is_tuple=True." % self)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._initializer = initializer
    self._num_proj = num_proj
    self._proj_clip = proj_clip
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation

    if num_proj:
      self._state_size = (
          rnn_cell.LSTMStateTuple(num_units, num_proj)
          if state_is_tuple else num_units + num_proj)
      self._output_size = num_proj
    else:
      self._state_size = (
          rnn_cell.LSTMStateTuple(num_units, num_units)
          if state_is_tuple else 2 * num_units)
      self._output_size = num_units
Project: Neural-Turing-Machine    Author: camigord
def build_graph(self):
        """
        builds the computational graph that performs a step-by-step evaluation
        of the input data batches
        """
        self.unpacked_input_data = utility.unpack_into_tensorarray(self.input_data, 1, self.sequence_length)

        outputs = tf.TensorArray(tf.float32, self.sequence_length)
        read_weightings = tf.TensorArray(tf.float32, self.sequence_length)
        write_weightings = tf.TensorArray(tf.float32, self.sequence_length)
        write_vectors = tf.TensorArray(tf.float32, self.sequence_length)
        key_vectors = tf.TensorArray(tf.float32, self.sequence_length)
        beta_vectors = tf.TensorArray(tf.float32, self.sequence_length)
        shift_vectors = tf.TensorArray(tf.float32, self.sequence_length)
        gamma_vectors = tf.TensorArray(tf.float32, self.sequence_length)
        gates_vectors = tf.TensorArray(tf.float32, self.sequence_length)
        memory_vectors = tf.TensorArray(tf.float32, self.sequence_length)

        controller_state = self.controller.get_state() if self.controller.has_recurrent_nn else (tf.zeros(1), tf.zeros(1))
        if not isinstance(controller_state, LSTMStateTuple):
            controller_state = LSTMStateTuple(controller_state[0], controller_state[1])

        memory_state = self.memory.init_memory()
        final_results = None

        with tf.variable_scope("Sequence_Loop") as scope:
            time = tf.constant(0, dtype=tf.int32)

            final_results = tf.while_loop(
                cond=lambda time, *_: time < self.sequence_length,
                body=self._loop_body,
                loop_vars=(
                    time, memory_state, outputs,
                    read_weightings, write_weightings, controller_state, write_vectors,
                    key_vectors, beta_vectors, shift_vectors, gamma_vectors,
                    gates_vectors, memory_vectors
                ),
                parallel_iterations=32,
                swap_memory=True
            )


        dependencies = []
        if self.controller.has_recurrent_nn:
            dependencies.append(self.controller.update_state(final_results[5]))

        with tf.control_dependencies(dependencies):
            self.packed_output = utility.pack_into_tensor(final_results[2], axis=1)
            # packed_memory_view and its content is just for debugging purposes.
            self.packed_memory_view = {
                'read_weightings': utility.pack_into_tensor(final_results[3], axis=1),
                'write_weightings': utility.pack_into_tensor(final_results[4], axis=1),
                'write_vectors': utility.pack_into_tensor(final_results[6], axis=1),
                'key_vectors': utility.pack_into_tensor(final_results[7], axis=1),
                'beta_vectors': utility.pack_into_tensor(final_results[8], axis=1),
                'shift_vectors': utility.pack_into_tensor(final_results[9], axis=1),
                'gamma_vectors': utility.pack_into_tensor(final_results[10], axis=1),
                'gates_vectors': utility.pack_into_tensor(final_results[11], axis=1),
                'memory_vectors': utility.pack_into_tensor(final_results[12], axis=1)
            }
Project: Neural-Turing-Machine    Author: camigord
def _loop_body(self, time, memory_state, outputs, read_weightings, write_weightings,
                   controller_state, write_vectors, key_vectors, beta_vectors, shift_vectors,
                   gamma_vectors, gates_vectors, memory_vectors):
        """
        the body of the DNC sequence processing loop

        Parameters:
        ----------
        time: Tensor
        memory_state: Tuple
        outputs: TensorArray
        read_weightings: TensorArray,
        write_weightings: TensorArray,
        controller_state: Tuple

        Returns: Tuple containing all updated arguments
        """

        step_input = self.unpacked_input_data.read(time)
        output_list = self._step_op(step_input, memory_state, controller_state)

        # update memory parameters
        new_controller_state = tf.zeros(1)
        new_memory_state = tuple(output_list[0:4])

        new_controller_state = LSTMStateTuple(output_list[5], output_list[6])

        outputs = outputs.write(time, output_list[4])

        # collecting memory view for the current step
        read_weightings = read_weightings.write(time, output_list[2])
        write_weightings = write_weightings.write(time, output_list[1])
        write_vectors = write_vectors.write(time, output_list[7])
        key_vectors = key_vectors.write(time, output_list[8])
        beta_vectors = beta_vectors.write(time, output_list[9])
        shift_vectors = shift_vectors.write(time, output_list[10])
        gamma_vectors = gamma_vectors.write(time, output_list[11])
        gates_vectors = gates_vectors.write(time, output_list[12])
        memory_vectors = memory_vectors.write(time, output_list[0])

        return (
            time + 1, new_memory_state, outputs,
            read_weightings, write_weightings,
            new_controller_state, write_vectors,
            key_vectors, beta_vectors, shift_vectors, gamma_vectors,
            gates_vectors, memory_vectors
        )
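
Because LSTMStateTuple is a namedtuple, tf.while_loop can carry it in loop_vars and preserve its (c, h) structure across iterations, which is exactly how build_graph threads controller_state through _loop_body above. A stripped-down sketch of that pattern (assuming TensorFlow 1.x graph mode; the update inside body is a placeholder, not the actual DNC controller step):

import tensorflow as tf
from tensorflow.python.ops.rnn_cell import LSTMStateTuple  # tf.nn.rnn_cell.LSTMStateTuple in TF 1.x

seq_len = 10
init_state = LSTMStateTuple(tf.zeros([8, 64]), tf.zeros([8, 64]))

def body(time, state):
    # stand-in update; a real model would run its recurrent cell here
    new_c = state.c + 1.0
    new_state = LSTMStateTuple(new_c, tf.tanh(new_c))
    return time + 1, new_state

_, final_state = tf.while_loop(
    cond=lambda time, *_: time < seq_len,
    body=body,
    loop_vars=(tf.constant(0), init_state))

print(final_state.c, final_state.h)  # the (c, h) structure survives the loop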