The following are 50 code examples, extracted from open source Python projects, that illustrate how to use tensorflow.python.ops.array_ops.gather().
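As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below) of what gather does: `gather(params, indices)` selects slices of `params` along axis 0 at the positions given by `indices`. The tensors here are made up for illustration; the examples below mostly target the older TF 1.x module layout.

import tensorflow as tf
from tensorflow.python.ops import array_ops

# params: 4 rows of 2 values; indices pick rows 2 and 0, in that order.
params = tf.constant([[1., 2.], [3., 4.], [5., 6.], [7., 8.]])
indices = tf.constant([2, 0])

# gather(params, indices)[i, :] == params[indices[i], :]
rows = array_ops.gather(params, indices)  # -> [[5., 6.], [1., 2.]]
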
def _gini(self, class_counts):
  """Calculate the Gini impurity.

  If c(i) denotes the i-th class count and c = sum_i c(i) then
    score = 1 - sum_i ( c(i) / c )^2

  Args:
    class_counts: A 2-D tensor of per-class counts, usually a slice or
      gather from variables.node_sums.

  Returns:
    A 1-D tensor of the Gini impurities for each row in the input.
  """
  smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
  sums = math_ops.reduce_sum(smoothed, 1)
  sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
  return 1.0 - sum_squares / (sums * sums)

def _weighted_gini(self, class_counts):
  """Our split score is the Gini impurity times the number of examples.

  If c(i) denotes the i-th class count and c = sum_i c(i) then
    score = c * (1 - sum_i ( c(i) / c )^2 )
          = c - sum_i c(i)^2 / c

  Args:
    class_counts: A 2-D tensor of per-class counts, usually a slice or
      gather from variables.node_sums.

  Returns:
    A 1-D tensor of the Gini impurities for each row in the input.
  """
  smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
  sums = math_ops.reduce_sum(smoothed, 1)
  sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
  return sums - sum_squares / sums

def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)

  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini

  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.

  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)

def _linear_predictions(self, examples):
  """Returns predictions of the form w*x."""
  with name_scope('sdca/prediction'):
    sparse_variables = self._convert_n_to_tensor(self._variables[
        'sparse_features_weights'])
    result = 0.0
    for sfc, sv in zip(examples['sparse_features'], sparse_variables):
      # TODO(sibyl-Aix6ihai): following does not take care of missing features.
      result += math_ops.segment_sum(
          math_ops.mul(
              array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
          sfc.example_indices)
    dense_features = self._convert_n_to_tensor(examples['dense_features'])
    dense_variables = self._convert_n_to_tensor(self._variables[
        'dense_features_weights'])
    for i in range(len(dense_variables)):
      result += math_ops.matmul(dense_features[i],
                                array_ops.expand_dims(dense_variables[i], -1))
  # Reshaping to allow shape inference at graph construction time.
  return array_ops.reshape(result, [-1])

def vector_space_dimension(self, name="vector_space_dimension"):
  """Dimension of vector space on which this acts.  The `k` in `R^k`.

  If this operator represents the batch matrix `A` with
  `A.shape = [N1,...,Nn, k, k]`, the `vector_space_dimension` is `k`.

  Args:
    name: A name scope to use for ops added by this method.

  Returns:
    `int32` `Tensor`
  """
  # Derived classes get this "for free" once .shape() is implemented.
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=self.inputs):
      return array_ops.gather(self.shape(), self.rank() - 1)

def _check_shape(self, shape):
  """Check that the init arg `shape` defines a valid operator."""
  shape = ops.convert_to_tensor(shape, name="shape")
  if not self._verify_pd:
    return shape

  # Further checks are equivalent to verification that this is positive
  # definite.  Why?  Because the further checks simply check that this is a
  # square matrix, and combining the fact that this is square (and thus maps
  # a vector space R^k onto itself), with the behavior of .matmul(), this must
  # be the identity operator.
  rank = array_ops.size(shape)
  assert_matrix = check_ops.assert_less_equal(2, rank)
  with ops.control_dependencies([assert_matrix]):
    last_dim = array_ops.gather(shape, rank - 1)
    second_to_last_dim = array_ops.gather(shape, rank - 2)
    assert_square = check_ops.assert_equal(last_dim, second_to_last_dim)
    return control_flow_ops.with_dependencies([assert_matrix, assert_square],
                                              shape)

def _get_identity_operator(self, v):
  """Get an `OperatorPDIdentity` to play the role of `D` in `VDV^T`."""
  with ops.name_scope("get_identity_operator", values=[v]):
    if v.get_shape().is_fully_defined():
      v_shape = v.get_shape().as_list()
      v_batch_shape = v_shape[:-2]
      r = v_shape[-1]
      id_shape = v_batch_shape + [r, r]
    else:
      v_shape = array_ops.shape(v)
      v_rank = array_ops.rank(v)
      v_batch_shape = array_ops.slice(v_shape, [0], [v_rank - 2])
      r = array_ops.gather(v_shape, v_rank - 1)  # Last dim of v
      id_shape = array_ops.concat(0, (v_batch_shape, [r, r]))
    return operator_pd_identity.OperatorPDIdentity(
        id_shape, v.dtype, verify_pd=self._verify_pd)

def _check_chol(self, chol):
  """Verify that `chol` is proper."""
  chol = ops.convert_to_tensor(chol, name="chol")
  if not self.verify_pd:
    return chol

  shape = array_ops.shape(chol)
  rank = array_ops.rank(chol)

  is_matrix = check_ops.assert_rank_at_least(chol, 2)
  is_square = check_ops.assert_equal(
      array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))

  deps = [is_matrix, is_square]
  diag = array_ops.matrix_diag_part(chol)
  deps.append(check_ops.assert_positive(diag))

  return control_flow_ops.with_dependencies(deps, chol)

def _clip_sparse(self, grad, var):
  assert isinstance(grad, ops.IndexedSlices)
  clip_dims = self._vars_to_clip_dims[var]
  if 0 in clip_dims:
    logging.warning("Clipping norm across dims %s for %s is inefficient "
                    "when including sparse dimension 0.",
                    clip_dims, var.op.name)
    return self._clip_dense(var)

  with ops.colocate_with(var):
    var_subset = array_ops.gather(var.ref(), grad.indices)
  with self._maybe_colocate_with(var):
    normalized_var_subset = clip_ops.clip_by_norm(
        var_subset, self._max_norm, clip_dims)
    delta = ops.IndexedSlices(
        var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
  with ops.colocate_with(var):
    return var.scatter_sub(delta, use_locking=self._use_locking)

def _clip_sparse(self, grad, var):
  assert isinstance(grad, ops.IndexedSlices)
  clip_dims = self._vars_to_clip_dims[var]
  if 0 in clip_dims:
    logging.warning("Clipping norm across dims %s for %s is inefficient "
                    "when including sparse dimension 0.",
                    clip_dims, var.op.name)
    return self._clip_dense(var)

  with ops.colocate_with(var):
    var_subset = array_ops.gather(var, grad.indices)
  with self._maybe_colocate_with(var):
    normalized_var_subset = clip_ops.clip_by_norm(
        var_subset, self._max_norm, clip_dims)
    delta = ops.IndexedSlices(
        var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
  with ops.colocate_with(var):
    return var.scatter_sub(delta, use_locking=self._use_locking)

def _linear_predictions(self, examples):
  """Returns predictions of the form w*x."""
  with name_scope('sdca/prediction'):
    sparse_variables = self._convert_n_to_tensor(self._variables[
        'sparse_features_weights'])
    result = 0.0
    for sfc, sv in zip(examples['sparse_features'], sparse_variables):
      # TODO(sibyl-Aix6ihai): following does not take care of missing features.
      result += math_ops.segment_sum(
          math_ops.multiply(
              array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
          sfc.example_indices)
    dense_features = self._convert_n_to_tensor(examples['dense_features'])
    dense_variables = self._convert_n_to_tensor(self._variables[
        'dense_features_weights'])
    for i in range(len(dense_variables)):
      result += math_ops.matmul(dense_features[i],
                                array_ops.expand_dims(dense_variables[i], -1))
  # Reshaping to allow shape inference at graph construction time.
  return array_ops.reshape(result, [-1])

def input_fn(self, batch_size=None, points=None, num_epochs=None):
  """Returns an input_fn that randomly selects batches from given points."""
  batch_size = batch_size or self.batch_size
  points = points if points is not None else self.points
  num_points = points.shape[0]

  def _fn():
    x = constant_op.constant(points)
    if batch_size == num_points:
      return input_lib.limit_epochs(x, num_epochs=num_epochs), None
    indices = random_ops.random_uniform(
        constant_op.constant([batch_size]),
        minval=0, maxval=num_points - 1,
        dtype=dtypes.int32,
        seed=10)
    batch = array_ops.gather(x, indices)
    return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)

  return _fn

def _get_identity_operator(self, v):
  """Get an `OperatorPDIdentity` to play the role of `D` in `VDV^T`."""
  with ops.name_scope("get_identity_operator", values=[v]):
    if v.get_shape().is_fully_defined():
      v_shape = v.get_shape().as_list()
      v_batch_shape = v_shape[:-2]
      r = v_shape[-1]
      id_shape = v_batch_shape + [r, r]
    else:
      v_shape = array_ops.shape(v)
      v_rank = array_ops.rank(v)
      v_batch_shape = array_ops.strided_slice(v_shape, [0], [v_rank - 2])
      r = array_ops.gather(v_shape, v_rank - 1)  # Last dim of v
      id_shape = array_ops.concat((v_batch_shape, [r, r]), 0)
    return operator_pd_identity.OperatorPDIdentity(
        id_shape, v.dtype, verify_pd=self._verify_pd)

def next_inputs(self, time, outputs, state, sample_ids, name=None):
  with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                      [time, outputs, state, sample_ids]):
    (finished, base_next_inputs, state) = (
        super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
            time=time,
            outputs=outputs,
            state=state,
            sample_ids=sample_ids,
            name=name))

    def maybe_sample():
      """Perform scheduled sampling."""
      where_sampling = math_ops.cast(
          array_ops.where(sample_ids > -1), dtypes.int32)
      where_not_sampling = math_ops.cast(
          array_ops.where(sample_ids <= -1), dtypes.int32)
      where_sampling_flat = array_ops.reshape(where_sampling, [-1])
      where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
      sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
      inputs_not_sampling = array_ops.gather(
          base_next_inputs, where_not_sampling_flat)
      sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
      base_shape = array_ops.shape(base_next_inputs)
      return (array_ops.scatter_nd(indices=where_sampling,
                                   updates=sampled_next_inputs,
                                   shape=base_shape)
              + array_ops.scatter_nd(indices=where_not_sampling,
                                     updates=inputs_not_sampling,
                                     shape=base_shape))

    all_finished = math_ops.reduce_all(finished)
    next_inputs = control_flow_ops.cond(
        all_finished, lambda: base_next_inputs, maybe_sample)
    return (finished, next_inputs, state)

def _apply_sparse(self, grad, var):
  lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
  beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
  beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
  epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
  clip_multiplier_t = math_ops.cast(self.clip_multiplier_t, var.dtype.base_dtype)
  clip_epsilon_t = math_ops.cast(self.clip_epsilon_t, var.dtype.base_dtype)

  v = self.get_slot(var, "v")
  v_slice = array_ops.gather(v, grad.indices)

  # Clip the gradient so that each value exceeds its previous maximum by no
  # more than clip_multiplier.
  clipped_values = grad.values
  if self.clip_gradients:
    clipVal = v_slice * clip_multiplier_t + clip_epsilon_t
    clipped_values = clip_ops.clip_by_value(grad.values, -clipVal, clipVal)

  # m := beta1 * m + (1 - beta1) * g_t
  m = self.get_slot(var, "m")
  m_t_values = (beta1_t * array_ops.gather(m, grad.indices) +
                (1 - beta1_t) * clipped_values)
  m_t = state_ops.scatter_update(m, grad.indices, m_t_values,
                                 use_locking=self._use_locking)

  # v := max(beta2 * v, abs(grad))
  v_t_values = math_ops.maximum(beta2_t * v_slice, math_ops.abs(clipped_values))
  v_t = state_ops.scatter_update(v, grad.indices, v_t_values,
                                 use_locking=self._use_locking)

  # variable -= learning_rate * m_t / (epsilon_t + v_t)
  # We do not use the bias-correction term for the first moment; it does not
  # give an observable benefit.
  var_update = state_ops.scatter_sub(
      var, grad.indices,
      lr_t * m_t_values / (v_t_values + epsilon_t),
      use_locking=self._use_locking)
  return control_flow_ops.group(var_update, v_t, m_t)

def _testGradientsForAxis(
    self, inp_tensors, axis, output_shape, feed_dict=None):
  with self.test_session():
    c = array_ops.concat(inp_tensors, axis)
    grad_inp = (np.random.rand(*output_shape) +
                1j * np.random.rand(*output_shape)).astype(np.complex64)
    grad_tensor = constant_op.constant(
        [x for x in grad_inp.flatten()], shape=output_shape)
    grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
    concated_grad = array_ops.concat(grad, axis)
    result = concated_grad.eval(feed_dict=feed_dict)
    self.assertAllEqual(result, grad_inp)

# complex Gather not implemented
# def _testIndexedSlicesGradientsForAxis(
#     self, inp_tensors, axis, output_shape, gather_indexes, feed_dict=None):
#   with self.test_session():
#     c = array_ops.gather(
#         array_ops.concat(inp_tensors, axis), gather_indexes)
#     grad_inp = np.random.rand(*output_shape).astype("f")
#     grad_tensor = constant_op.constant(
#         [float(x) for x in grad_inp.flatten()], shape=output_shape)
#     grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
#     concated_grad = array_ops.gather(
#         array_ops.concat(grad, axis), gather_indexes)
#     result = concated_grad.eval(feed_dict=feed_dict)
#     self.assertAllEqual(result, grad_inp)

def gather(reference, indices):
  """Retrieves the elements of indices `indices` in the tensor `reference`.

  Arguments:
      reference: A tensor.
      indices: An integer tensor of indices.

  Returns:
      A tensor of same type as `reference`.
  """
  return array_ops.gather(reference, indices)


# ELEMENT-WISE OPERATIONS

def call(self, inputs):
  if K.dtype(inputs) != 'int32':
    inputs = K.cast(inputs, 'int32')
  out = K.gather(self.embeddings, inputs)
  return out

def crf_unary_score(tag_indices, sequence_lengths, inputs):
  """Computes the unary scores of tag sequences.

  Args:
    tag_indices: A [batch_size, max_seq_len] matrix of tag indices.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials.

  Returns:
    unary_scores: A [batch_size] vector of unary scores.
  """
  batch_size = array_ops.shape(inputs)[0]
  max_seq_len = array_ops.shape(inputs)[1]
  num_tags = array_ops.shape(inputs)[2]

  flattened_inputs = array_ops.reshape(inputs, [-1])

  offsets = array_ops.expand_dims(
      math_ops.range(batch_size) * max_seq_len * num_tags, 1)
  offsets += array_ops.expand_dims(math_ops.range(max_seq_len) * num_tags, 0)
  flattened_tag_indices = array_ops.reshape(offsets + tag_indices, [-1])

  unary_scores = array_ops.reshape(
      array_ops.gather(flattened_inputs, flattened_tag_indices),
      [batch_size, max_seq_len])

  masks = _lengths_to_masks(sequence_lengths, array_ops.shape(tag_indices)[1])

  unary_scores = math_ops.reduce_sum(unary_scores * masks, 1)
  return unary_scores

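The offset arithmetic above flattens a 3-D index: entry [b, t] with tag y lands at flat index b*max_seq_len*num_tags + t*num_tags + y. A small NumPy check (illustrative, not part of the source; the shapes are made up):

import numpy as np

batch_size, max_seq_len, num_tags = 2, 3, 4
inputs = np.arange(batch_size * max_seq_len * num_tags,
                   dtype=np.float32).reshape(batch_size, max_seq_len, num_tags)
tag_indices = np.array([[0, 1, 2], [3, 0, 1]])

# Same offsets as in crf_unary_score, built with broadcasting.
offsets = (np.arange(batch_size)[:, None] * max_seq_len * num_tags +
           np.arange(max_seq_len)[None, :] * num_tags)
unary = inputs.reshape(-1)[(offsets + tag_indices).reshape(-1)]
unary = unary.reshape(batch_size, max_seq_len)

# unary[b, t] == inputs[b, t, tag_indices[b, t]] for every b, t.
expected = np.take_along_axis(inputs, tag_indices[..., None], axis=2)[..., 0]
assert np.allclose(unary, expected)
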
def crf_binary_score(tag_indices, sequence_lengths, transition_params):
  """Computes the binary scores of tag sequences.

  Args:
    tag_indices: A [batch_size, max_seq_len] matrix of tag indices.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] matrix of binary potentials.

  Returns:
    binary_scores: A [batch_size] vector of binary scores.
  """
  # Get shape information.
  num_tags = transition_params.get_shape()[0]
  num_transitions = array_ops.shape(tag_indices)[1] - 1

  # Truncate by one on each side of the sequence to get the start and end
  # indices of each transition.
  start_tag_indices = array_ops.slice(tag_indices, [0, 0],
                                      [-1, num_transitions])
  end_tag_indices = array_ops.slice(tag_indices, [0, 1],
                                    [-1, num_transitions])

  # Encode the indices in a flattened representation.
  flattened_transition_indices = (
      start_tag_indices * num_tags + end_tag_indices)
  flattened_transition_params = array_ops.reshape(transition_params, [-1])

  # Get the binary scores based on the flattened representation.
  binary_scores = array_ops.gather(flattened_transition_params,
                                   flattened_transition_indices)

  masks = _lengths_to_masks(sequence_lengths, array_ops.shape(tag_indices)[1])
  truncated_masks = array_ops.slice(masks, [0, 1], [-1, -1])
  binary_scores = math_ops.reduce_sum(binary_scores * truncated_masks, 1)
  return binary_scores

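The same flattening idea applies to the [num_tags, num_tags] transition matrix: the transition (s, e) lives at flat index s*num_tags + e. An illustrative NumPy check (not from the source):

import numpy as np

num_tags = 3
transition_params = np.arange(num_tags * num_tags,
                              dtype=np.float32).reshape(num_tags, num_tags)
start = np.array([[0, 2], [1, 1]])
end = np.array([[2, 1], [1, 0]])

flat_idx = start * num_tags + end
scores = transition_params.reshape(-1)[flat_idx.reshape(-1)].reshape(start.shape)
assert np.allclose(scores, transition_params[start, end])
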
def _select_last_activations(activations, sequence_lengths):
  """Selects the nth set of activations for each n in `sequence_length`.

  Returns a `Tensor` of shape `[batch_size, k]`.
  If `sequence_length` is not `None`, then
  `output[i, :] = activations[i, sequence_length[i], :]`.
  If `sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.

  Args:
    activations: a `Tensor` with shape `[batch_size, padded_length, k]`.
    sequence_lengths: a `Tensor` with shape `[batch_size]` or `None`.

  Returns:
    A `Tensor` of shape `[batch_size, k]`.
  """
  with ops.name_scope('select_last_activations',
                      values=[activations, sequence_lengths]):
    activations_shape = array_ops.shape(activations)
    batch_size = activations_shape[0]
    padded_length = activations_shape[1]
    num_label_columns = activations_shape[2]
    if sequence_lengths is None:
      sequence_lengths = padded_length
    reshaped_activations = array_ops.reshape(activations,
                                             [-1, num_label_columns])
    indices = math_ops.range(batch_size) * padded_length + sequence_lengths - 1
    last_activations = array_ops.gather(reshaped_activations, indices)
    last_activations.set_shape(
        [activations.get_shape()[0], activations.get_shape()[2]])
    return last_activations

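The index computation works because, after the reshape to [batch_size * padded_length, k], the row for (batch b, step t) sits at b*padded_length + t, so the last valid step of sequence b is b*padded_length + lengths[b] - 1. An illustrative NumPy check (shapes made up, not from the source):

import numpy as np

batch_size, padded_length, k = 2, 4, 3
activations = np.arange(batch_size * padded_length * k,
                        dtype=np.float32).reshape(batch_size, padded_length, k)
lengths = np.array([2, 4])

reshaped = activations.reshape(-1, k)
indices = np.arange(batch_size) * padded_length + lengths - 1
last = reshaped[indices]
assert np.array_equal(last[0], activations[0, 1])  # length 2 -> step index 1
assert np.array_equal(last[1], activations[1, 3])  # length 4 -> step index 3
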
def _flip_matrix_to_vector_dynamic(mat, batch_shape):
  """Flip matrix to vector with dynamic shapes."""
  mat_rank = array_ops.rank(mat)
  k = array_ops.gather(array_ops.shape(mat), mat_rank - 2)
  final_shape = array_ops.concat(0, (batch_shape, [k]))

  # mat.shape = matrix_batch_shape + [k, M]
  # Permutation corresponding to [M] + matrix_batch_shape + [k]
  perm = array_ops.concat(
      0, ([mat_rank - 1], math_ops.range(0, mat_rank - 1)))
  mat_with_end_at_beginning = array_ops.transpose(mat, perm=perm)
  vector = array_ops.reshape(mat_with_end_at_beginning, final_shape)
  return vector

def _check_shapes_dynamic(self, operator, v, diag):
  """Return (v, diag) with Assert dependencies, which check shape."""
  checks = []
  with ops.name_scope("check_shapes", values=[operator, v, diag]):
    s_v = array_ops.shape(v)
    r_op = operator.rank()
    r_v = array_ops.rank(v)
    if diag is not None:
      s_d = array_ops.shape(diag)
      r_d = array_ops.rank(diag)

    # Check tensor rank.
    checks.append(check_ops.assert_rank(v, r_op))
    if diag is not None:
      checks.append(check_ops.assert_rank(diag, r_op - 1))

    # Check batch shape.
    checks.append(check_ops.assert_equal(
        operator.batch_shape(), array_ops.slice(s_v, [0], [r_v - 2])))
    if diag is not None:
      checks.append(check_ops.assert_equal(
          operator.batch_shape(), array_ops.slice(s_d, [0], [r_d - 1])))

    # Check event shape.
    checks.append(check_ops.assert_equal(
        operator.vector_space_dimension(), array_ops.gather(s_v, r_v - 2)))
    if diag is not None:
      checks.append(check_ops.assert_equal(
          array_ops.gather(s_v, r_v - 1), array_ops.gather(s_d, r_d - 1)))

    v = control_flow_ops.with_dependencies(checks, v)
    if diag is not None:
      diag = control_flow_ops.with_dependencies(checks, diag)
    return v, diag

def _shape(self):
  d_shape = array_ops.shape(self._diag)
  k = array_ops.gather(d_shape, array_ops.size(d_shape) - 1)
  return array_ops.concat(0, (d_shape, [k]))

def _event_shape(self):
  return array_ops.gather(array_ops.shape(self.alpha),
                          [array_ops.rank(self.alpha) - 1])

def select_last_activations(activations, sequence_lengths):
  """Selects the nth set of activations for each n in `sequence_length`.

  Returns a `Tensor` of shape `[batch_size, k]`.
  If `sequence_length` is not `None`, then
  `output[i, :] = activations[i, sequence_length[i], :]`.
  If `sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.

  Args:
    activations: A `Tensor` with shape `[batch_size, padded_length, k]`.
    sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.

  Returns:
    A `Tensor` of shape `[batch_size, k]`.
  """
  with ops.name_scope('select_last_activations',
                      values=[activations, sequence_lengths]):
    activations_shape = array_ops.shape(activations)
    batch_size = activations_shape[0]
    padded_length = activations_shape[1]
    num_label_columns = activations_shape[2]
    if sequence_lengths is None:
      sequence_lengths = padded_length
    reshaped_activations = array_ops.reshape(activations,
                                             [-1, num_label_columns])
    indices = math_ops.range(batch_size) * padded_length + sequence_lengths - 1
    last_activations = array_ops.gather(reshaped_activations, indices)
    last_activations.set_shape(
        [activations.get_shape()[0], activations.get_shape()[2]])
    return last_activations

def embedding_lookup_unique(params, ids, name=None):
  """Version of embedding_lookup that avoids duplicate lookups.

  This can save communication in the case of repeated ids.
  Same interface as embedding_lookup. Except it supports multi-dimensional
  `ids` which allows to not reshape input/output to fit gather.

  Args:
    params: A list of tensors with the same shape and type, or a
      `PartitionedVariable`. Shape `[index, d1, d2, ...]`.
    ids: A one-dimensional `Tensor` with type `int32` or `int64` containing
      the ids to be looked up in `params`. Shape `[ids1, ids2, ...]`.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same type as the tensors in `params` and dimension of
    `[ids1, ids2, d1, d2, ...]`.

  Raises:
    ValueError: If `params` is empty.
  """
  with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
    ids = ops.convert_to_tensor(ids)
    shape = array_ops.shape(ids)
    ids_flat = array_ops.reshape(
        ids, math_ops.reduce_prod(shape, keep_dims=True))
    unique_ids, idx = array_ops.unique(ids_flat)
    unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids)
    embeds_flat = array_ops.gather(unique_embeddings, idx)
    embed_shape = array_ops.concat(
        0, [shape, array_ops.shape(unique_embeddings)[1:]])
    embeds = array_ops.reshape(embeds_flat, embed_shape)
    embeds.set_shape(ids.get_shape().concatenate(
        unique_embeddings.get_shape()[1:]))
    return embeds

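The dedup trick is: look each unique id up once, then gather the results back into the original order via the inverse index from unique. A small NumPy analogue (illustrative only; np.unique sorts its output where tf.unique keeps first-occurrence order, but the inverse index makes the round trip identical either way):

import numpy as np

params = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
ids = np.array([3, 1, 3, 3])

unique_ids, idx = np.unique(ids, return_inverse=True)  # like array_ops.unique
unique_embeddings = params[unique_ids]   # one lookup per unique id
embeds = unique_embeddings[idx]          # gather back to the original order
assert np.array_equal(embeds, params[ids])
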
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
  with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
    temp_axes = core.Axes(
        [axis] + list(labeled_tensor.axes.remove(axis.name).values()))
    transposed = core.transpose(labeled_tensor, temp_axes.keys())
    indexed = core.LabeledTensor(
        array_ops.gather(transposed.tensor, indexer), temp_axes)
    return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)

def _flip_vector_to_matrix_dynamic(vec, batch_shape):
  """flip_vector_to_matrix with dynamic shapes."""
  # Shapes associated with batch_shape
  batch_rank = array_ops.size(batch_shape)

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = array_ops.shape(vec)
  vec_rank = array_ops.rank(vec)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = array_ops.slice(vec_shape, [0], [m])
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
  k = array_ops.gather(vec_shape, vec_rank - 1)
  new_shape = array_ops.concat(0, (batch_shape, [k], condensed_shape))

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat(
        0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
    return array_ops.transpose(vec, perm=perm)

  x_flipped = control_flow_ops.cond(
      math_ops.less(0, m),
      _flip_front_dims_to_back,
      lambda: array_ops.expand_dims(vec, -1))

  return array_ops.reshape(x_flipped, new_shape)

def _event_shape(self):
  return array_ops.gather(array_ops.shape(self._mean_val),
                          [array_ops.rank(self._mean_val) - 1])

def _gather_states(self, data, indices, batch_size):
  """Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
  mod_indices = indices * batch_size + math_ops.range(batch_size)
  return array_ops.gather(
      array_ops.reshape(data, [-1, self.num_units]), mod_indices)

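The index arithmetic above: after reshaping `data` of shape [T, batch_size, num_units] to [T * batch_size, num_units], the row for (time t, batch b) is t*batch_size + b, so out[b] = data[indices[b], b] is gathered at indices[b]*batch_size + b. A small NumPy sketch (illustrative, shapes made up):

import numpy as np

T, batch_size, num_units = 3, 2, 4
data = np.arange(T * batch_size * num_units).reshape(T, batch_size, num_units)
indices = np.array([2, 0])

mod_indices = indices * batch_size + np.arange(batch_size)
out = data.reshape(-1, num_units)[mod_indices]
assert np.array_equal(out[0], data[2, 0])
assert np.array_equal(out[1], data[0, 1])
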