The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.slice().
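As a quick orientation before the project examples, here is a minimal standalone sketch (not taken from any of the projects below, assuming TensorFlow 1.x): `array_ops.slice(input_, begin, size)` takes a per-dimension starting index and a per-dimension size, and a size of -1 means "everything remaining in that dimension" — the pattern used throughout the examples that follow.

import tensorflow as tf
from tensorflow.python.ops import array_ops

# A 3x3 matrix to slice.
x = tf.constant([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9]])

# Keep every row, drop the first column: begin=[0, 1], size=[-1, -1].
tail_columns = array_ops.slice(x, [0, 1], [-1, -1])

# Keep every row, take only the first column: begin=[0, 0], size=[-1, 1].
first_column = array_ops.slice(x, [0, 0], [-1, 1])

with tf.Session() as sess:
  print(sess.run(tail_columns))  # [[2 3] [5 6] [8 9]]
  print(sess.run(first_column))  # [[1] [4] [7]]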
def _gini(self, class_counts):
  """Calculate the Gini impurity.

  If c(i) denotes the i-th class count and c = sum_i c(i) then
    score = 1 - sum_i ( c(i) / c )^2

  Args:
    class_counts: A 2-D tensor of per-class counts, usually a slice or
      gather from variables.node_sums.

  Returns:
    A 1-D tensor of the Gini impurities for each row in the input.
  """
  smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
  sums = math_ops.reduce_sum(smoothed, 1)
  sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
  return 1.0 - sum_squares / (sums * sums)
def _weighted_gini(self, class_counts):
  """Our split score is the Gini impurity times the number of examples.

  If c(i) denotes the i-th class count and c = sum_i c(i) then
    score = c * (1 - sum_i ( c(i) / c )^2 )
          = c - sum_i c(i)^2 / c

  Args:
    class_counts: A 2-D tensor of per-class counts, usually a slice or
      gather from variables.node_sums.

  Returns:
    A 1-D tensor of the Gini impurities for each row in the input.
  """
  smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
  sums = math_ops.reduce_sum(smoothed, 1)
  sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
  return sums - sum_squares / sums
def _variance(self, sums, squares):
  """Calculate the variance for each row of the input tensors.

  Variance is V = E[x^2] - (E[x])^2.

  Args:
    sums: A tensor containing output sums, usually a slice from
      variables.node_sums.  Should contain the number of examples seen
      in index 0 so we can calculate expected value.
    squares: Same as sums, but sums of squares.

  Returns:
    A 1-D tensor of the variances for each row in the input.
  """
  total_count = array_ops.slice(sums, [0, 0], [-1, 1])
  e_x = sums / total_count
  e_x2 = squares / total_count
  return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)

  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini

  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.

  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
def inference_graph(self, data):
  with ops.device(self.device_assigner.get_device(self.layer_num)):
    routing_probabilities = self.training_ops.k_feature_routing_function(
        data,
        self.tree_parameters,
        self.tree_thresholds,
        max_nodes=self.params.num_nodes,
        num_features_per_node=self.params.num_features_per_node,
        layer_num=0,
        random_seed=self.params.base_random_seed)

    output = array_ops.slice(
        routing_probabilities,
        [0, self.params.num_nodes - self.params.num_leaves - 1],
        [-1, self.params.num_leaves])

    return output
def soft_inference_graph(self, data):
  with ops.device(self.device_assigner.get_device(self.layer_num)):
    path_probability, path = (
        self.training_ops.stochastic_hard_routing_function(
            data,
            self.tree_parameters,
            self.tree_thresholds,
            tree_depth=self.params.hybrid_tree_depth,
            random_seed=self.params.base_random_seed))

    output = array_ops.slice(
        self.training_ops.unpack_path(path, path_probability),
        [0, self.params.num_nodes - self.params.num_leaves - 1],
        [-1, self.params.num_leaves])

    return output
def batch_shape(self, name="batch_shape"):
  """Shape of batches associated with this operator.

  If this operator represents the batch matrix `A` with
  `A.shape = [N1,...,Nn, k, k]`, the `batch_shape` is `[N1,...,Nn]`.

  Args:
    name: A name scope to use for ops added by this method.

  Returns:
    `int32` `Tensor`
  """
  # Derived classes get this "for free" once .shape() is implemented.
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=self.inputs):
      return array_ops.slice(self.shape(), [0], [self.rank() - 2])
def _get_identity_operator(self, v):
  """Get an `OperatorPDIdentity` to play the role of `D` in `VDV^T`."""
  with ops.name_scope("get_identity_operator", values=[v]):
    if v.get_shape().is_fully_defined():
      v_shape = v.get_shape().as_list()
      v_batch_shape = v_shape[:-2]
      r = v_shape[-1]
      id_shape = v_batch_shape + [r, r]
    else:
      v_shape = array_ops.shape(v)
      v_rank = array_ops.rank(v)
      v_batch_shape = array_ops.slice(v_shape, [0], [v_rank - 2])
      r = array_ops.gather(v_shape, v_rank - 1)  # Last dim of v
      id_shape = array_ops.concat(0, (v_batch_shape, [r, r]))
    return operator_pd_identity.OperatorPDIdentity(
        id_shape, v.dtype, verify_pd=self._verify_pd)
def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("Attention"):
    k = vs.get_variable("AttnW", [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("AttnV", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
def _dense_inner_flatten(inputs, new_rank):
  """Helper function for `inner_flatten`."""
  rank_assertion = check_ops.assert_rank_at_least(
      inputs, new_rank, message='inputs has rank less than new_rank')
  with ops.control_dependencies([rank_assertion]):
    outer_dimensions = array_ops.slice(
        array_ops.shape(inputs), [0], [new_rank - 1])
    new_shape = array_ops.concat(0, (outer_dimensions, [-1]))
    reshaped = array_ops.reshape(inputs, new_shape)

  # if `new_rank` is an integer, try to calculate new shape.
  if isinstance(new_rank, six.integer_types):
    static_shape = inputs.get_shape()
    if static_shape is not None and static_shape.dims is not None:
      static_shape = static_shape.as_list()
      static_outer_dims = static_shape[:new_rank - 1]
      static_inner_dims = static_shape[new_rank - 1:]
      flattened_dimension = 1
      for inner_dim in static_inner_dims:
        if inner_dim is None:
          flattened_dimension = None
          break
        flattened_dimension *= inner_dim
      reshaped.set_shape(static_outer_dims + [flattened_dimension])
  return reshaped
def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with tf.variable_scope("attention"):
    k = tf.get_variable(
        "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
    v = tf.get_variable("attn_v", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
def multiPLSTM(cells, inputs, lens, n_input, initial_states): """ Function to build multilayer PLSTM :param cells: :param inputs: :param lens: 2D tensor, length of the sequences in the batch (for synamic rnn use) :param n_input: integer, number of features in the input (without time feature) :param initial_states: list of tuples of initial states :return: 3D tensor, output of the multilayer PLSTM """ assert (len(initial_states) == len(cells)) times = tf.slice(inputs, [0, 0, n_input], [-1, -1, 1]) newX = tf.slice(inputs, [0, 0, 0], [-1, -1, n_input]) for k, cell, initial_state in zip(range(len(cells)), cells, initial_states): newX = tf.concat(2, [newX, times]) with tf.variable_scope("{}".format(k)): outputs, initial_state = tf.nn.dynamic_rnn(cell, newX, dtype=tf.float32, sequence_length=lens, initial_state=initial_state) newX = outputs return newX
def multiPLSTM(cells, inputs, lens, n_input, initial_states): """ Function to build multilayer PLSTM :param cells: :param inputs: :param lens: 2D tensor, length of the sequences in the batch (for synamic rnn use) :param n_input: integer, number of features in the input (without time feature) :param initial_states: list of tuples of initial states :return: 3D tensor, output of the multilayer PLSTM """ assert (len(initial_states) == len(cells)) times = tf.slice(inputs, [0, 0, n_input], [-1, -1, 1]) new_x = tf.slice(inputs, [0, 0, 0], [-1, -1, n_input]) for k, cell, initial_state in zip(range(len(cells)), cells, initial_states): new_x = tf.concat(axis=2, values=[new_x, times]) with tf.variable_scope("{}".format(k)): outputs, initial_states[k] = tf.nn.dynamic_rnn(cell, new_x, dtype=tf.float32, sequence_length=lens, initial_state=initial_state) new_x = outputs return new_x
def __call__(self, inputs, state, scope=None):
  """Run this multi-layer cell on inputs, starting from state."""
  with vs.variable_scope(scope or type(self).__name__):  # "MultiRNNCell"
    cur_state_pos = 0
    cur_inp = inputs
    new_states = []
    for i, cell in enumerate(self._cells):
      with vs.variable_scope("Cell%d" % i):
        if self._state_is_tuple:
          if not nest.is_sequence(state):
            raise ValueError(
                "Expected state to be a tuple of length %d, but received: %s"
                % (len(self.state_size), state))
          cur_state = state[i]
        else:
          cur_state = array_ops.slice(
              state, [0, cur_state_pos], [-1, cell.state_size])
          cur_state_pos += cell.state_size
        cur_inp, new_state = cell(cur_inp, cur_state)
        new_states.append(new_state)
  new_states = (tuple(new_states) if self._state_is_tuple
                else array_ops.concat(1, new_states))
  return cur_inp, new_states
def _split_logits(self, logits):
  """Splits logits for heads.

  Args:
    logits: the logits tensor.

  Returns:
    A list of logits for the individual heads.
  """
  all_logits = []
  begin = 0
  for head in self._heads:
    current_logits_size = head.logits_dimension
    current_logits = array_ops.slice(logits, [0, begin],
                                     [-1, current_logits_size])
    all_logits.append(current_logits)
    begin += current_logits_size
  return all_logits
def call(self, inputs, state):
  """Run this multi-layer cell on inputs, starting from state."""
  cur_state_pos = 0
  cur_inp = inputs
  new_states = []
  for i, cell in enumerate(self._cells):
    with vs.variable_scope("cell_%d" % i):
      if self._state_is_tuple:
        if not nest.is_sequence(state):
          raise ValueError(
              "Expected state to be a tuple of length %d, but received: %s" %
              (len(self.state_size), state))
        cur_state = state[i]
      else:
        cur_state = array_ops.slice(state, [0, cur_state_pos],
                                    [-1, cell.state_size])
        cur_state_pos += cell.state_size
      cur_inp, new_state = cell(cur_inp, cur_state)
      new_states.append(new_state)

  new_states = (tuple(new_states) if self._state_is_tuple else
                array_ops.concat(new_states, 1))

  return cur_inp, new_states
def crop_to_1d_bounding_box(image, offset_height, target_height,
                            dynamic_shape=False):
  """Crops an image to a specified bounding box.

  This op cuts a rectangular part out of `image`. The top-left corner of the
  returned image is at `offset_height, offset_width` in `image`, and its
  lower-right corner is at
  `offset_height + target_height, offset_width + target_width`.

  Args:
    image: 3-D tensor with shape `[height, width, channels]`
    offset_height: Vertical coordinate of the top-left corner of the result in
                   the input.
    target_height: Height of the result.
    dynamic_shape: Whether the input image has undetermined shape. If set to
      `True`, shape information will be retrieved at run time. Default to
      `False`.

  Returns:
    3-D tensor of image with shape `[target_height, target_width, channels]`

  Raises:
    ValueError: If the shape of `image` is incompatible with the `offset_*` or
      `target_*` arguments, and `dynamic_shape` is set to `False`.
  """
  image = tf.convert_to_tensor(image, name='image')
  height, _ = _ImageDimensions(image, dynamic_shape=dynamic_shape)

  cropped = array_ops.slice(image,
                            array_ops.pack([offset_height, 0]),
                            array_ops.pack([target_height, -1]))

  return cropped
def crop_to_bounding_box(image, offset_height, offset_width, target_height,
                         target_width, dynamic_shape=False):
  """Crops an image to a specified bounding box.

  This op cuts a rectangular part out of `image`. The top-left corner of the
  returned image is at `offset_height, offset_width` in `image`, and its
  lower-right corner is at
  `offset_height + target_height, offset_width + target_width`.

  Args:
    image: 3-D tensor with shape `[height, width, channels]`
    offset_height: Vertical coordinate of the top-left corner of the result in
                   the input.
    offset_width: Horizontal coordinate of the top-left corner of the result in
                  the input.
    target_height: Height of the result.
    target_width: Width of the result.
    dynamic_shape: Whether the input image has undetermined shape. If set to
      `True`, shape information will be retrieved at run time. Default to
      `False`.

  Returns:
    3-D tensor of image with shape `[target_height, target_width, channels]`

  Raises:
    ValueError: If the shape of `image` is incompatible with the `offset_*` or
      `target_*` arguments, and `dynamic_shape` is set to `False`.
  """
  image = tf.convert_to_tensor(image, name='image')
  _Check3DImage(image, require_static=(not dynamic_shape))
  shapes = _ImageDimensions(image, dynamic_shape=dynamic_shape)

  # Slice along height and width; -1 in the last position keeps all channels.
  cropped = array_ops.slice(image,
                            array_ops.pack([offset_height, offset_width, 0]),
                            array_ops.pack([target_height, target_width, -1]))

  return cropped
def _get_input_for_group(self, inpt, group_id, group_size):
  """
  Slices inputs into groups to prepare for processing by cell's groups
  :param inpt: inputs
  :param group_id: group id, for which to prepare extract input_group_id
  :param group_size: size of the group
  :return: subset of inputs, corresponding to group group_id
  """
  return array_ops.slice(input_=inpt,
                         begin=[0, group_id * group_size],
                         size=[inpt.get_shape()[0].value, group_size],
                         name="GLSTMinputGroupCreation")
def __call__(self, inputs, question, state, m_input_size, m_size, scope=None):
  """Run this multi-layer cell on inputs, starting from state."""
  with vs.variable_scope(scope or type(self).__name__):  # "MultiRNNCell"
    cur_state_pos = 0
    cur_inp = inputs
    new_states = []
    for i, cell in enumerate(self._cells):
      with vs.variable_scope("Cell%d" % i):
        cur_state = array_ops.slice(
            state, [0, cur_state_pos], [-1, cell.state_size])
        cur_state_pos += cell.state_size
        cur_inp, new_state = cell(cur_inp, question, cur_state,
                                  m_input_size, m_size)
        new_states.append(new_state)
  return cur_inp, array_ops.concat(1, new_states)
def __call__(self, inputs, state, episodic_gate, scope=None):
  """Run this multi-layer cell on inputs, starting from state."""
  with vs.variable_scope(scope or type(self).__name__):  # "MultiRNNCell"
    cur_state_pos = 0
    cur_inp = inputs
    new_states = []
    for i, cell in enumerate(self._cells):
      with vs.variable_scope("Cell%d" % i):
        cur_state = array_ops.slice(
            state, [0, cur_state_pos], [-1, cell.state_size])
        cur_state_pos += cell.state_size
        cur_inp, new_state = cell(cur_inp, cur_state, episodic_gate)
        new_states.append(new_state)
  return cur_inp, array_ops.concat(1, new_states)
def tree_initialization(self):
  def _init_tree():
    return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op

  def _nothing():
    return control_flow_ops.no_op()

  return control_flow_ops.cond(
      math_ops.equal(array_ops.squeeze(array_ops.slice(
          self.variables.tree, [0, 0], [1, 1])), -2),
      _init_tree, _nothing)
def get_stats(self, session):
  num_nodes = self.variables.end_of_tree.eval(session=session) - 1
  num_leaves = array_ops.where(
      math_ops.equal(array_ops.squeeze(array_ops.slice(
          self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
  ).eval(session=session).shape[0]
  return TreeStats(num_nodes, num_leaves)
def random_crop(value, size, seed=None, name=None):
  """Randomly crops a tensor to a given size.

  Slices a shape `size` portion out of `value` at a uniformly chosen offset.
  Requires `value.shape >= size`.

  If a dimension should not be cropped, pass the full size of that dimension.
  For example, RGB images can be cropped with
  `size = [crop_height, crop_width, 3]`.

  Args:
    value: Input tensor to crop.
    size: 1-D tensor with size the rank of `value`.
    seed: Python integer. Used to create a random seed. See
      @{tf.set_random_seed} for behavior.
    name: A name for this operation (optional).

  Returns:
    A cropped tensor of the same rank as `value` and shape `size`.
  """
  # TODO(shlens): Implement edge case to guarantee output size dimensions.
  # If size > value.shape, zero pad the result so that it always has shape
  # exactly size.
  with ops.name_scope(name, "random_crop", [value, size]) as name:
    value = ops.convert_to_tensor(value, name="value")
    size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size")
    shape = array_ops.shape(value)
    check = control_flow_ops.Assert(
        math_ops.reduce_all(shape >= size),
        ["Need value.shape >= size, got ", shape, size],
        summarize=1000)
    shape = control_flow_ops.with_dependencies([check], shape)
    limit = shape - size + 1
    offset = random_uniform(
        array_ops.shape(shape),
        dtype=size.dtype,
        maxval=size.dtype.max,
        seed=seed) % limit
    return array_ops.slice(value, offset, size, name=name)
def multinomial(logits, num_samples, seed=None, name=None):
  """Draws samples from a multinomial distribution.

  Example:

  ```python
  # samples has shape [1, 5], where each value is either 0 or 1 with equal
  # probability.
  samples = tf.multinomial(tf.log([[10., 10.]]), 5)
  ```

  Args:
    logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice
      `[i, :]` represents the log-odds for all classes.
    num_samples: 0-D.  Number of independent samples to draw for each row
      slice.
    seed: A Python integer. Used to create a random seed for the distribution.
      See @{tf.set_random_seed} for behavior.
    name: Optional name for the operation.

  Returns:
    The drawn samples of shape `[batch_size, num_samples]`.
  """
  with ops.name_scope(name, "multinomial", [logits]):
    logits = ops.convert_to_tensor(logits, name="logits")
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_random_ops.multinomial(
        logits, num_samples, seed=seed1, seed2=seed2)
def random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None):
  """Draws `shape` samples from each of the given Poisson distribution(s).

  `lam` is the rate parameter describing the distribution(s).

  Example:

    samples = tf.random_poisson([0.5, 1.5], [10])
    # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
    # the samples drawn from each distribution

    samples = tf.random_poisson([12.2, 3.3], [7, 5])
    # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
    # represents the 7x5 samples drawn from each of the two distributions

  Args:
    lam: A Tensor or Python value or N-D array of type `dtype`. `lam` provides
      the rate parameter(s) describing the poisson distribution(s) to sample.
    shape: A 1-D integer Tensor or Python array. The shape of the output
      samples to be drawn per "rate"-parameterized distribution.
    dtype: The type of `lam` and the output: `float16`, `float32`, or
      `float64`.
    seed: A Python integer. Used to create a random seed for the
      distributions. See @{tf.set_random_seed} for behavior.
    name: Optional name for the operation.

  Returns:
    samples: a `Tensor` of shape `tf.concat(shape, tf.shape(lam))` with values
      of type `dtype`.
  """
  with ops.name_scope(name, "random_poisson", [lam, shape]):
    lam = ops.convert_to_tensor(lam, name="lam", dtype=dtype)
    shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_random_ops._random_poisson(shape, lam, seed=seed1, seed2=seed2)
def crf_log_norm(inputs, sequence_lengths, transition_params):
  """Computes the normalization for a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.

  Returns:
    log_norm: A [batch_size] vector of normalizers for a CRF.
  """
  # Split up the first and rest of the inputs in preparation for the forward
  # algorithm.
  first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
  first_input = array_ops.squeeze(first_input, [1])
  rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

  # Compute the alpha values in the forward algorithm in order to get the
  # partition function.
  forward_cell = CrfForwardRnnCell(transition_params)
  _, alphas = rnn.dynamic_rnn(
      cell=forward_cell,
      inputs=rest_of_input,
      sequence_length=sequence_lengths - 1,
      initial_state=first_input,
      dtype=dtypes.float32)
  log_norm = math_ops.reduce_logsumexp(alphas, [1])
  return log_norm
def crf_binary_score(tag_indices, sequence_lengths, transition_params):
  """Computes the binary scores of tag sequences.

  Args:
    tag_indices: A [batch_size, max_seq_len] matrix of tag indices.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] matrix of binary potentials.

  Returns:
    binary_scores: A [batch_size] vector of binary scores.
  """
  # Get shape information.
  num_tags = transition_params.get_shape()[0]
  num_transitions = array_ops.shape(tag_indices)[1] - 1

  # Truncate by one on each side of the sequence to get the start and end
  # indices of each transition.
  start_tag_indices = array_ops.slice(tag_indices, [0, 0],
                                      [-1, num_transitions])
  end_tag_indices = array_ops.slice(tag_indices, [0, 1], [-1, num_transitions])

  # Encode the indices in a flattened representation.
  flattened_transition_indices = start_tag_indices * num_tags + end_tag_indices
  flattened_transition_params = array_ops.reshape(transition_params, [-1])

  # Get the binary scores based on the flattened representation.
  binary_scores = array_ops.gather(flattened_transition_params,
                                   flattened_transition_indices)

  masks = _lengths_to_masks(sequence_lengths, array_ops.shape(tag_indices)[1])
  truncated_masks = array_ops.slice(masks, [0, 1], [-1, -1])
  binary_scores = math_ops.reduce_sum(binary_scores * truncated_masks, 1)
  return binary_scores
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
  """Normalizes the given input across the specified dimension to unit length.

  Note that the rank of `input` must be known.

  Args:
    inputs: A `Tensor` of arbitrary size.
    dim: The dimension along which the input is normalized.
    epsilon: A small value to add to the inputs to avoid dividing by zero.
    scope: Optional scope for variable_scope.

  Returns:
    The normalized `Tensor`.

  Raises:
    ValueError: If dim is smaller than the number of dimensions in 'inputs'.
  """
  with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
    if not inputs.get_shape():
      raise ValueError('The input rank must be known.')
    input_rank = len(inputs.get_shape().as_list())
    if dim < 0 or dim >= input_rank:
      raise ValueError(
          'dim must be positive but smaller than the input rank.')

    lengths = math_ops.sqrt(epsilon + math_ops.reduce_sum(
        math_ops.square(inputs), dim, True))
    multiples = []
    if dim > 0:
      multiples.append(array_ops.ones([dim], dtypes.int32))
    multiples.append(array_ops.slice(array_ops.shape(inputs), [dim], [1]))
    if dim < (input_rank - 1):
      multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
    multiples = array_ops.concat(0, multiples)
    return math_ops.div(inputs, array_ops.tile(lengths, multiples))
def inference_graph(self, data):
  with ops.device(self.device_assigner.get_device(self.layer_num)):
    path_probability, path = self.training_ops.hard_routing_function(
        data,
        self.tree_parameters,
        self.tree_thresholds,
        max_nodes=self.params.num_nodes,
        tree_depth=self.params.hybrid_tree_depth)

    output = array_ops.slice(
        self.training_ops.unpack_path(path, path_probability),
        [0, self.params.num_nodes - self.params.num_leaves - 1],
        [-1, self.params.num_leaves])

    return output
def inference_graph(self, data):
  with ops.device(self.device_assigner.get_device(self.layer_num)):
    routes = self.training_ops.routing_function(
        data,
        self.tree_parameters,
        self.tree_thresholds,
        max_nodes=self.params.num_nodes)

    leaf_routes = array_ops.slice(
        routes,
        [0, self.params.num_nodes - self.params.num_leaves - 1],
        [-1, self.params.num_leaves])

    return leaf_routes
def get_shape(self, x, name="get_shape"):
  """Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.

  Args:
    x: `Tensor`.
    name: `String`. The name to give this op.

  Returns:
    sample_shape: `Tensor` (1D, `int32`).
    batch_shape: `Tensor` (1D, `int32`).
    event_shape: `Tensor` (1D, `int32`).
  """
  with self._name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")

    def slice_shape(start_sum, size, name):
      """Closure to slice out shape."""
      start_sum = start_sum if start_sum else (
          array_ops.zeros((), dtype=dtypes.int32, name="zero"),)
      if (x.get_shape().ndims is not None and
          self._is_all_constant_helper(size, *start_sum)):
        start = sum([tensor_util.constant_value(s) for s in start_sum])
        stop = start + tensor_util.constant_value(size)
        slice_ = x.get_shape()[start:stop].as_list()
        if all(s is not None for s in slice_):
          return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
        # Fall-through intended.
      return array_ops.slice(array_ops.shape(x), (sum(start_sum),), (size,))

    sample_ndims = self.get_sample_ndims(x, name=name)
    return (slice_shape((), sample_ndims,
                        name="sample_shape"),
            slice_shape((sample_ndims,), self.batch_ndims,
                        name="batch_shape"),
            slice_shape((sample_ndims, self.batch_ndims), self.event_ndims,
                        name="event_shape"))
def _event_shape(self):
  s = self.scale_operator_pd.shape()
  return array_ops.slice(s, array_ops.shape(s) - 2, [2])
def _flip_vector_to_matrix_dynamic(vec, batch_shape):
  """flip_vector_to_matrix with dynamic shapes."""
  # Shapes associated with batch_shape
  batch_rank = array_ops.size(batch_shape)

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = array_ops.shape(vec)
  vec_rank = array_ops.rank(vec)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = array_ops.slice(vec_shape, [0], [m])
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
  k = array_ops.gather(vec_shape, vec_rank - 1)
  new_shape = array_ops.concat(0, (batch_shape, [k], condensed_shape))

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat(
        0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
    return array_ops.transpose(vec, perm=perm)

  x_flipped = control_flow_ops.cond(
      math_ops.less(0, m),
      _flip_front_dims_to_back,
      lambda: array_ops.expand_dims(vec, -1))

  return array_ops.reshape(x_flipped, new_shape)
def _check_shapes_dynamic(self, operator, v, diag):
  """Return (v, diag) with Assert dependencies, which check shape."""
  checks = []
  with ops.name_scope("check_shapes", values=[operator, v, diag]):
    s_v = array_ops.shape(v)
    r_op = operator.rank()
    r_v = array_ops.rank(v)
    if diag is not None:
      s_d = array_ops.shape(diag)
      r_d = array_ops.rank(diag)

    # Check tensor rank.
    checks.append(check_ops.assert_rank(v, r_op))
    if diag is not None:
      checks.append(check_ops.assert_rank(diag, r_op - 1))

    # Check batch shape
    checks.append(check_ops.assert_equal(
        operator.batch_shape(), array_ops.slice(s_v, [0], [r_v - 2])))
    if diag is not None:
      checks.append(check_ops.assert_equal(
          operator.batch_shape(), array_ops.slice(s_d, [0], [r_d - 1])))

    # Check event shape
    checks.append(check_ops.assert_equal(
        operator.vector_space_dimension(), array_ops.gather(s_v, r_v - 2)))
    if diag is not None:
      checks.append(check_ops.assert_equal(
          array_ops.gather(s_v, r_v - 1), array_ops.gather(s_d, r_d - 1)))

    v = control_flow_ops.with_dependencies(checks, v)
    if diag is not None:
      diag = control_flow_ops.with_dependencies(checks, diag)
    return v, diag