The following 25 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.argmin().
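Before the project examples, here is a minimal, self-contained sketch (assuming TensorFlow 1.x graph mode, which matches the examples below): tf.argmin returns the index of the smallest element along the given axis, as int64 by default.

import tensorflow as tf

def argmin_demo():
    # 2 x 3 matrix; the smallest value of each row sits in a different column
    x = tf.constant([[3.0, 1.0, 2.0],
                     [0.5, 4.0, 6.0]])
    row_argmin = tf.argmin(x, axis=1)  # index of the minimum per row -> [1, 0]
    col_argmin = tf.argmin(x, axis=0)  # index of the minimum per column -> [1, 0, 0]
    with tf.Session() as sess:
        return sess.run([row_argmin, col_argmin])

Several of the Keras-style argmin wrappers below normalize a negative axis to a non-negative one before calling tf.argmin.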
def multilabel_image_to_class(label_image: tf.Tensor, classes_file: str) -> tf.Tensor:
    classes_color_values, colors_labels = get_classes_color_from_file_multilabel(classes_file)
    # Convert label_image [H,W,3] to the classes [H,W,C],int32 according to the classes [C,3]
    with tf.name_scope('LabelAssign'):
        if len(label_image.get_shape()) == 3:
            diff = tf.cast(label_image[:, :, None, :], tf.float32) - tf.constant(classes_color_values[None, None, :, :])  # [H,W,C,3]
        elif len(label_image.get_shape()) == 4:
            diff = tf.cast(label_image[:, :, :, None, :], tf.float32) - tf.constant(
                classes_color_values[None, None, None, :, :])  # [B,H,W,C,3]
        else:
            raise NotImplementedError('Length is : {}'.format(len(label_image.get_shape())))
        pixel_class_diff = tf.reduce_sum(tf.square(diff), axis=-1)  # [H,W,C] or [B,H,W,C]
        class_label = tf.argmin(pixel_class_diff, axis=-1)  # [H,W] or [B,H,W]
        return tf.gather(colors_labels, class_label) > 0
def build_model_graph(self):
    with tf.variable_scope(self.name) as self.scope:
        self.input_place_holder = tf.placeholder(tf.float32,
                                                 shape=(None, self.params.window, self.params.ob_size * 4 + 2),
                                                 name='input')
        self.forward_cell_layers = tf.contrib.rnn.MultiRNNCell(
            [tf.contrib.rnn.LSTMCell(self.params.hidden_size) for i in range(self.params.hidden_depth)])
        self.rnn_output, self.final_rnn_state = tf.nn.dynamic_rnn(self.forward_cell_layers,
                                                                  self.input_place_holder, dtype=tf.float32)
        self.outs = tf.squeeze(
            tf.slice(self.rnn_output, [0, self.params.window - 1, 0], [-1, 1, self.params.hidden_size]), axis=1)
        if not self.advantage:
            self.U = tf.get_variable('U', shape=[self.params.hidden_size, self.params.actions])
            self.b_2 = tf.get_variable('b2', shape=[self.params.actions])
            self.predictions = tf.cast((tf.matmul(self.outs, self.U) + self.b_2), 'float32')
        else:
            self.advantage_stream, self.value_stream = tf.split(self.outs, 2, 1)
            self.U_a = tf.get_variable('U_a', shape=[self.params.hidden_size // 2, self.params.actions])
            self.U_v = tf.get_variable('U_v', shape=[self.params.hidden_size // 2, 1])
            self.A = tf.cast(tf.matmul(self.advantage_stream, self.U_a), 'float32')
            self.V = tf.cast(tf.matmul(self.value_stream, self.U_v), 'float32')
            self.predictions = self.V + tf.subtract(self.A, tf.reduce_mean(self.A, axis=1, keep_dims=True))
        self.min_score = tf.reduce_min(self.predictions, reduction_indices=[1])
        self.min_action = tf.argmin(tf.squeeze(self.predictions), axis=0, name="arg_min")
def _setup_enqueuing(self, queues, **loom_kwargs):
    """Sets up enqueuing to the approx. smallest (least full) of `queues`."""
    self.compiler.init_loom(loom_input_tensor=None, **loom_kwargs)
    input_tensor = self.compiler.loom_input_tensor
    fns = [lambda r=q: r.enqueue_many([input_tensor]) for q in queues]
    self.train_op = _tf_nth(fns, tf.argmin(_noised_q_sizes(queues), axis=0))
    self.losses.clear()
    self.losses['dummy'] = tf.constant(0.0)
    self.save_summaries_secs = 0
    self.dev_examples = None
    self.train_feeds.clear()
    self.save_model_secs = 0
    self.exact_batch_sizes = True
def argmin(x, axis=-1):
    '''Returns the index of the minimum value along a tensor axis.
    '''
    if axis < 0:
        axis = axis % len(x.get_shape())
    return tf.argmin(x, axis)
def label_image_to_class(label_image: tf.Tensor, classes_file: str) -> tf.Tensor:
    classes_color_values = get_classes_color_from_file(classes_file)
    # Convert label_image [H,W,3] to the classes [H,W],int32 according to the classes [C,3]
    with tf.name_scope('LabelAssign'):
        if len(label_image.get_shape()) == 3:
            diff = tf.cast(label_image[:, :, None, :], tf.float32) - tf.constant(classes_color_values[None, None, :, :])  # [H,W,C,3]
        elif len(label_image.get_shape()) == 4:
            diff = tf.cast(label_image[:, :, :, None, :], tf.float32) - tf.constant(
                classes_color_values[None, None, None, :, :])  # [B,H,W,C,3]
        else:
            raise NotImplementedError('Length is : {}'.format(len(label_image.get_shape())))
        pixel_class_diff = tf.reduce_sum(tf.square(diff), axis=-1)  # [H,W,C] or [B,H,W,C]
        class_label = tf.argmin(pixel_class_diff, axis=-1)  # [H,W] or [B,H,W]
        return class_label
def argmin(self, x, axis=-1):
    '''Returns the index of the minimum value along a tensor axis.
    '''
    if axis < 0:
        axis = axis % len(x.get_shape())
    return tf.argmin(x, axis)
def argmin(x, axis=-1):
    """Returns the index of the minimum value along an axis.

    # Arguments
        x: input tensor.
        axis: axis along which to perform the reduction.

    # Returns
        A tensor.
    """
    if axis < 0:
        axis = axis % len(x.get_shape())
    return tf.argmin(x, axis)
def evaluation(loss, batch_size=100):
    """Evaluate the quality of the logits at predicting the label.
    """
    ruler = tf.constant(0.5, shape=[batch_size, 1])
    loss_l = tf.reshape(loss, [batch_size, 1])
    comp = tf.concat(1, [ruler, loss_l])
    correct = tf.argmin(comp, 1)
    # Return the number of entries less than 0.5
    return tf.reduce_sum(correct)
def argmin(self, axis=None, name='argmin'):
    return as_varnode(tf.argmin(self, axis=axis, name=name))
def argmin(x, axis=-1):
    """Returns the index of the minimum value along an axis.

    # Arguments
        x: Tensor or variable.
        axis: axis along which to perform the reduction.

    # Returns
        A tensor.
    """
    axis = _normalize_axis(axis, ndim(x))
    return tf.argmin(x, axis)
def match_to_dict_conv(image_as_patches, dictionary, include_counts=False):
    print('match_to_dict_conv')
    [n, w, h, c] = dictionary.get_shape().as_list()
    # dict_as_filt = tf.transpose(tf.reshape(dictionary, [-1, w*h*c, 1, 1]))
    dict_as_filt = tf.transpose(tf.reshape(dictionary, [-1, w*h*c]))
    print(dict_as_filt.get_shape())
    [n, w, h, c] = image_as_patches.get_shape().as_list()
    # image_flattened = tf.reshape(image_as_patches, [-1, 1, 1, w*h*c])
    image_flattened = tf.reshape(image_as_patches, [-1, w*h*c])
    print(image_flattened.get_shape())
    # pair_dist = -2 * tf.reshape(tf.nn.conv2d(image_flattened, dict_as_filt, [1,1,1,1], 'SAME'), [n, -1])
    pair_dist = -2 * tf.matmul(image_flattened, dict_as_filt)
    print(pair_dist.get_shape())
    single_dist = tf.reduce_sum(tf.square(dictionary), [1, 2, 3])
    distance = single_dist + pair_dist
    print(distance.get_shape())
    min_loc = tf.argmin(distance, 1)
    print(min_loc.get_shape())
    if include_counts:
        y, _, count = tf.unique_with_counts(min_loc)
        return tf.gather(dictionary, min_loc), [y, count]
    else:
        return tf.gather(dictionary, min_loc)
def match_to_dict(image_as_patches, dictionary):
    patch_size = len(image_as_patches.get_shape()) + 1
    distance = tf.reduce_sum(
        tf.square(tf.expand_dims(image_as_patches, 1) - tf.expand_dims(dictionary, 0)),
        range(2, patch_size))
    min_loc = tf.argmin(distance, 1)
    return tf.gather(dictionary, min_loc)
def argmin(x, axis=-1, keepdims=False):
    return tf.argmin(x, axis=axis)
def assign_label(label, x, cluster_center):
    """ Assign Labels

    Input:
        x: embedding of size N x D
        label: cluster label of size N X 1
        K: number of clusters
        tf_eps: small constant

    Output:
        cluster_center: cluster center of size K x D
    """
    dist = pdist(x, cluster_center)
    return label.assign(tf.argmin(dist, 1))
def test_ArgMin(self):
    t = tf.argmin(self.random(3, 4, 2), 1)
    self.check(t)
def argmin(x, axis=-1):
    if axis < 0:
        axis = axis % len(x.get_shape())
    return tf.argmin(x, axis)
def kMeans(iterations, labelledSet, columnPrefix="cluster"):
    X = labelledSet.as_matrix()

    start_pos = tf.Variable(X[np.random.randint(X.shape[0], size=iterations), :], dtype=tf.float32)
    centroids = tf.Variable(start_pos.initialized_value(), "S", dtype=tf.float32)
    points = tf.Variable(X, 'X', dtype=tf.float32)
    ones_like = tf.ones((points.get_shape()[0], 1))
    prev_assignments = tf.Variable(tf.zeros((points.get_shape()[0],), dtype=tf.int64))

    p1 = tf.matmul(
        tf.expand_dims(tf.reduce_sum(tf.square(points), 1), 1),
        tf.ones(shape=(1, iterations))
    )
    p2 = tf.transpose(tf.matmul(
        tf.reshape(tf.reduce_sum(tf.square(centroids), 1), shape=[-1, 1]),
        ones_like,
        transpose_b=True
    ))
    distance = tf.sqrt(tf.add(p1, p2) - 2 * tf.matmul(points, centroids, transpose_b=True))

    point_to_centroid_assignment = tf.argmin(distance, axis=1)

    total = tf.unsorted_segment_sum(points, point_to_centroid_assignment, iterations)
    count = tf.unsorted_segment_sum(ones_like, point_to_centroid_assignment, iterations)
    means = total / count

    is_continue = tf.reduce_any(tf.not_equal(point_to_centroid_assignment, prev_assignments))
    with tf.control_dependencies([is_continue]):
        loop = tf.group(centroids.assign(means), prev_assignments.assign(point_to_centroid_assignment))

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    has_changed, cnt = True, 0
    while has_changed and cnt < 300:
        cnt += 1
        has_changed, _ = sess.run([is_continue, loop])

    res = sess.run(point_to_centroid_assignment)
    return pandas.DataFrame(res, columns=[columnPrefix + "_" + str(iterations)])
def _deepfoolx(model, x, epochs, eta, clip_min, clip_max, min_prob):
    y0 = tf.stop_gradient(model(x))
    y0 = tf.reshape(y0, [-1])
    k0 = tf.argmax(y0)

    ydim = y0.get_shape().as_list()[0]
    xdim = x.get_shape().as_list()[1:]
    xflat = _prod(xdim)

    def _cond(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.reshape(model(xadv), [-1])
        p = tf.reduce_max(y)
        k = tf.argmax(y)
        return tf.logical_and(tf.less(i, epochs),
                              tf.logical_or(tf.equal(k0, k),
                                            tf.less(p, min_prob)))

    def _body(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.reshape(model(xadv), [-1])
        gs = [tf.reshape(tf.gradients(y[i], xadv)[0], [-1]) for i in range(ydim)]
        g = tf.stack(gs, axis=0)

        yk, yo = y[k0], tf.concat((y[:k0], y[(k0+1):]), axis=0)
        gk, go = g[k0], tf.concat((g[:k0], g[(k0+1):]), axis=0)

        yo.set_shape(ydim - 1)
        go.set_shape([ydim - 1, xflat])

        a = tf.abs(yo - yk)
        b = go - gk
        c = tf.norm(b, axis=1)
        score = a / c
        ind = tf.argmin(score)

        si, bi = score[ind], b[ind]
        dx = si * bi
        dx = tf.reshape(dx, [-1] + xdim)
        return i+1, z+dx

    _, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(x)],
                             name='_deepfoolx_impl', back_prop=False)
    return noise
def pit_mse_loss(s_x, s_y, pit_axis=1, perm_size=None, name='pit_loss'):
    '''
    Permutation invariant MSE loss, batched version

    Args:
        s_x: tensor
        s_y: tensor
        pit_axis: which axis permutations occur
        perm_size: size of permutation, infer from tensor shape by default
        name: string

    Returns:
        s_loss, v_perms, s_loss_sets_idx

        s_loss: scalar loss
        v_perms: constant int matrix of permutations
        s_perm_sets_idx: int matrix, indicating selected permutations
    '''
    x_shp = s_x.get_shape().as_list()
    ndim = len(x_shp)
    batch_size = x_shp[0]
    if batch_size is None:
        batch_size = hparams.BATCH_SIZE
    assert -ndim <= pit_axis < ndim
    pit_axis %= ndim
    assert pit_axis != 0
    reduce_axes = [
        i for i in range(1, ndim+1) if i not in [pit_axis, pit_axis+1]]
    with tf.variable_scope(name):
        v_perms = tf.constant(
            list(itertools.permutations(range(hparams.MAX_N_SIGNAL))),
            dtype=hparams.INTX)
        s_perms_onehot = tf.one_hot(
            v_perms, hparams.MAX_N_SIGNAL, dtype=hparams.FLOATX)
        s_x = tf.expand_dims(s_x, pit_axis+1)
        s_y = tf.expand_dims(s_y, pit_axis)
        if s_x.dtype.is_complex and s_y.dtype.is_complex:
            s_diff = s_x - s_y
            s_cross_loss = tf.reduce_mean(
                tf.square(tf.real(s_diff)) + tf.square(tf.imag(s_diff)),
                reduce_axes)
        else:
            s_cross_loss = tf.reduce_mean(
                tf.squared_difference(s_x, s_y), reduce_axes)
        s_loss_sets = tf.einsum(
            'bij,pij->bp', s_cross_loss, s_perms_onehot)
        s_loss_sets_idx = tf.argmin(s_loss_sets, axis=1)
        s_loss = tf.gather_nd(
            s_loss_sets,
            tf.stack([
                tf.range(hparams.BATCH_SIZE, dtype=tf.int64),
                s_loss_sets_idx], axis=1))
        s_loss = tf.reduce_mean(s_loss)
    return s_loss, v_perms, s_loss_sets_idx
def __call__(self, s_embed, s_src_pwr=None, s_mix_pwr=None, s_embed_flat=None):
    with tf.variable_scope(self.name):
        v_anchors = tf.get_variable(
            'anchors', [hparams.NUM_ANCHOR, hparams.EMBED_SIZE],
            initializer=tf.random_normal_initializer(stddev=1.))

        # all combinations of anchors
        s_anchor_sets = ops.combinations(
            v_anchors, hparams.MAX_N_SIGNAL)

        # equation (6)
        s_anchor_assignment = tf.einsum(
            'btfe,pce->bptfc', s_embed, s_anchor_sets)
        s_anchor_assignment = tf.nn.softmax(s_anchor_assignment)

        # equation (7)
        s_attractor_sets = tf.einsum(
            'bptfc,btfe->bpce', s_anchor_assignment, s_embed)
        s_attractor_sets /= tf.expand_dims(
            tf.reduce_sum(s_anchor_assignment, axis=(2, 3)), -1)

        # equation (8)
        s_in_set_similarities = tf.reduce_max(
            tf.matmul(
                s_attractor_sets,
                tf.transpose(s_attractor_sets, [0, 1, 3, 2])),
            axis=(-1, -2))

        # equation (9)
        s_subset_choice = tf.argmin(s_in_set_similarities, axis=1)
        s_subset_choice = tf.transpose(tf.stack([
            tf.range(hparams.BATCH_SIZE, dtype=tf.int64),
            s_subset_choice]))
        s_attractors = tf.gather_nd(s_attractor_sets, s_subset_choice)

    if hparams.DEBUG:
        self.debug_fetches = dict(
            asets=s_attractor_sets,
            anchors=v_anchors,
            subset_choice=s_subset_choice)

    return s_attractors
def _construct_sequence(batch):
    hidden, boxes = batch
    # initializing the state with features
    states = [hidden[0]]
    # TODO: make this dependent on the data
    # TODO: make it with scan ?
    for t in range(1, T):
        # find the matching boxes. TODO: try with the soft matching function
        if c.match_kind == 'boxes':
            dists = nnutil.cdist(boxes[t-1], boxes[t])
            idxs = tf.argmin(dists, 1, 'idxs')
            state_prev = tf.gather(states[t-1], idxs)
        elif c.match_kind == 'hidden':
            # TODO: actually it makes more sense to compare on states
            dists = nnutil.cdist(hidden[t-1], hidden[t])
            idxs = tf.argmin(dists, 1, 'idxs')
            state_prev = tf.gather(states[t-1], idxs)
        elif c.match_kind == 'hidden-soft':
            dists = nnutil.cdist(hidden[t-1], hidden[t])
            weights = slim.softmax(dists)
            state_prev = tf.matmul(weights, states[t-1])
        else:
            raise RuntimeError('Unknown match_kind: %s' % c.match_kind)

        def _construct_update(reuse):
            state = tf.concat(1, [state_prev, hidden[t]])
            # TODO: initialize jointly
            reset = slim.fully_connected(state, NFH, tf.nn.sigmoid,
                                         reuse=reuse, scope='reset')
            step = slim.fully_connected(state, NFH, tf.nn.sigmoid,
                                        reuse=reuse, scope='step')
            state_r = tf.concat(1, [reset * state_prev, hidden[t]])
            state_up = slim.fully_connected(state_r, NFH, tf.nn.tanh,
                                            reuse=reuse, scope='state_up')
            return state_up, step

        try:
            state_up, step = _construct_update(reuse=True)
        except ValueError:
            state_up, step = _construct_update(reuse=False)

        state = step * state_up + (1.0 - step) * state_prev
        states.append(state)
    return tf.pack(states)
def build_model_graph(self):
    self.filter_tensors = {}
    self.bias_tensors = {}
    # lots to decisions
    with tf.variable_scope(self.name) as self.scope:
        self.input_place_holder = tf.placeholder(tf.float32,
                                                 shape=(None, self.params.window, self.params.ob_size * 4 + 2),
                                                 name='input')
        curr_dimension = [tf.shape(self.input_place_holder)[0], self.params.window,
                          self.params.ob_size * 4 + 2, 1]
        curr_layer = tf.reshape(self.input_place_holder, curr_dimension)
        for name, layer_params in sorted(self.layers.items()):
            print(curr_dimension)
            print(curr_layer)
            if layer_params['type'] == 'conv':
                n = 'conv_{}_filter_size_{}_stride_{}_num_{}'.format(name, layer_params['size'],
                                                                     layer_params['stride'], layer_params['num'])
                s = [layer_params['size'], layer_params['size'], curr_dimension[3], layer_params['num']]
                strides = [1, layer_params['stride'], layer_params['stride'], 1]
                self.filter_tensors[name] = tf.Variable(tf.truncated_normal(s, stddev=0.0001), name=n)
                self.bias_tensors[name] = tf.Variable(tf.truncated_normal(shape=[layer_params['num']], stddev=0.1),
                                                      name=n + '_bias')
                conv_output = tf.nn.conv2d(curr_layer, self.filter_tensors[name], strides, "VALID")
                conv_bias = tf.nn.bias_add(conv_output, self.bias_tensors[name])
                curr_layer = tf.nn.relu(conv_bias)
                curr_dimension = compute_output_size(curr_dimension[0], curr_dimension[1], curr_dimension[2],
                                                     layer_params['size'], layer_params['stride'], 0,
                                                     layer_params['num'])
            if layer_params['type'] == 'pool':
                if layer_params['pool_type'] == 'max':
                    s = [1, layer_params['size'], layer_params['size'], 1]
                    stride = [1, layer_params['stride'], layer_params['stride'], 1]
                    x = tf.nn.max_pool(curr_layer, s, stride, 'VALID')
                    curr_layer = x
                    curr_dimension = compute_pool_size(curr_dimension[0], curr_dimension[1], curr_dimension[2],
                                                       layer_params['size'], layer_params['stride'],
                                                       curr_dimension[3])
                if layer_params['pool_type'] == 'avg':
                    s = [1, layer_params['size'], layer_params['size'], 1]
                    stride = [1, layer_params['stride'], layer_params['stride'], 1]
                    x = tf.nn.avg_pool(curr_layer, s, stride, 'VALID')
                    curr_layer = x
                    curr_dimension = compute_pool_size(curr_dimension[0], curr_dimension[1], curr_dimension[2],
                                                       layer_params['size'], layer_params['stride'],
                                                       curr_dimension[3])
            if layer_params['type'] == 'fc':
                print('hi')
                print(curr_dimension)
                print(curr_layer)
                if not self.advantage:
                    final_s = [curr_dimension[1], curr_dimension[2], curr_dimension[3], self.params.actions]
                    strides = [1, 1, 1, 1]
                    projection = tf.Variable(tf.truncated_normal(final_s, stddev=0.1), name="final_projection")
                    bias = tf.Variable(tf.truncated_normal([self.params.actions], stddev=0.1),
                                       name="final_projection")
                    self.outs = tf.nn.conv2d(curr_layer, projection, strides, 'VALID') + bias
                    self.predictions = tf.squeeze(self.outs, squeeze_dims=[1, 2])
                else:
                    self.advantage_stream, self.value_stream = tf.split(curr_layer, 2, 3)
                    final_s_a = [curr_dimension[1], curr_dimension[2], curr_dimension[3] // 2, self.params.actions]
                    final_s_v = [curr_dimension[1], curr_dimension[2], curr_dimension[3] // 2, 1]
                    strides = [1, 1, 1, 1]
                    self.projection_a = tf.Variable(tf.truncated_normal(final_s_a, stddev=0.01),
                                                    name="final_projection")
                    self.projection_v = tf.Variable(tf.truncated_normal(final_s_v, stddev=0.01),
                                                    name="final_projection")
                    self.A = tf.squeeze(tf.nn.conv2d(self.advantage_stream, self.projection_a, strides, 'VALID'),
                                        squeeze_dims=[1, 2])
                    self.V = tf.squeeze(tf.nn.conv2d(self.value_stream, self.projection_v, strides, 'VALID'),
                                        squeeze_dims=[1, 2])
                    self.predictions = self.V + tf.subtract(self.A, tf.reduce_mean(self.A, axis=1, keep_dims=True))
        self.min_score = tf.reduce_min(self.predictions, axis=[1])
        self.min_action = tf.argmin(tf.squeeze(self.predictions), axis=0, name="arg_min")