The following five code examples, extracted from open-source Python projects, illustrate how to use tensorflow.unique_with_counts().
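Before the project examples, here is a minimal sketch of the op itself. The input values are illustrative; the outputs follow the op's documented semantics: for a 1-D tensor it returns the unique values in order of first appearance, an index into those values for every input element, and the number of occurrences of each unique value.

import tensorflow as tf

x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
y, idx, count = tf.unique_with_counts(x)

with tf.Session() as sess:
    print(sess.run(y))      # [1 2 4 7 8]
    print(sess.run(idx))    # [0 0 1 2 2 2 3 4 4]
    print(sess.run(count))  # [2 1 3 1 2]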
def predict_snps(y, cut_off_prob=0.5, already_split=False):
    """Predicts which SNPs are causing epistasis based on the output of one epoch.

    Arguments:
        y: the given output tensor
        cut_off_prob: cutoff probability above which a SNP is predicted to be
            causing. Recommended values: 0.5 for the 2-classifier model,
            0.98 for the 1-classifier model.

    Returns:
        top_pred_snps: a tensor with the indices of the predicted SNPs
        count: how many times each predicted SNP crossed the cutoff
    """
    with tf.name_scope('snp_prediction'):
        if not already_split:
            y_left = get_causing_epi_probs(y)
        else:
            y_left = y
        y_left_t = tf.transpose(y_left, [0, 2, 1])  # (unused in this function)
        # Indices of all entries at or above the cutoff probability.
        top_snps = tf.where(tf.greater_equal(y_left, cut_off_prob))
        # Keep only the SNP-index column of the index triples.
        _, top_snp_indices, _ = tf.split(top_snps, 3, axis=1, name='split')
        top_snp_indices = tf.reshape(top_snp_indices, [-1])
        # Count how often each SNP index was selected across the batch.
        top_pred_snps, _, count = tf.unique_with_counts(top_snp_indices)
        return top_pred_snps, count
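The counting trick above — flattening the indices of above-threshold entries and tallying duplicates across the batch — can be isolated in a few lines. The probs values and the 0.5 cutoff below are illustrative:

import tensorflow as tf

probs = tf.constant([[0.9, 0.1, 0.7],   # batch of per-SNP probabilities
                     [0.8, 0.2, 0.6]])
hits = tf.where(tf.greater_equal(probs, 0.5))  # (row, snp) index pairs
snp_idx = hits[:, 1]                           # keep only the SNP column
# snps == [0, 2]; votes == [2, 2]: both SNPs crossed the cutoff twice
snps, _, votes = tf.unique_with_counts(snp_idx)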
def build(self, predictions, targets, inputs=None):
    """Prints the number of each kind of prediction."""
    self.built = True
    pshape = predictions.get_shape()
    self.inner_metric.build(predictions, targets, inputs)
    with tf.name_scope(self.name):
        if len(pshape) == 1 or (len(pshape) == 2 and int(pshape[1]) == 1):
            self.name = self.name or "binary_prediction_counts"
            y, idx, count = tf.unique_with_counts(tf.argmax(predictions))
            # Print the distinct predicted values and how often each occurs.
            self.tensor = tf.Print(self.inner_metric.tensor, [y, count],
                                   name=self.inner_metric.name)
        else:
            self.name = self.name or "categorical_prediction_counts"
            y, idx, count = tf.unique_with_counts(tf.argmax(predictions, axis=1))
            self.tensor = tf.Print(self.inner_metric.tensor, [y, count],
                                   name=self.inner_metric.name)
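Stripped of the metric plumbing, the categorical branch is simply a class histogram over the arg-max predictions. The logits below are illustrative:

import tensorflow as tf

logits = tf.constant([[2.0, 0.1], [0.3, 1.5], [1.9, 0.2]])
pred = tf.argmax(logits, axis=1)                   # [0, 1, 0]
classes, _, counts = tf.unique_with_counts(pred)   # classes=[0, 1], counts=[2, 1]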
def source_distance(x, y):
    """Pairwise mean-squared distances between the per-class means of x."""
    y = tf.cast(tf.argmax(y, axis=1), tf.float32)
    # Only the distinct class labels are needed here; the counts are discarded.
    y1, _, _ = tf.unique_with_counts(y)
    TensorArr = tf.TensorArray(tf.float32, size=1,
                               dynamic_size=True, clear_after_read=False)
    x_array = TensorArr.unstack(y1)
    size = x_array.size()
    initial_outputs = tf.TensorArray(dtype=tf.float32, size=size)
    i = tf.constant(0)

    def should_continue(i, *args):
        return i < size

    def loop(i, output):
        y_class = x_array.read(i)
        idx_i = tf.where(tf.equal(y, y_class))
        xi = tf.gather_nd(x, idx_i)  # all samples of class i
        initial_outputs1 = tf.TensorArray(dtype=tf.float32, size=size)
        j = tf.constant(0)

        def should_continue1(j, *args):
            return j < size

        def loop1(j, output1):
            y2 = x_array.read(j)
            idx_j = tf.where(tf.equal(y, y2))
            xj = tf.gather_nd(x, idx_j)  # all samples of class j
            # Mean squared difference between the two class means.
            dis = tf.reduce_mean(tf.square(tf.reduce_mean(xi, 0)
                                           - tf.reduce_mean(xj, 0)))
            output1 = output1.write(j, dis)
            return j + 1, output1

        j, r1 = tf.while_loop(should_continue1, loop1, [j, initial_outputs1])
        output = output.write(i, r1.stack())
        return i + 1, output

    i, r = tf.while_loop(should_continue, loop, [i, initial_outputs])
    out = r.stack()
    return out
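A quick NumPy sanity check of what source_distance computes — the mean squared difference between every pair of per-class feature means — with illustrative data:

import numpy as np

x = np.array([[0., 0.], [2., 2.], [10., 10.]])  # three samples, two features
labels = np.array([0, 0, 1])
means = [x[labels == c].mean(axis=0) for c in np.unique(labels)]  # [1, 1] and [10, 10]
dist = [[np.mean((a - b) ** 2) for b in means] for a in means]
# dist == [[0.0, 81.0], [81.0, 0.0]]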
def match_to_dict_conv(image_as_patches, dictionary, include_counts=False):
    print('match_to_dict_conv')
    [n, w, h, c] = dictionary.get_shape().as_list()
    dict_as_filt = tf.transpose(tf.reshape(dictionary, [-1, w * h * c]))
    print(dict_as_filt.get_shape())
    [n, w, h, c] = image_as_patches.get_shape().as_list()
    image_flattened = tf.reshape(image_as_patches, [-1, w * h * c])
    print(image_flattened.get_shape())
    # Cross term of the squared Euclidean distance between every patch
    # and every dictionary atom.
    pair_dist = -2 * tf.matmul(image_flattened, dict_as_filt)
    print(pair_dist.get_shape())
    single_dist = tf.reduce_sum(tf.square(dictionary), [1, 2, 3])
    # The per-patch squared norm is constant along the atom axis, so it can
    # be dropped without changing the argmin below.
    distance = single_dist + pair_dist
    print(distance.get_shape())
    min_loc = tf.argmin(distance, 1)
    print(min_loc.get_shape())
    if include_counts:
        # Histogram of which dictionary atoms were matched and how often.
        y, _, count = tf.unique_with_counts(min_loc)
        return tf.gather(dictionary, min_loc), [y, count]
    else:
        return tf.gather(dictionary, min_loc)
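With include_counts=True the second return value acts as a usage histogram over dictionary atoms: y holds the indices of the atoms that were matched at least once, and count how often each was the nearest neighbour. A hypothetical call (the shapes are illustrative):

patches = tf.placeholder(tf.float32, [64, 8, 8, 3])  # 64 image patches
atoms = tf.placeholder(tf.float32, [128, 8, 8, 3])   # 128 dictionary atoms
matched, (used, freq) = match_to_dict_conv(patches, atoms, include_counts=True)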
def compute_spans(start_scores, end_scores, answer2support, is_eval, support2question,
                  beam_size=1, max_span_size=10000, correct_start=None):
    max_support_length = tf.shape(start_scores)[1]
    _, _, num_doc_per_question = tf.unique_with_counts(support2question)
    offsets = tf.cumsum(num_doc_per_question, exclusive=True)
    doc_idx_for_support = tf.range(tf.shape(support2question)[0]) - tf.gather(offsets, support2question)

    def train():
        gathered_end_scores = tf.gather(end_scores, answer2support)
        gathered_start_scores = tf.gather(start_scores, answer2support)
        if correct_start is not None:
            # assuming we know the correct start we only consider ends after that
            left_mask = misc.mask_for_lengths(tf.cast(correct_start, tf.int32),
                                              max_support_length, mask_right=False)
            gathered_end_scores = gathered_end_scores + left_mask
        predicted_start_pointer = tf.argmax(gathered_start_scores, axis=1, output_type=tf.int32)
        predicted_end_pointer = tf.argmax(gathered_end_scores, axis=1, output_type=tf.int32)
        return (start_scores, end_scores, tf.gather(doc_idx_for_support, answer2support),
                predicted_start_pointer, predicted_end_pointer)

    def eval():
        # we collect spans for the top k starts and top k ends and select the
        # best k from those top 2k
        doc_idx1, start_pointer1, end_pointer1, span_score1 = _get_top_k(
            start_scores, end_scores, beam_size, max_span_size, support2question)
        doc_idx2, end_pointer2, start_pointer2, span_score2 = _get_top_k(
            end_scores, start_scores, beam_size, -max_span_size, support2question)

        doc_idx = tf.concat([doc_idx1, doc_idx2], 1)
        start_pointer = tf.concat([start_pointer1, start_pointer2], 1)
        end_pointer = tf.concat([end_pointer1, end_pointer2], 1)
        span_score = tf.concat([span_score1, span_score2], 1)

        _, idx = tf.nn.top_k(span_score, beam_size)

        r = tf.range(tf.shape(span_score)[0], dtype=tf.int32)
        r = tf.reshape(tf.tile(tf.expand_dims(r, 1), [1, beam_size]), [-1, 1])

        idx = tf.concat([r, tf.reshape(idx, [-1, 1])], 1)
        doc_idx = tf.gather_nd(doc_idx, idx)
        start_pointer = tf.gather_nd(start_pointer, idx)
        end_pointer = tf.gather_nd(end_pointer, idx)

        return (start_scores, end_scores, tf.gather(doc_idx_for_support, doc_idx),
                start_pointer, end_pointer)

    return tf.cond(is_eval, eval, train)
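Here tf.unique_with_counts serves as a grouped counter: assuming support2question lists supports grouped by question (so each question's label appears in one contiguous run), the exclusive cumsum of the counts gives the index of each question's first support, and subtracting it converts a global support index into a within-question document index. A worked example with illustrative values:

import tensorflow as tf

support2question = tf.constant([0, 0, 1, 1, 1, 2])
_, _, num_doc_per_question = tf.unique_with_counts(support2question)  # [2, 3, 1]
offsets = tf.cumsum(num_doc_per_question, exclusive=True)             # [0, 2, 5]
doc_idx_for_support = tf.range(6) - tf.gather(offsets, support2question)
# doc_idx_for_support == [0, 1, 0, 1, 2, 0]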