The following 8 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.floordiv().
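Before the examples, a quick note on semantics: tf.floordiv divides elementwise and rounds toward negative infinity, matching Python's // operator rather than C-style truncation toward zero. A minimal sketch, assuming the TensorFlow 1.x API that the examples below target:

import tensorflow as tf

x = tf.constant([7, -7])
y = tf.constant([2, 2])
q = tf.floordiv(x, y)

with tf.Session() as sess:
    print(sess.run(q))  # [ 3 -4]: -7 // 2 rounds down to -4, not up to -3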
Example 1

def sample(self, logits, time):
    # Number of timesteps (counted from the end of the sequence) that should
    # be sampled from the model instead of read from the ground-truth inputs.
    rl_time_steps = tf.floordiv(
        tf.maximum(self.global_step_tensor - self.burn_in_step, 0),
        self.increment_step)
    start_rl_step = self.sequence_length - rl_time_steps
    # Past the maximum length, keep feeding EOS; otherwise read the next
    # ground-truth input token.
    next_input_ids = tf.cond(
        tf.greater_equal(time, self.max_sequence_length),
        lambda: tf.tile([self.eos_id], [self.batch_size]),
        lambda: self._input_tas.read(time))
    next_predicted_ids = tf.squeeze(tf.multinomial(logits, 1), axis=[-1])
    # Blend: ground-truth ids before start_rl_step, sampled ids after.
    mask = tf.to_int32(time >= start_rl_step)
    return (1 - mask) * tf.to_int32(next_input_ids) + mask * tf.to_int32(
        next_predicted_ids)
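In Example 1, tf.floordiv implements a scheduled-sampling curriculum: after burn_in_step training steps, one additional timestep is handed over from ground-truth inputs to model samples every increment_step steps. A plain-Python sketch of that schedule (the parameter values here are made up for illustration):

def rl_time_steps(global_step, burn_in_step=1000, increment_step=100):
    # Mirrors tf.floordiv(tf.maximum(global_step - burn_in_step, 0), increment_step)
    return max(global_step - burn_in_step, 0) // increment_step

print(rl_time_steps(900))   # 0: still in the burn-in phase
print(rl_time_steps(1250))  # 2: the last two timesteps now use sampled ids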
Example 2

def extract_image_patches(x, ksizes, ssizes, padding='same',
                          data_format='channels_last'):
    '''
    Extract the patches from an image
    # Parameters
        x : The input image
        ksizes : 2-d tuple with the kernel size
        ssizes : 2-d tuple with the strides size
        padding : 'same' or 'valid'
        data_format : 'channels_last' or 'channels_first'
    # Returns
        The (k_w, k_h) patches extracted
        TF ==> (batch_size, w, h, k_w, k_h, c)
        TH ==> (batch_size, w, h, c, k_w, k_h)
    '''
    kernel = [1, ksizes[0], ksizes[1], 1]
    strides = [1, ssizes[0], ssizes[1], 1]
    padding = _preprocess_padding(padding)
    if data_format == 'channels_first':
        x = KTF.permute_dimensions(x, (0, 2, 3, 1))
    bs_i, w_i, h_i, ch_i = KTF.int_shape(x)
    patches = tf.extract_image_patches(x, kernel, strides, [1, 1, 1, 1],
                                       padding)
    # Reshaping to fit Theano
    bs, w, h, ch = KTF.int_shape(patches)
    patches = tf.reshape(
        tf.transpose(
            tf.reshape(patches, [-1, w, h, tf.floordiv(ch, ch_i), ch_i]),
            [0, 1, 2, 4, 3]),
        [-1, w, h, ch_i, ksizes[0], ksizes[1]])
    if data_format == 'channels_last':
        patches = KTF.permute_dimensions(patches, [0, 1, 2, 4, 5, 3])
    return patches
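Example 2 uses tf.floordiv(ch, ch_i) to recover the number of pixels per patch when regrouping the flattened patch channels. A hypothetical call, assuming KTF is the Keras TensorFlow backend (keras.backend.tensorflow_backend) and _preprocess_padding is in scope as in the source project:

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
patches = extract_image_patches(images, ksizes=(4, 4), ssizes=(4, 4),
                                padding='valid')
# patches: (batch, 8, 8, 4, 4, 3), i.e. an 8x8 grid of 4x4 RGB patches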
Example 3

def __floordiv__(self, other):
    return tf.floordiv(self, other)
Example 4

def __rfloordiv__(self, other):
    return tf.floordiv(other, self)
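Examples 3 and 4 wire tf.floordiv into Python's // operator for a tensor-like class, covering both operand orders. A minimal self-contained sketch of the same pattern (the Wrapper class here is hypothetical, not from the source project):

import tensorflow as tf

class Wrapper(object):
    """Minimal tensor-like wrapper that supports the // operator."""

    def __init__(self, value):
        self.value = tf.convert_to_tensor(value)

    def __floordiv__(self, other):
        return tf.floordiv(self.value, other)

    def __rfloordiv__(self, other):
        # Called for `other // self` when `other` doesn't handle Wrapper.
        return tf.floordiv(other, self.value)

q = Wrapper([7, 8]) // 2  # -> tf.floordiv([7, 8], 2)
r = 9 // Wrapper([2, 3])  # -> tf.floordiv(9, [2, 3])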
Example 5

def gather_forced_att_logits(encoder_input_symbols, encoder_decoder_vocab_map,
                             att_logit, batch_size, attn_length,
                             target_vocab_size):
    """Gathers attention weights as logits for forced attention."""
    flat_input_symbols = tf.reshape(encoder_input_symbols, [-1])
    flat_label_symbols = tf.gather(encoder_decoder_vocab_map,
                                   flat_input_symbols)
    flat_att_logits = tf.reshape(att_logit, [-1])
    flat_range = tf.to_int64(tf.range(tf.shape(flat_label_symbols)[0]))
    batch_inds = tf.floordiv(flat_range, attn_length)
    position_inds = tf.mod(flat_range, attn_length)
    attn_vocab_inds = tf.transpose(tf.pack(
        [batch_inds, position_inds, tf.to_int64(flat_label_symbols)]))
    # Exclude indexes of entries with flat_label_symbols[i] = -1.
    included_flat_indexes = tf.reshape(tf.where(tf.not_equal(
        flat_label_symbols, -1)), [-1])
    included_attn_vocab_inds = tf.gather(attn_vocab_inds,
                                         included_flat_indexes)
    included_flat_att_logits = tf.gather(flat_att_logits,
                                         included_flat_indexes)
    sparse_shape = tf.to_int64(tf.pack(
        [batch_size, attn_length, target_vocab_size]))
    sparse_label_logits = tf.SparseTensor(included_attn_vocab_inds,
                                          included_flat_att_logits,
                                          sparse_shape)
    forced_att_logit_sum = tf.sparse_reduce_sum(sparse_label_logits, [1])
    forced_att_logit = tf.reshape(forced_att_logit_sum,
                                  [-1, target_vocab_size])
    return forced_att_logit
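Example 5 relies on the standard floordiv/mod decomposition to split a flat index over a (batch, position) grid. Concretely, with attn_length = 3 and two batch entries:

flat_range = list(range(6))
batch_inds = [i // 3 for i in flat_range]      # [0, 0, 0, 1, 1, 1]
position_inds = [i % 3 for i in flat_range]    # [0, 1, 2, 0, 1, 2]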
Example 6

def test_FloorDiv(self):
    t = tf.floordiv(*self.random((3, 5), (3, 5)))
    self.check(t)
Example 7

def __unpool(self, updates, mask, ksize=[1, 2, 2, 1], output_shape=None,
             feature_count=None, name=''):
    with tf.variable_scope(name):
        mask = tf.cast(mask, tf.int32)
        input_shape = tf.shape(updates, out_type=tf.int32)
        # Calculate the new (unpooled) shape.
        if feature_count is None:
            feature_count = input_shape[3]
        if output_shape is None:
            output_shape = (1, input_shape[1] * ksize[1],
                            input_shape[2] * ksize[2], feature_count)
        output_shape = tf.cast(output_shape, tf.int32)
        # Calculate indices for batch, height, width and feature maps.
        one_like_mask = tf.cast(tf.ones_like(mask, dtype=tf.int16), tf.int32)
        batch_shape = tf.concat([[input_shape[0]], [1], [1], [1]], 0)
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int32),
                                 shape=batch_shape)
        b = one_like_mask * batch_range
        y = tf.floordiv(mask, output_shape[2] * output_shape[3])
        # Equivalent to: mask % (output_shape[2] * output_shape[3]) // output_shape[3]
        x = tf.mod(tf.floordiv(mask, output_shape[3]), output_shape[2])
        feature_range = tf.range(output_shape[3], dtype=tf.int32)
        f = one_like_mask * feature_range
        # Transpose indices & reshape update values to one dimension.
        updates_size = tf.size(updates)
        indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]),
                                          [4, updates_size]))
        values = tf.reshape(updates, [updates_size])
        ret = tf.scatter_nd(indices, values, output_shape)
        return ret
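In Example 7 the mask is expected to come from tf.nn.max_pool_with_argmax; tf.floordiv and tf.mod then invert argmax's flattened indices back into (y, x, feature) coordinates so tf.scatter_nd can place each max value. A sketch of typical usage under that assumption (the surrounding encoder/decoder code is hypothetical, and the call must come from inside the same class because the method name is mangled):

# Encoder: pool and remember where each max came from.
pooled, argmax = tf.nn.max_pool_with_argmax(
    features, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Decoder: scatter the pooled values back to their original positions.
unpooled = self.__unpool(pooled, argmax, ksize=[1, 2, 2, 1], name='unpool1')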
Example 8

def extract_features(inputs, k_idxs, map_h):
    """Extract top k fine features

    NOTE. do not use tf.image.extract_glimpse ops to get input patches
    (cf. https://github.com/tensorflow/tensorflow/issues/2134)
    """
    def _extract_feature(inputs, idxs):
        idxs = tf.expand_dims(idxs, 1)
        idx_i = tf.floordiv(idxs, map_h)
        idx_j = tf.mod(idxs, map_h)
        # NOTE: the below origins are starting points, not center!
        origin_i = 2 * (2 * idx_i + 1) + 3 - 5 + 2
        origin_j = 2 * (2 * idx_j + 1) + 3 - 5 + 2
        origin_centers = tf.concat(1, [origin_i, origin_j])
        # NOTE: size also depends on the architecture
        # patches = tf.image.extract_glimpse(inputs, size=[14, 14],
        #     offsets=origin_centers, centered=False, normalized=False)
        patches = extract_patches(inputs, size=[14, 14],
                                  offsets=origin_centers)
        # fine_features = fine_layers(patches)
        fine_features = []
        src_idxs = tf.concat(1, [idx_i, idx_j])
        return fine_features, src_idxs, patches

    k_features = []
    k_src_idxs = []
    k_patches = []
    for i in xrange(N_PATCHES):
        fine_feature, src_idx, patches = _extract_feature(inputs, k_idxs[:, i])
        k_features.append(fine_feature)
        k_src_idxs.append(src_idx)
        k_patches.append(patches)
    concat_patches = tf.concat(0, k_patches)
    concat_k_features = fine_layers(concat_patches)
    k_features = tf.split(0, N_PATCHES, concat_k_features)
    return k_features, k_src_idxs, k_patches