The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.reduce_prod().
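Before the project snippets, here is a minimal standalone sketch (written for this page, not drawn from any of the projects below) of what tf.reduce_prod computes, assuming the TF 1.x graph-mode API that the examples use:

# Minimal illustration of tf.reduce_prod (TF 1.x graph mode; not from any project below).
import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])

all_prod = tf.reduce_prod(x)                           # product of every element -> 720.0
col_prod = tf.reduce_prod(x, axis=0)                   # per-column products -> [4., 10., 18.]
row_prod = tf.reduce_prod(x, axis=1, keep_dims=True)   # per-row, rank kept -> [[6.], [120.]]
num_elems = tf.reduce_prod(tf.shape(x))                # common idiom: total element count -> 6

with tf.Session() as sess:
    print(sess.run([all_prod, col_prod, row_prod, num_elems]))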
def variable_summaries(var, name, collections=None):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

    Args:
        - var: Tensor for variable from which we want to log.
        - name: Variable name.
        - collections: List of collections to save the summary to.
    """
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean, collections)
        num_params = tf.reduce_prod(tf.shape(var))
        tf.summary.scalar('num_params', num_params, collections)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev, collections)
        tf.summary.scalar('max', tf.reduce_max(var), collections)
        tf.summary.scalar('min', tf.reduce_min(var), collections)
        tf.summary.histogram('histogram', var, collections)
        tf.summary.scalar('sparsity', tf.nn.zero_fraction(var), collections)
def extract_patches_fn(image: tf.Tensor, patch_shape: list, offsets) -> tf.Tensor:
    """
    :param image: tf.Tensor
    :param patch_shape: [h, w]
    :param offsets: tuple between 0 and 1
    :return: patches [batch_patches, h, w, c]
    """
    with tf.name_scope('patch_extraction'):
        h, w = patch_shape
        c = image.get_shape()[-1]

        offset_h = tf.cast(tf.round(offsets[0] * h // 2), dtype=tf.int32)
        offset_w = tf.cast(tf.round(offsets[1] * w // 2), dtype=tf.int32)
        offset_img = image[offset_h:, offset_w:, :]
        offset_img = offset_img[None, :, :, :]

        patches = tf.extract_image_patches(offset_img, ksizes=[1, h, w, 1],
                                           strides=[1, h // 2, w // 2, 1],
                                           rates=[1, 1, 1, 1], padding='VALID')
        patches_shape = tf.shape(patches)
        # returns [batch_patches, h, w, c]
        return tf.reshape(patches, [tf.reduce_prod(patches_shape[0:3]), h, w, int(c)])
def _max_pool_grad_grad(dy, x, y, ksize, strides, padding, argmax=None):
    """Gradients of MaxPoolGrad."""
    if argmax is None:
        _, argmax = tf.nn.max_pool_with_argmax(x, ksize, strides, padding)
    grad = dy
    grad_flat = tf.reshape(grad, [-1])
    argmax_flat = tf.reshape(argmax, [-1])

    x_shape = tf.cast(tf.shape(x), argmax.dtype)
    batch_dim = tf.reshape(
        tf.range(x_shape[0], dtype=argmax.dtype), [-1, 1, 1, 1])
    nelem = tf.reduce_prod(x_shape[1:])
    batch_dim *= nelem

    y_zero = tf.zeros_like(y, dtype=argmax.dtype)
    batch_dim += y_zero
    batch_dim = tf.reshape(batch_dim, [-1])

    argmax_flat += batch_dim
    grad_input = tf.gather(grad_flat, argmax_flat)
    grad_input = tf.reshape(grad_input, tf.shape(y))
    return grad_input
def ternary_decoder(encoded_data, scaler, shape):
    """Decoding the signs to float format."""
    a = tf.cast(encoded_data, tf.int32)
    a_split1 = tf.mod(a, 4)
    a_split2 = tf.to_int32(tf.mod(a / 4, 4))
    a_split3 = tf.to_int32(tf.mod(a / 16, 4))
    a_split4 = tf.to_int32(tf.mod(a / 64, 4))
    a = tf.concat([a_split1, a_split2, a_split3, a_split4], 0)
    real_size = tf.reduce_prod(shape)
    a = tf.to_float(a)
    a = tf.gather(a, tf.range(0, real_size))
    a = tf.reshape(a, shape)
    a = tf.subtract(a, 1)
    decoded = a * scaler
    return decoded
def f_inter_box(top_left_a, bot_right_a, top_left_b, bot_right_b):
    """Computes intersection area with boxes.

    Args:
        top_left_a: [B, T, 2] or [B, 2]
        bot_right_a: [B, T, 2] or [B, 2]
        top_left_b: [B, T, 2] or [B, 2]
        bot_right_b: [B, T, 2] or [B, 2]

    Returns:
        area: [B, T]
    """
    top_left_max = tf.maximum(top_left_a, top_left_b)
    bot_right_min = tf.minimum(bot_right_a, bot_right_b)
    ndims = tf.shape(tf.shape(top_left_a))

    # Check if the resulting box is valid.
    overlap = tf.to_float(top_left_max < bot_right_min)
    overlap = tf.reduce_prod(overlap, ndims - 1)
    area = tf.reduce_prod(bot_right_min - top_left_max, ndims - 1)
    area = overlap * tf.abs(area)
    return area
def f_iou_box_old(top_left_a, bot_right_a, top_left_b, bot_right_b):
    """Computes IoU of boxes.

    Args:
        top_left_a: [B, T, 2] or [B, 2]
        bot_right_a: [B, T, 2] or [B, 2]
        top_left_b: [B, T, 2] or [B, 2]
        bot_right_b: [B, T, 2] or [B, 2]

    Returns:
        iou: [B, T]
    """
    inter_area = f_inter_box(top_left_a, bot_right_a, top_left_b, bot_right_b)
    inter_area = tf.maximum(inter_area, 1e-6)
    ndims = tf.shape(tf.shape(top_left_a))
    # area_a = tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    # area_b = tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    check_a = tf.reduce_prod(tf.to_float(top_left_a < bot_right_a), ndims - 1)
    area_a = check_a * tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    check_b = tf.reduce_prod(tf.to_float(top_left_b < bot_right_b), ndims - 1)
    area_b = check_b * tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    union_area = (area_a + area_b - inter_area + 1e-5)
    union_area = tf.maximum(union_area, 1e-5)
    iou = inter_area / union_area
    iou = tf.maximum(iou, 1e-5)
    iou = tf.minimum(iou, 1.0)
    return iou
def get_filled_box_idx(idx, top_left, bot_right):
    """Fill a box with top left and bottom right coordinates.

    Args:
        idx: [B, T, H, W, 2] or [B, H, W, 2] or [H, W, 2]
        top_left: [B, T, 2] or [B, 2] or [2]
        bot_right: [B, T, 2] or [B, 2] or [2]
    """
    ss = tf.shape(idx)
    ndims = tf.shape(ss)
    batch = tf.slice(ss, [0], ndims - 3)
    coord_shape = tf.concat(0, [batch, tf.constant([1, 1, 2])])
    top_left = tf.reshape(top_left, coord_shape)
    bot_right = tf.reshape(bot_right, coord_shape)
    lower = tf.reduce_prod(tf.to_float(idx >= top_left), ndims - 1)
    upper = tf.reduce_prod(tf.to_float(idx <= bot_right), ndims - 1)
    box = lower * upper
    return box
def prod(x, axis=None, keepdims=False):
    """Multiplies the values in a tensor, alongside the specified axis.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to compute the product.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced by 1.
            If `keepdims` is `True`, the reduced dimension is retained with length 1.

    # Returns
        A tensor with the product of elements of `x`.
    """
    axis = _normalize_axis(axis, ndim(x))
    return tf.reduce_prod(x, reduction_indices=axis, keep_dims=keepdims)
def sparse_filtering_loss(_, y_pred):
    '''Defines the sparse filtering loss function.

    Args:
        y_true (tensor): The ground truth tensor (not used, since this is an
            unsupervised learning algorithm).
        y_pred (tensor): Tensor representing the feature vector at a
            particular layer.

    Returns:
        scalar tensor: The sparse filtering loss.
    '''
    y = tf.reshape(y_pred, tf.stack([-1, tf.reduce_prod(y_pred.shape[1:])]))
    l2_normed = tf.nn.l2_normalize(y, dim=1)
    l1_norm = tf.norm(l2_normed, ord=1, axis=1)
    return tf.reduce_sum(l1_norm)
def __init__(self, input_, outdim=2, debug=False):
    assert outdim >= 1
    self._outdim = outdim
    input_shape = tuple(input_.get_shape().as_list())
    to_flatten = input_shape[self._outdim - 1:]
    if any(s is None for s in to_flatten):
        flattened = None
    else:
        flattened = int(np.prod(to_flatten))

    self._output_shape = input_shape[1:self._outdim - 1] + (flattened,)
    if debug:
        util.header('Flatten(new_shape=%s)' % str(self._output_shape))

    pre_shape = tf.shape(input_)[:self._outdim - 1:]
    to_flatten = tf.reduce_prod(tf.shape(input_)[self._outdim - 1:])
    self._output = tf.reshape(input_, tf.concat(0, [pre_shape, tf.pack([to_flatten])]))
def _usage_after_read(self, prev_usage, free_gate, read_weights):
    """Calculates the new usage after reading and freeing from memory.

    Args:
        prev_usage: tensor of shape `[batch_size, memory_size]`.
        free_gate: tensor of shape `[batch_size, num_reads]` with entries in the
            range [0, 1] indicating the amount that locations read from can be
            freed.
        read_weights: tensor of shape `[batch_size, num_reads, memory_size]`.

    Returns:
        New usage, a tensor of shape `[batch_size, memory_size]`.
    """
    with tf.name_scope('usage_after_read'):
        free_gate = tf.expand_dims(free_gate, -1)
        free_read_weights = free_gate * read_weights
        phi = tf.reduce_prod(1 - free_read_weights, [1], name='phi')
        return prev_usage * phi
def gather_indices_2d(x, block_shape, block_stride):
    """Getting gather indices."""
    # making an identity matrix kernel
    kernel = tf.eye(block_shape[0] * block_shape[1])
    kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
    # making indices [1, h, w, 1] to apply convs
    x_shape = common_layers.shape_list(x)
    indices = tf.range(x_shape[2] * x_shape[3])
    indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1])
    indices = tf.nn.conv2d(
        tf.cast(indices, tf.float32),
        kernel,
        strides=[1, block_stride[0], block_stride[1], 1],
        padding="VALID")
    # making indices [num_blocks, dim] to gather
    dims = common_layers.shape_list(indices)[:3]
    if all([isinstance(dim, int) for dim in dims]):
        num_blocks = functools.reduce(operator.mul, dims, 1)
    else:
        num_blocks = tf.reduce_prod(dims)
    indices = tf.reshape(indices, [num_blocks, -1])
    return tf.cast(indices, tf.int32)
def hypervolume(self, reference):
    """
    Autoflow method to calculate the hypervolume indicator.

    The hypervolume indicator is the volume of the dominated region.

    :param reference: reference point to use
        Should be equal or bigger than the anti-ideal point of the Pareto set
        For comparing results across runs the same reference point must be used
    :return: hypervolume indicator (the higher the better)
    """
    min_pf = tf.reduce_min(self.front, 0, keep_dims=True)
    R = tf.expand_dims(reference, 0)
    pseudo_pf = tf.concat((min_pf, self.front, R), 0)
    D = tf.shape(pseudo_pf)[1]
    N = tf.shape(self.bounds.ub)[0]
    idx = tf.tile(tf.expand_dims(tf.range(D), -1), [1, N])
    ub_idx = tf.reshape(tf.stack([tf.transpose(self.bounds.ub), idx], axis=2), [N * D, 2])
    lb_idx = tf.reshape(tf.stack([tf.transpose(self.bounds.lb), idx], axis=2), [N * D, 2])
    ub = tf.reshape(tf.gather_nd(pseudo_pf, ub_idx), [D, N])
    lb = tf.reshape(tf.gather_nd(pseudo_pf, lb_idx), [D, N])
    hv = tf.reduce_sum(tf.reduce_prod(ub - lb, 0))
    return tf.reduce_prod(R - min_pf) - hv
def get_marginal_likelihood(yt, mean_yt, xt, s, alpha, beta, eta_mu, eta_sigma, eps,
                            sigma_px, epsilon=1e-8):
    yt_expand = tf.expand_dims(yt, 0)
    mean_yt = tf.reshape(mean_yt, [s, FLAGS.batch_size, 784])
    xt = tf.reshape(xt, [1, s, FLAGS.batch_size, FLAGS.hidden_size])
    # p_ygivenx = tf.reduce_prod(tf.pow(mean_yt, yt_expand) * tf.pow(1 - mean_yt, 1 - yt_expand), axis=2)
    v = alpha / (alpha + beta)
    pi = tf.concat(0, [v, [1.0]]) * tf.concat(0, [[1.0], tf.cumprod(1 - v)])
    p_x = gaussian_mixture_pdf(eta_mu, tf.square(eta_sigma) + tf.square(sigma_px), xt, pi)
    log_p_y_s = tf.reduce_sum(yt_expand * tf.log(mean_yt + epsilon) \
                              + (1.0 - yt_expand) * tf.log(1.0 - mean_yt + epsilon), 2) \
                + tf.log(p_x) \
                + 0.5 * tf.reduce_sum(tf.square(eps), 2)
    log_p_y_s_max = tf.reduce_max(log_p_y_s, reduction_indices=0)
    log_p_y = tf.log(tf.reduce_mean(tf.exp(log_p_y_s - log_p_y_s_max), 0)) + log_p_y_s_max
    return tf.reduce_mean(log_p_y)

# Taken from: https://github.com/tensorflow/tensorflow/issues/6322
def gauss_prob(mu, logstd, x):
    std = tf.exp(logstd)
    var = tf.square(std)
    # Gaussian density uses the squared deviation in the exponent.
    gp = tf.exp(-tf.square(x - mu) / (2 * var)) / ((2 * np.pi) ** .5 * std)
    return tf.reduce_prod(gp, [1])
def cnnmodel(X, Y, paras, flag='single'):
    assert(flag == 'single' or flag == 'combine')
    X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
    yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
    yonehot = tf.concat(3, [1 - yreshape, yreshape])
    if flag == 'combine':
        hconv4clip = buildcombmodel(X, paras)
    else:
        hconv4clip = buildmodel(X, paras)
    #hconv4log = -tf.log(hconv4clip)
    #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
    #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
    q_train = -tf.log(hconv4clip)
    trainenergy = tf.reduce_sum((q_train) * yonehot, reduction_indices=3)
    #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
    trainenergy = tf.reduce_mean(trainenergy, [0, 1, 2])
    q_test = hconv4clip
    #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
    q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
    testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
    #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
    testenergy = tf.reduce_mean(testenergy, [0, 1, 2])
    predarg = tf.argmax(q_test, 3)
    yint64 = tf.to_int64(Y)
    acc = tf.equal(yint64, predarg)
    acc = tf.to_float(acc)
    accuracy = tf.reduce_mean(acc, [0, 1, 2])
    di = dice_tf(tf.reshape(yint64, [-1, ]), tf.reshape(predarg, [-1, ]))
    return trainenergy, accuracy, di, testenergy, q_test
def cnnmodel(X, Y, paras, flag='single'):
    assert(flag == 'single' or flag == 'combine')
    X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
    yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
    yonehot = tf.concat(3, [1 - yreshape, yreshape])
    if flag == 'combine':
        hconv4clip = buildcombmodel(X, paras)
    else:
        hconv4clip = buildmodel(X, paras)
    #hconv4log = -tf.log(hconv4clip)
    #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
    #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
    q_train = -tf.log(hconv4clip)
    trainenergy = tf.reduce_sum((q_train) * yonehot, reduction_indices=3)
    #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
    trainenergy = tf.reduce_mean(trainenergy, [0, 1, 2])
    q_test = hconv4clip
    #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
    q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
    testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
    #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
    testenergy = tf.reduce_mean(testenergy, [0, 1, 2])
    predarg = tf.argmax(q_test, 3)
    yint64 = tf.to_int64(Y)
    acc = tf.equal(yint64, predarg)
    acc = tf.to_float(acc)
    accuracy = tf.reduce_mean(acc, [0, 1, 2])
    di = dice_tf(tf.reshape(yint64, [-1, ]), tf.reshape(predarg, [-1, ]))
    return trainenergy, accuracy, di, testenergy, predarg
def model(X, Y, k1, k2, paras, flag='single'):
    assert(flag == 'single' or flag == 'combine')
    X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
    yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
    yonehot = tf.concat(3, [1 - yreshape, yreshape])
    if flag == 'combine':
        hconv4clip = buildcombmodel(X, paras, fusion=False)
        #h1, h2, h3, h4 = tf.split(3, 4, hconv4clip)
        q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2,
                                 trainiter=5, testiter=10, wunary=paras['wunary'])
    else:
        hconv4clip = buildmodel(X, paras)
        q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2,
                                 trainiter=5, testiter=10)
    #hconv4log = -tf.log(hconv4clip)
    #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
    #q_train = -tf.log(hconv4clip)
    q_trainclip = tf.clip_by_value(q_train, 1e-6, 1.)
    trainenergy = tf.reduce_sum(-tf.log(q_trainclip) * yonehot, reduction_indices=3)
    #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
    trainenergy = tf.reduce_mean(trainenergy, [0, 1, 2])
    #q_test = hconv4clip
    #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
    q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
    testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
    #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
    testenergy = tf.reduce_mean(testenergy, [0, 1, 2])
    predarg = tf.argmax(q_test, 3)
    yint64 = tf.to_int64(Y)
    acc = tf.equal(yint64, predarg)
    acc = tf.to_float(acc)
    accuracy = tf.reduce_mean(acc, [0, 1, 2])
    di = dice_tf(tf.reshape(yint64, [-1, ]), tf.reshape(predarg, [-1, ]))
    return trainenergy, accuracy, di, testenergy, predarg
def log_norm(expr_list, name):
    """
    :param expr_list:
    :param name:
    :return:
    """
    n_elems = 0
    norm = 0.
    for e in nest.flatten(expr_list):
        n_elems += tf.reduce_prod(tf.shape(e))
        norm += tf.reduce_sum(e ** 2)

    norm /= tf.to_float(n_elems)
    tf.summary.scalar(name, norm)
    return norm
def xavier_normal_dist_conv3d(shape):
    return tf.truncated_normal(
        shape, mean=0,
        stddev=tf.sqrt(3. / (tf.reduce_prod(shape[:3]) * tf.reduce_sum(shape[3:]))))
def xavier_uniform_dist_conv3d(shape):
    with tf.variable_scope('xavier_glorot_initializer'):
        denominator = tf.cast((tf.reduce_prod(shape[:3]) * tf.reduce_sum(shape[3:])), tf.float32)
        lim = tf.sqrt(6. / denominator)
        return tf.random_uniform(shape, minval=-lim, maxval=lim)
def prod(x, axis=None, keepdims=False):
    '''Multiplies the values in a tensor, alongside the specified axis.
    '''
    axis = _normalize_axis(axis, ndim(x))
    return tf.reduce_prod(x, reduction_indices=axis, keep_dims=keepdims)
def get_output_for(self, input, **kwargs):
    # total_entries = tf.reduce_prod(tf.shape(input))
    pre_shape = tf.shape(input)[:self.outdim - 1]
    to_flatten = tf.reduce_prod(tf.shape(input)[self.outdim - 1:])
    return tf.reshape(input, tf.concat(0, [pre_shape, tf.pack([to_flatten])]))
def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
    old_p = old_dist_info_vars["p"]
    new_p = new_dist_info_vars["p"]
    ndims = old_p.get_shape().ndims
    return tf.reduce_prod(x_var * new_p / (old_p + TINY)
                          + (1 - x_var) * (1 - new_p) / (1 - old_p + TINY),
                          reduction_indices=ndims - 1)
def _prob(self, given):
    return tf.reduce_prod(tf.ones_like(given), -1)
def prob(self, given):
    """
    prob(given)

    Compute probability density (mass) function at `given` value.

    :param given: A Tensor. The value at which to evaluate probability
        density (mass) function. Must be able to broadcast to have a shape
        of ``(... + )batch_shape + value_shape``.
    :return: A Tensor of shape ``(... + )batch_shape[:-group_ndims]``.
    """
    given = self._check_input_shape(given)
    p = self._prob(given)
    return tf.reduce_prod(p, tf.range(-self._group_ndims, 0))
def Flatten(layer):
    """
    Handy function for flattening the result of a conv2D or maxpool2D
    to be used for a fully-connected (affine) layer.
    """
    layer_shape = layer.get_shape()
    # num_features = tf.reduce_prod(tf.shape(layer)[1:])
    num_features = layer_shape[1:].num_elements()
    layer_flat = tf.reshape(layer, [-1, num_features])
    return layer_flat, num_features
def test_sum_prod_broadcast(self):
    # placeholder
    a = tf.placeholder(tf.float32, shape=[3, 4, 5, 6])
    b = tf.placeholder(tf.float32, shape=[3, 4, 5])
    a_sum = tf.reduce_sum(a, reduction_indices=[0, 3])   # shape (4, 5)
    b_prod = tf.reduce_prod(b, reduction_indices=[0, 1])  # shape (5,)
    f = a_sum + b_prod + b  # (4, 5) + (5,) + (3, 4, 5) -> (3, 4, 5)

    # value
    feed_dict = dict()
    for x in [a, b]:
        feed_dict[x] = np.random.rand(*tf_obj_shape(x))

    # test
    self.run(f, tf_feed_dict=feed_dict)
def get_output_for(self, input, **kwargs):
    # total_entries = tf.reduce_prod(tf.shape(input))
    pre_shape = tf.shape(input)[:self.outdim - 1]
    to_flatten = tf.reduce_prod(tf.shape(input)[self.outdim - 1:])
    return tf.reshape(input, tf.concat(axis=0, values=[pre_shape, tf.stack([to_flatten])]))
def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
    old_p = old_dist_info_vars["p"]
    new_p = new_dist_info_vars["p"]
    ndims = old_p.get_shape().ndims
    return tf.reduce_prod(x_var * new_p / (old_p + TINY)
                          + (1 - x_var) * (1 - new_p) / (1 - old_p + TINY),
                          axis=ndims - 1)
def MaxPool_FwGrad(op,
                   dx,
                   ksize=[1, 2, 2, 1],
                   strides=[1, 2, 2, 1],
                   padding="SAME",
                   _op_table=None,
                   _grad_table=None):
    """Forward gradient operator for max pooling.

    Args:
        x: Input tensor, 4D tensor, [N, H, W, C].
        dx: Gradient of the input tensor, 4D tensor, [N, H, W, C].
        ksize: Kernel size of the max pooling operator, list of integers.
        strides: Strides of the max pooling operator, list of integers.
        padding: Padding, string, "SAME" or "VALID".
        data_format: "NHWC" or "NCHW".
    """
    if dx is None:
        return None
    x = op.inputs[0]
    y = op.outputs[0]
    _, argmax = tf.nn.max_pool_with_argmax(x, ksize, strides, padding)
    dx_flat = tf.reshape(dx, [-1])
    argmax_flat = tf.reshape(argmax, [-1])
    y_zero = tf.zeros_like(y, dtype=argmax.dtype)
    x_shape = tf.cast(tf.shape(x), argmax.dtype)
    batch_dim = tf.reshape(
        tf.range(x_shape[0], dtype=argmax.dtype), [-1, 1, 1, 1])
    nelem = tf.reduce_prod(x_shape[1:])
    batch_dim *= nelem
    batch_dim += y_zero
    batch_dim = tf.reshape(batch_dim, [-1])
    argmax_flat += batch_dim
    dx_sel = tf.gather(dx_flat, argmax_flat)
    dy = tf.reshape(dx_sel, tf.shape(argmax))
    return dy
def ternary_decoder(encoded_data, scaler, shape):
    """Decoding the signs to float format."""
    a = tf.cast(encoded_data, tf.int32)
    a_split1 = tf.mod(a, 4)
    a_split2 = tf.to_int32(tf.mod(a / 4, 4))
    a_split3 = tf.to_int32(tf.mod(a / 16, 4))
    a_split4 = tf.to_int32(tf.mod(a / 64, 4))
    a = tf.concat([a_split1, a_split2, a_split3, a_split4], 0)
    real_size = tf.reduce_prod(shape)
    a = tf.to_float(a)
    a = tf.gather(a, tf.range(0, real_size))
    a = tf.reshape(a, shape)
    a = tf.subtract(a, 1)
    decoded = a * scaler
    return decoded
def f_match_loss(y_out, y_gt, match, timespan, loss_fn, model=None):
    """Binary cross entropy with matching.

    Args:
        y_out: [B, N, H, W] or [B, N, D]
        y_gt: [B, N, H, W] or [B, N, D]
        match: [B, N, N]
        match_count: [B]
        timespan: N
        loss_fn:
    """
    # N * [B, 1, H, W]
    y_out_list = tf.split(1, timespan, y_out)
    # N * [B, 1, N]
    match_list = tf.split(1, timespan, match)
    err_list = [None] * timespan
    shape = tf.shape(y_out)
    num_ex = tf.to_float(shape[0])
    num_dim = tf.to_float(tf.reduce_prod(tf.to_float(shape[2:])))
    sshape = tf.size(shape)

    # [B, N, M] => [B, N]
    match_sum = tf.reduce_sum(match, reduction_indices=[2])
    # [B, N] => [B]
    match_count = tf.reduce_sum(match_sum, reduction_indices=[1])
    match_count = tf.maximum(match_count, 1)

    for ii in range(timespan):
        # [B, 1, H, W] * [B, N, H, W] => [B, N, H, W] => [B, N]
        # [B, N] * [B, N] => [B]
        # [B] => [B, 1]
        red_idx = tf.range(2, sshape)
        err_list[ii] = tf.expand_dims(
            tf.reduce_sum(
                tf.reduce_sum(loss_fn(y_out_list[ii], y_gt), red_idx) *
                tf.reshape(match_list[ii], [-1, timespan]), [1]), 1)

    # N * [B, 1] => [B, N] => [B]
    err_total = tf.reduce_sum(tf.concat(1, err_list), reduction_indices=[1])
    return tf.reduce_sum(err_total / match_count) / num_ex / num_dim
def get_normalized_gamma(size, filter_height, filter_width):
    """Get normalized gamma.

    Args:
        size: [B, T, 2] or [B, 2] or [2]
        filter_height: int
        filter_width: int

    Returns:
        lg_gamma: [B, T] or [B] or float
    """
    rank = tf.shape(tf.shape(size))
    filter_area = filter_height * filter_width
    area = tf.reduce_prod(size, rank - 1)
    lg_gamma = tf.log(float(filter_area)) - tf.log(area)
    return lg_gamma
def prod(self, x, axis=None, keepdims=False):
    '''Multiplies the values in a tensor, alongside the specified axis.
    '''
    return tf.reduce_prod(x, reduction_indices=axis, keep_dims=keepdims)
def imagenet(self, image_feat, reuse=False):
    with tf.variable_scope('image_net', reuse=reuse) as scope:
        wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
        image_fc1 = tf.contrib.layers.fully_connected(image_feat, 4096,
                                                      weights_regularizer=wd, scope='i_fc1')
        image_fc2 = tf.contrib.layers.fully_connected(image_fc1, self.num_class, activation_fn=None,
                                                      weights_regularizer=wd, scope='i_fc2')
        prob = tf.reduce_mean(image_fc2, axis=1)  # 1-tf.reduce_prod(1-image_fc2, axis=1)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = image_fc2
        self.endpoint['prob'] = prob
    return prob
def imagenet(self, image_feat, reuse=False):
    with tf.variable_scope('image_net', reuse=reuse) as scope:
        wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
        image_fc1 = tf.contrib.layers.fully_connected(image_feat, 4096,
                                                      weights_regularizer=wd, scope='i_fc1')
        image_fc2 = tf.contrib.layers.fully_connected(image_fc1, 5000, activation_fn=tf.nn.sigmoid,
                                                      weights_regularizer=wd, scope='i_fc2')
        prob = 1 - tf.reduce_prod(1 - image_fc2, axis=1)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = image_fc2
        self.endpoint['prob'] = prob
    return prob
def flatten2d(inputs, name=None):
    """Flatten tensor to two dimensions (batch_size, item_vector_size)."""
    x = tf.convert_to_tensor(inputs)
    dims = tf.reduce_prod(tf.shape(x)[1:])
    x = tf.reshape(x, [-1, dims], name=name)
    return x
def multilinear(emb, tuples, l2=0):
    """
    Compute the dot product of real vectors at selected embeddings.

    Note that this model is called Canonical Parafac (CP), and corresponds to the "distmult"
    model in some scientific publications on relational database factorization.

    :param emb: embedding matrix of size [n_emb, rank] containing float numbers
    :param tuples: tuple matrix of size [n_t, arity] containing integers
    :param l2: optional l2 regularization strength that is added to the score. If it is
        different from 0, the function returns a pair (pred, l2norm) where pred is the
        sample prediction, but l2norm is the l2 norm of the selected embeddings
    :return: the multilinear dot product between selected embeddings
        S[i] = sum_j prod_k E[I[i,k],j]

    >>> embeddings = [[1., 1, 0, 3], [0, 1, 0, 1], [-1, 1, 1, 5]]
    >>> idx = tf.Variable([[0, 1], [1, 0], [0, 2], [2, 0], [1, 2], [2, 1]])
    >>> g = multilinear(embeddings, idx)
    >>> print(tf_eval(g))
    [ 4. 4. 15. 15. 6. 6.]
    """
    emb_sel = tf.gather(emb, tuples)

    pred = tf.reduce_sum(tf.reduce_prod(emb_sel, 1), 1)
    if l2 == 0:  # unregularized prediction ==> returns only the predictions
        return pred
    else:  # l2 regularization of the selected embeddings
        reg = l2 * tf.reduce_sum(tf.square(emb_sel))
        return pred, reg
def multilinear_grad(emb: tf.Tensor, tuples: tf.Tensor, score=False) -> tf.Tensor:
    tuple_shape = [d.value for d in tuples.get_shape()]
    # if len(tuple_shape) > 2:
    #     n = np.prod(tuple_shape[:-1])
    #     tuples = tf.reshape(tuples, (n, -1))
    # n = tuples.get_shape()[0].value
    order = tuples.get_shape()[2].value
    rank = emb.get_shape()[-1].value
    if order == 2:
        if score:
            emb_sel = tf.gather(emb, tuples)
            grad_score = tf.reshape(tf.reverse(emb_sel, [False, False, True, False]),
                                    tuple_shape[:-1] + [2, rank])
            prod = tf.reduce_prod(emb_sel, 2)
            preds = tf.reshape(tf.reduce_sum(prod, 2), tuple_shape[:-1])
            return grad_score, preds
    raise NotImplementedError('Todo')
    # grad_score0 = tf.reverse(emb_sel, [False, True, False])  # reverse the row and column embeddings
    # prod = tf.reduce_prod(emb_sel, 1)
    # preds = tf.reshape(tf.reduce_sum(prod, 1), tuple_shape[:-1])
    # # preds = tf.reshape(tf.reduce_sum(prod, 1), tuple_shape[:-1])
    # else:  # derivative of a product
    #     prod = tf.reduce_prod(emb_sel, 1)
    #     grad_score0 = tf.tile(tf.reshape(prod, (n, 1, rank)), (1, order, 1)) / emb_sel
    # grad_score = tf.reshape(grad_score0, tuple_shape + [rank])
    # if score:
    #     prod = tf.reduce_prod(emb_sel, 1)
    #     preds = tf.reshape(tf.reduce_sum(prod, 1), tuple_shape[:-1])
    #     return grad_score, preds
    # else:
    #     return grad_score
def corrupt(tensor, corruption_level=0.05):
    """Uses the masking noise algorithm to mask corruption_level proportion of the input.

    :param tensor: A tensor whose values are to be corrupted.
    :param corruption_level: A float in [0, 1] specifying the probability with which each value is corrupted.

    :return: The corrupted tensor.
    """
    total_samples = tf.reduce_prod(tf.shape(tensor))
    corruption_matrix = tf.multinomial(tf.log([[corruption_level, 1 - corruption_level]]), total_samples)
    corruption_matrix = tf.cast(tf.reshape(corruption_matrix, shape=tf.shape(tensor)), dtype=tf.float32)
    return tf.mul(tensor, corruption_matrix)
def _area_loss(pred_bbox, img_size, presence):
    area = pred_bbox[..., 2] * pred_bbox[..., 3]
    ratio = area / tf.reduce_prod(tf.to_float(img_size))
    weights = tf.clip_by_value(ratio, 1., 10.)
    ratio = tf.clip_by_value(ratio, 0., 1.)
    return _time_weighted_nll(1 - ratio, presence, weights)
def log_norm(expr_list, name):
    """
    :param expr_list:
    :param name:
    :return:
    """
    n_elems = 0
    norm = 0.
    for e in expr_list:
        n_elems += tf.reduce_prod(tf.shape(e))
        norm += tf.reduce_sum(e ** 2)

    norm /= tf.to_float(n_elems)
    tf.summary.scalar(name, norm)
    return norm
def minimize_clipped(optimizer, loss, clip_value, return_gvs=False, soft=False, **kwargs):
    """Computes a train_op with clipped gradients in the range [-clip_value, clip_value]

    :param optimizer: Tensorflow optimizer object
    :param loss: tensor
    :param clip_value: scalar value
    :param return_gvs: returns list of tuples of (gradient, parameter) for trainable variables
    :param kwargs: kwargs for optimizer.compute_gradients function
    :return: train_step
    """
    gvs = optimizer.compute_gradients(loss, **kwargs)
    clipped_gvs = [(g, v) for (g, v) in gvs if g is not None]

    if not soft:
        clipped_gvs = [(tf.clip_by_value(g, -clip_value, clip_value), v) for (g, v) in clipped_gvs]
    else:
        n_elems = 0
        norm_squared = 0.
        for g, v in gvs:
            n_elems += tf.reduce_prod(tf.shape(g))
            norm_squared += tf.reduce_sum(g ** 2)

        norm_squared /= tf.to_float(n_elems)
        inv_norm = gen_math_ops.rsqrt(norm_squared)
        cond = tf.greater(norm_squared, clip_value ** 2)

        def clip(x):
            return tf.cond(cond, lambda: clip_value * x * inv_norm, lambda: x)

        clipped_gvs = [(clip(g), v) for (g, v) in clipped_gvs]

    train_step = optimizer.apply_gradients(clipped_gvs)

    if return_gvs:
        train_step = (train_step, gvs)

    return train_step
def _build_likelihood(self):
    return tf.reduce_sum(self.a) + sum(map(tf.reduce_prod, self.trainable_vars))
def Kuf(self, kern, Xnew):
    if isinstance(kern, kernels.RBF):
        with decors.params_as_tensors_for(kern):
            Xnew, _ = kern._slice(Xnew, None)
            Zmu, Zlen = kern._slice(self.Z, self.scales)
            idlengthscales = kern.lengthscales + Zlen
            d = self._cust_square_dist(Xnew, Zmu, idlengthscales)
            Kuf = tf.transpose(kern.variance * tf.exp(-d / 2) *
                               tf.reshape(tf.reduce_prod(kern.lengthscales / idlengthscales, 1),
                                          (1, -1)))
            return Kuf
    else:
        raise NotImplementedError(
            "Multiscale features not implemented for `%s`." % str(type(kern)))
def Kuu(self, kern, jitter=0.0):
    if isinstance(kern, kernels.RBF):
        with decors.params_as_tensors_for(kern):
            Zmu, Zlen = kern._slice(self.Z, self.scales)
            idlengthscales2 = tf.square(kern.lengthscales + Zlen)
            sc = tf.sqrt(
                tf.expand_dims(idlengthscales2, 0) + tf.expand_dims(idlengthscales2, 1)
                - tf.square(kern.lengthscales))
            d = self._cust_square_dist(Zmu, Zmu, sc)
            Kzz = kern.variance * tf.exp(-d / 2) * tf.reduce_prod(kern.lengthscales / sc, 2)
            Kzz += jitter * tf.eye(len(self), dtype=settings.float_type)
            return Kzz
    else:
        raise NotImplementedError(
            "Multiscale features not implemented for `%s`." % str(type(kern)))
def Linear_RBF_eKxzKzx(self, Ka, Kb, Z, Xmu, Xcov):
    Xcov = self._slice_cov(Xcov)
    Z, Xmu = self._slice(Z, Xmu)
    lin, rbf = (Ka, Kb) if isinstance(Ka, Linear) else (Kb, Ka)
    if not isinstance(lin, Linear):
        TypeError("{in_lin} is not {linear}".format(in_lin=str(type(lin)), linear=str(Linear)))
    if not isinstance(rbf, RBF):
        TypeError("{in_rbf} is not {rbf}".format(in_rbf=str(type(rbf)), rbf=str(RBF)))
    if lin.ARD or type(lin.active_dims) is not slice or type(rbf.active_dims) is not slice:
        raise NotImplementedError("Active dims and/or Linear ARD not implemented. "
                                  "Switching to quadrature.")
    D = tf.shape(Xmu)[1]
    M = tf.shape(Z)[0]
    N = tf.shape(Xmu)[0]

    if rbf.ARD:
        lengthscales = rbf.lengthscales
    else:
        lengthscales = tf.zeros((D, ), dtype=settings.float_type) + rbf.lengthscales

    lengthscales2 = lengthscales ** 2.0
    const = rbf.variance * lin.variance * tf.reduce_prod(lengthscales)
    gaussmat = Xcov + tf.matrix_diag(lengthscales2)[None, :, :]  # NxDxD
    det = tf.matrix_determinant(gaussmat) ** -0.5  # N

    cgm = tf.cholesky(gaussmat)  # NxDxD
    tcgm = tf.tile(cgm[:, None, :, :], [1, M, 1, 1])
    vecmin = Z[None, :, :] - Xmu[:, None, :]  # NxMxD
    d = tf.matrix_triangular_solve(tcgm, vecmin[:, :, :, None])  # NxMxDx1
    exp = tf.exp(-0.5 * tf.reduce_sum(d ** 2.0, [2, 3]))  # NxM
    # exp = tf.Print(exp, [tf.shape(exp)])

    vecplus = (Z[None, :, :, None] / lengthscales2[None, None, :, None] +
               tf.matrix_solve(Xcov, Xmu[:, :, None])[:, None, :, :])  # NxMxDx1
    mean = tf.cholesky_solve(
        tcgm, tf.matmul(tf.tile(Xcov[:, None, :, :], [1, M, 1, 1]), vecplus))
    mean = mean[:, :, :, 0] * lengthscales2[None, None, :]  # NxMxD
    a = tf.matmul(tf.tile(Z[None, :, :], [N, 1, 1]),
                  mean * exp[:, :, None] * det[:, None, None] * const,
                  transpose_b=True)
    return a + tf.transpose(a, [0, 2, 1])