We have extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.cos().
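Before the project examples, here is a minimal sketch of the op itself (my own illustration, not taken from any of the projects below). It assumes the TensorFlow 1.x graph API that these snippets use, with import tensorflow as tf and import numpy as np:

import numpy as np
import tensorflow as tf

# tf.cos computes the element-wise cosine of a tensor; inputs are in radians.
angles = tf.constant([0.0, np.pi / 2.0, np.pi], dtype=tf.float32)
cosines = tf.cos(angles)

with tf.Session() as sess:
    print(sess.run(cosines))  # approximately [1.0, 0.0, -1.0]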
def rotate_points(orig_points, angle, w, h):
    """Return rotated points

    Args:
        orig_points: 'Tensor' with shape [N, 2], each entry is point (x, y)
        angle: rotate radians
        w: image width used to scale the x coordinates
        h: image height used to scale the y coordinates

    Returns:
        'Tensor' with shape [N, 2], with rotated points
    """
    # rotation
    rotate_mat = tf.stack([[tf.cos(angle) / w, tf.sin(angle) / h],
                           [-tf.sin(angle) / w, tf.cos(angle) / h]])

    # shift coord
    orig_points = tf.subtract(orig_points, 0.5)

    orig_points = tf.stack([orig_points[:, 0] * w,
                            orig_points[:, 1] * h], axis=1)
    print(orig_points)
    rotated_points = tf.matmul(orig_points, rotate_mat) + 0.5
    return rotated_points
def random_rotation(img: tf.Tensor, max_rotation: float=0.1, crop: bool=True) -> tf.Tensor:  # from SeguinBe
    with tf.name_scope('RandomRotation'):
        rotation = tf.random_uniform([], -max_rotation, max_rotation)
        rotated_image = tf.contrib.image.rotate(img, rotation, interpolation='BILINEAR')
        if crop:
            rotation = tf.abs(rotation)
            original_shape = tf.shape(rotated_image)[:2]
            h, w = original_shape[0], original_shape[1]
            # see https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders for formulae
            old_l, old_s = tf.cond(h > w, lambda: [h, w], lambda: [w, h])
            old_l, old_s = tf.cast(old_l, tf.float32), tf.cast(old_s, tf.float32)
            new_l = (old_l * tf.cos(rotation) - old_s * tf.sin(rotation)) / tf.cos(2 * rotation)
            new_s = (old_s - tf.sin(rotation) * new_l) / tf.cos(rotation)
            new_h, new_w = tf.cond(h > w, lambda: [new_l, new_s], lambda: [new_s, new_l])
            new_h, new_w = tf.cast(new_h, tf.int32), tf.cast(new_w, tf.int32)
            bb_begin = tf.cast(tf.ceil((h - new_h) / 2), tf.int32), tf.cast(tf.ceil((w - new_w) / 2), tf.int32)
            rotated_image_crop = rotated_image[bb_begin[0]:h - bb_begin[0], bb_begin[1]:w - bb_begin[1], :]

            # If crop removes the entire image, keep the original image
            rotated_image = tf.cond(tf.equal(tf.size(rotated_image_crop), 0),
                                    true_fn=lambda: img,
                                    false_fn=lambda: rotated_image_crop)
        return rotated_image
def rotate_crop(img, rotation, crop=True, interpolation='NEAREST'):
    with tf.name_scope('RotateCrop'):
        rotated_image = tf_rotate(img, rotation, interpolation)
        if crop:
            rotation = tf.abs(rotation)
            original_shape = tf.shape(rotated_image)[:2]
            h, w = original_shape[0], original_shape[1]
            # see https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders for formulae
            old_l, old_s = tf.cond(h > w, lambda: [h, w], lambda: [w, h])
            old_l, old_s = tf.cast(old_l, tf.float32), tf.cast(old_s, tf.float32)
            new_l = (old_l * tf.cos(rotation) - old_s * tf.sin(rotation)) / tf.cos(2 * rotation)
            new_s = (old_s - tf.sin(rotation) * new_l) / tf.cos(rotation)
            new_h, new_w = tf.cond(h > w, lambda: [new_l, new_s], lambda: [new_s, new_l])
            new_h, new_w = tf.cast(new_h, tf.int32), tf.cast(new_w, tf.int32)
            bb_begin = tf.cast(tf.ceil((h - new_h) / 2), tf.int32), tf.cast(tf.ceil((w - new_w) / 2), tf.int32)
            rotated_image_crop = rotated_image[bb_begin[0]:h - bb_begin[0], bb_begin[1]:w - bb_begin[1], :]

            # If crop removes the entire image, keep the original image
            rotated_image = tf.cond(tf.equal(tf.size(rotated_image_crop), 0),
                                    true_fn=lambda: img,
                                    false_fn=lambda: rotated_image_crop)
        return rotated_image
def _get_rot_mat(self, ux_b, uy_b, uz_b):
    """ Returns a rotation matrix from axis and (encoded) angle."""
    with tf.name_scope('get_rot_mat'):
        u_norm = tf.sqrt(tf.square(ux_b) + tf.square(uy_b) + tf.square(uz_b) + 1e-8)
        theta = u_norm

        # some tmp vars
        st_b = tf.sin(theta)
        ct_b = tf.cos(theta)
        one_ct_b = 1.0 - tf.cos(theta)

        st = st_b[:, 0]
        ct = ct_b[:, 0]
        one_ct = one_ct_b[:, 0]
        norm_fac = 1.0 / u_norm[:, 0]
        ux = ux_b[:, 0] * norm_fac
        uy = uy_b[:, 0] * norm_fac
        uz = uz_b[:, 0] * norm_fac

        trafo_matrix = self._stitch_mat_from_vecs([ct + ux * ux * one_ct, ux * uy * one_ct - uz * st, ux * uz * one_ct + uy * st,
                                                   uy * ux * one_ct + uz * st, ct + uy * uy * one_ct, uy * uz * one_ct - ux * st,
                                                   uz * ux * one_ct - uy * st, uz * uy * one_ct + ux * st, ct + uz * uz * one_ct])

        return trafo_matrix
def DizzyLayerV2(X, rot_list, n):
    n_prime = int(n * (n - 1) / 2)
    thetas = tf.Variable(tf.random_uniform([n_prime, 1], 0, 2 * math.pi), name="thetas")

    results = [X]
    k = 0
    for sublist in rot_list:
        indices = []
        values = []
        for (a, b) in sublist:
            c = tf.cos(thetas[k])
            s = tf.sin(thetas[k])
            indices = indices + [[a, a], [a, b], [b, a], [b, b]]
            values = values + [c, s, -s, c]
            k += 1
        shape = [n, n]
        v = tf.pack(tf.squeeze(values))
        R = tf.SparseTensor(indices, v, shape)
        results.append(tf.sparse_tensor_dense_matmul(R, results[-1]))
    return results[-1]
def DizzyLayerV1(X, indices):
    n = int(X.get_shape()[0])
    n_prime = int(n * (n - 1) / 2)
    thetas = tf.Variable(tf.random_uniform([n_prime, 1], 0, 2 * math.pi), name="thetas")

    X_split = [X[k, :] for k in range(n)]
    for k in range(n_prime):
        (a, b) = indices[k]
        theta = thetas[k]
        c = tf.cos(theta)
        s = tf.sin(theta)
        v_1 = c * X_split[a] + s * X_split[b]
        v_2 = -s * X_split[a] + c * X_split[b]
        X_split[a] = v_1
        X_split[b] = v_2
    out = tf.pack(X_split)
    return out
def get_timing_signal(length, min_timescale=1, max_timescale=1e4, num_timescales=16):
    """Create Tensor of sinusoids of different frequencies.

    Args:
        length: Length of the Tensor to create, i.e. Number of steps.
        min_timescale: a float
        max_timescale: a float
        num_timescales: an int

    Returns:
        Tensor of shape (length, 2*num_timescales)
    """
    positions = tf.to_float(tf.range(length))
    log_timescale_increment = (
        math.log(max_timescale / min_timescale) / (num_timescales - 1))
    inv_timescales = min_timescale * tf.exp(
        tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
    scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
    return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
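As a quick illustration of the shape contract documented above (a hypothetical call, not taken from the original project):

signal = get_timing_signal(10, min_timescale=1, max_timescale=1e4, num_timescales=16)
# signal has shape (10, 32): 16 sine channels followed by 16 cosine channels,
# one row per position 0..9.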
def mortletWavelet(scale, sampleCount):
    def waveEquation(time):
        # https://www.mathworks.com/help/wavelet/ref/morlet.html
        return tf.exp(-1. * time ** 2. / 2.) * tf.cos(5. * time)

    return waveletHelper(scale, sampleCount, waveEquation)
def cos(x):
    '''Computes cos of x element-wise.
    '''
    return tf.cos(x)
def sin_and_cos(x, name="ignored"):
    return tf.concat(axis=len(x.get_shape()) - 1, values=[tf.sin(x), tf.cos(x)])
def gaussian(config, gan, net):
    z_dim = int(config.z)
    net = (net + 1) / 2

    za = tf.slice(net, [0, 0], [gan.batch_size(), z_dim // 2])
    zb = tf.slice(net, [0, z_dim // 2], [gan.batch_size(), z_dim // 2])

    pi = np.pi
    ra = tf.sqrt(-2 * tf.log(za + TINY)) * tf.cos(2 * pi * zb)
    rb = tf.sqrt(-2 * tf.log(za + TINY)) * tf.sin(2 * pi * zb)

    return tf.reshape(tf.concat(axis=1, values=[ra, rb]), net.get_shape())
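The slice-and-trig pattern above is the Box-Muller transform: two uniform samples are mapped to two independent Gaussian samples via tf.log, tf.cos and tf.sin. A minimal standalone sketch of the same idea, assuming import numpy as np and import tensorflow as tf (the names are mine, not the project's):

u1 = tf.random_uniform([128, 1], minval=1e-6, maxval=1.0)  # avoid log(0)
u2 = tf.random_uniform([128, 1], minval=0.0, maxval=1.0)
gauss = tf.sqrt(-2.0 * tf.log(u1)) * tf.cos(2.0 * np.pi * u2)  # approximately N(0, 1)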
def __init__(self, args):
    with tf.device(args.device):
        def circle(x):
            spherenet = tf.square(x)
            spherenet = tf.reduce_sum(spherenet, 1)
            lam = tf.sqrt(spherenet)
            return x / tf.reshape(lam, [int(lam.get_shape()[0]), 1])

        def modes(x):
            return tf.round(x * 2) / 2.0

        if args.distribution == 'circle':
            x = tf.random_normal([args.batch_size, 2])
            x = circle(x)
        elif args.distribution == 'modes':
            x = tf.random_uniform([args.batch_size, 2], -1, 1)
            x = modes(x)
        elif args.distribution == 'sin':
            x = tf.random_uniform((1, args.batch_size), -10.5, 10.5)
            x = tf.transpose(x)
            r_data = tf.random_normal((args.batch_size, 1), mean=0, stddev=0.1)
            xy = tf.sin(0.75 * x) * 7.0 + x * 0.5 + r_data * 1.0
            x = tf.concat([xy, x], 1) / 16.0
        elif args.distribution == 'arch':
            offset1 = tf.random_uniform((1, args.batch_size), -10, 10)
            xa = tf.random_uniform((1, 1), 1, 4)
            xb = tf.random_uniform((1, 1), 1, 4)
            x1 = tf.random_uniform((1, args.batch_size), -1, 1)
            xcos = tf.cos(x1 * np.pi + offset1) * xa
            xsin = tf.sin(x1 * np.pi + offset1) * xb
            x = tf.transpose(tf.concat([xcos, xsin], 0)) / 16.0

        self.x = x
        self.xy = tf.zeros_like(self.x)
def compute_mse_loss(x, xhat, hparams):
    """MSE loss function.

    Args:
        x: Input data tensor.
        xhat: Reconstruction tensor.
        hparams: Hyperparameters.

    Returns:
        total_loss: MSE loss scalar.
    """
    with tf.name_scope("Losses"):
        if hparams.raw_audio:
            total_loss = tf.reduce_mean((x - xhat)**2)
        else:
            # Magnitude
            m = x[:, :, :, 0] if hparams.cost_phase_mask else 1.0

            fm = utils.frequency_weighted_cost_mask(
                hparams.fw_loss_coeff,
                hz_flat=hparams.fw_loss_cutoff,
                n_fft=hparams.n_fft)
            mag_loss = tf.reduce_mean(fm * (x[:, :, :, 0] - xhat[:, :, :, 0])**2)
            if hparams.mag_only:
                total_loss = mag_loss
            else:
                # Phase
                if hparams.dphase:
                    phase_loss = tf.reduce_mean(fm * m *
                                                (x[:, :, :, 1] - xhat[:, :, :, 1])**2)
                else:
                    # Von Mises Distribution "Circular Normal"
                    # Added constant to keep positive (Same Probability) range [0, 2]
                    phase_loss = 1 - tf.reduce_mean(fm * m * tf.cos(
                        (x[:, :, :, 1] - xhat[:, :, :, 1]) * np.pi))
                total_loss = mag_loss + hparams.phase_loss_coeff * phase_loss
                tf.summary.scalar("Loss/Mag", mag_loss)
                tf.summary.scalar("Loss/Phase", phase_loss)
        tf.summary.scalar("Loss/Total", total_loss)
    return total_loss
def wormhole(tensor, shape, kink, input_stride, alpha=1.0):
    """
    Apply per-pixel field flow. Non-iterative.

    :param Tensor tensor:
    :param list[int] shape:
    :param float kink: Path twistiness
    :param float input_stride: Maximum pixel offset
    :return: Tensor
    """
    height, width, channels = shape

    values = value_map(tensor, shape)

    degrees = values * 360.0 * math.radians(1) * kink
    # stride = values * height * input_stride
    stride = height * input_stride

    x_index = tf.cast(row_index(shape), tf.float32)
    y_index = tf.cast(column_index(shape), tf.float32)

    x_offset = (tf.cos(degrees) + 1) * stride
    y_offset = (tf.sin(degrees) + 1) * stride

    x = tf.cast(x_index + x_offset, tf.int32) % width
    y = tf.cast(y_index + y_offset, tf.int32) % height

    luminosity = tf.square(tf.reshape(values, [height, width, 1]))

    out = normalize(tf.scatter_nd(offset_index(y, height, x, width), tensor * luminosity, tf.shape(tensor)))

    return blend(tensor, tf.sqrt(out), alpha)
def _cosine_components(a, b, g):
    # This guy is great http://paulbourke.net/miscellaneous/interpolation/
    g2 = (1 - tf.cos(g * math.pi)) / 2

    return a * (1 - g2), b * g2
def sin_and_cos(x, name="ignored"):
    return tf.concat(len(x.get_shape()) - 1, [tf.sin(x), tf.cos(x)])
def _transformation(self, XP):
    """Build the kernel feature space transformation."""
    real = tf.cos(XP)
    imag = tf.sin(XP)
    Net = tf.concat([real, imag], axis=-1) / np.sqrt(self.n_features)
    return Net
def cos(self, x):
    '''Computes cos of x element-wise.
    '''
    return tf.cos(x)
def cos(x):
    """Computes cos of x element-wise.

    # Arguments
        x: input tensor.

    # Returns
        A tensor.
    """
    return tf.cos(x)
def testCplxCosGPU(self):
    shapes = [(5, 4, 3), (5, 4), (5,), (1,)]
    for sh in shapes:
        x = ((np.random.randn(*sh) +
              1j * np.random.randn(*sh)).astype(np.complex64))
        self._compareGpu(x, np.cos, tf.cos)
def testCplxCosGradGPU(self):
    shapes = [(5, 4, 3), (5, 4), (5,), (1,)]
    for sh in shapes:
        x = ((np.random.randn(*sh) +
              1j * np.random.randn(*sh)).astype(np.complex64))
        self._compareGpuGrad(x, np.cos, tf.cos)
def call(self, inputs):
    k1 = tf.matmul(tf.cos(inputs), self.k1 * tf.cos(self.mu))
    k2 = tf.matmul(tf.sin(inputs), self.k2 * tf.sin(self.mu))

    # Defines the two model formulations: "glm" vs "gvm".
    if self.model_type == 'glm':
        return tf.exp(k1 + k2 + self.k0)
    else:
        return tf.nn.softplus(self.b) + self.g * tf.exp(k1 + k2)
def K(self, X, X2=None, presliced=False):
    if not presliced:
        X, X2 = self._slice(X, X2)
    r = self.euclid_dist(X, X2)
    return self.variance * tf.cos(r)
def _J(self, theta):
    """
    Implements the order dependent family of functions defined in equations
    4 to 7 in the reference paper.
    """
    if self.order == 0:
        return np.pi - theta
    elif self.order == 1:
        return tf.sin(theta) + (np.pi - theta) * tf.cos(theta)
    elif self.order == 2:
        return 3. * tf.sin(theta) * tf.cos(theta) + \
               (np.pi - theta) * (1. + 2. * tf.cos(theta) ** 2)
def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()

    self.ops = [
        ('abs', operator.abs, tf.abs, core.abs_function),
        ('neg', operator.neg, tf.neg, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, tf.sign, core.sign),
        ('reciprocal', None, tf.reciprocal, core.reciprocal),
        ('square', None, tf.square, core.square),
        ('round', None, tf.round, core.round_function),
        ('sqrt', None, tf.sqrt, core.sqrt),
        ('rsqrt', None, tf.rsqrt, core.rsqrt),
        ('log', None, tf.log, core.log),
        ('exp', None, tf.exp, core.exp),
        ('log', None, tf.log, core.log),
        ('ceil', None, tf.ceil, core.ceil),
        ('floor', None, tf.floor, core.floor),
        ('cos', None, tf.cos, core.cos),
        ('sin', None, tf.sin, core.sin),
        ('tan', None, tf.tan, core.tan),
        ('acos', None, tf.acos, core.acos),
        ('asin', None, tf.asin, core.asin),
        ('atan', None, tf.atan, core.atan),
        ('lgamma', None, tf.lgamma, core.lgamma),
        ('digamma', None, tf.digamma, core.digamma),
        ('erf', None, tf.erf, core.erf),
        ('erfc', None, tf.erfc, core.erfc),
        ('lgamma', None, tf.lgamma, core.lgamma),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)
def get_unit_variable_c(name, scope, shape):
    theta = tf.get_variable(name, shape=shape,
                            initializer=tf.random_uniform_initializer(-pi, pi))
    return tf.complex(tf.cos(theta), tf.sin(theta))
def _get_rot_mat_x_hom(angle):
    """ Returns a 3D rotation matrix in homogeneous coords. """
    one_vec = tf.ones_like(angle)
    zero_vec = one_vec * 0.0
    trafo_matrix = _stitch_mat_from_vecs([one_vec, zero_vec, zero_vec, zero_vec,
                                          zero_vec, tf.cos(angle), -tf.sin(angle), zero_vec,
                                          zero_vec, tf.sin(angle), tf.cos(angle), zero_vec,
                                          zero_vec, zero_vec, zero_vec, one_vec])
    return trafo_matrix
def _get_rot_mat_y_hom(angle):
    """ Returns a 3D rotation matrix in homogeneous coords. """
    one_vec = tf.ones_like(angle)
    zero_vec = one_vec * 0.0
    trafo_matrix = _stitch_mat_from_vecs([tf.cos(angle), zero_vec, tf.sin(angle), zero_vec,
                                          zero_vec, one_vec, zero_vec, zero_vec,
                                          -tf.sin(angle), zero_vec, tf.cos(angle), zero_vec,
                                          zero_vec, zero_vec, zero_vec, one_vec])
    return trafo_matrix
def _get_rot_mat_z_hom(angle):
    """ Returns a 3D rotation matrix in homogeneous coords. """
    one_vec = tf.ones_like(angle)
    zero_vec = one_vec * 0.0
    trafo_matrix = _stitch_mat_from_vecs([tf.cos(angle), -tf.sin(angle), zero_vec, zero_vec,
                                          tf.sin(angle), tf.cos(angle), zero_vec, zero_vec,
                                          zero_vec, zero_vec, one_vec, zero_vec,
                                          zero_vec, zero_vec, zero_vec, one_vec])
    return trafo_matrix
def _get_rot_mat_y(angle):
    """ Returns a 3D rotation matrix. """
    one_vec = tf.ones_like(angle)
    zero_vec = one_vec * 0.0
    trafo_matrix = _stitch_mat_from_vecs([tf.cos(angle), zero_vec, -tf.sin(angle),
                                          zero_vec, one_vec, zero_vec,
                                          tf.sin(angle), zero_vec, tf.cos(angle)])
    return trafo_matrix
def _get_rot_mat_z(angle):
    """ Returns a 3D rotation matrix. """
    one_vec = tf.ones_like(angle)
    zero_vec = one_vec * 0.0
    trafo_matrix = _stitch_mat_from_vecs([tf.cos(angle), tf.sin(angle), zero_vec,
                                          -tf.sin(angle), tf.cos(angle), zero_vec,
                                          zero_vec, zero_vec, one_vec])
    return trafo_matrix
def buildRotations(n, rand_or_identity, num_rots=None):
    print("num_rots: %d" % num_rots)
    num_rots = num_rots or (n - 1)
    n_prime = int(n * (n - 1) // 2 * num_rots / (n - 1))
    outputs = []

    with vs.variable_scope("Build_Rotations"):
        (indices, values_idxs) = rotationPreprocess(n, num_rots)
        if rand_or_identity:
            print("Initialization: Random")
            thetas = vs.get_variable(initializer=tf.random_uniform([n_prime, 1], 0, 2 * math.pi),
                                     name="Thetas_RandInit", dtype=tf.float32)
        else:
            print("Initialization: Identity")
            thetas = vs.get_variable(initializer=tf.zeros([n_prime, 1]),
                                     name="Thetas_OnesInit", dtype=tf.float32)
        cos = tf.cos(thetas)
        sin = tf.sin(thetas)
        nsin = tf.neg(sin)

        thetas_concat = tf.concat(0, [cos, sin, nsin])

        gathered_values = tf.squeeze(tf.gather(thetas_concat, values_idxs))
        shape = tf.constant([n, n], dtype=tf.int64)

        splt_values = tf.split(0, num_rots, gathered_values)
        splt_indices = tf.split(0, num_rots, indices)

        shape = tf.constant([n, n], dtype=tf.int64)
        for i in range(num_rots):
            curr_indices = splt_indices[i]
            curr_values = splt_values[i]
            sparse_rot = tf.SparseTensor(indices=curr_indices, values=curr_values, shape=shape)
            outputs.append(sparse_rot)
    print("buildRotations output length: %d" % len(outputs))
    return outputs
def rotationTransform(X, n, scope, num_rots=None):
    num_rots = num_rots or (n - 1)
    n_prime = int(n * (n - 1) // 2 * num_rots / (n - 1))
    outputs = []

    with vs.variable_scope(scope or "RotationTransform"):
        for i, (name, x) in enumerate(X):
            (indices, values_idxs) = rotationPreprocess(n, num_rots)
            thetas = vs.get_variable(initializer=tf.random_uniform([n_prime, 1], 0, 2 * math.pi),
                                     name="Thetas" + str(i) + name, dtype=tf.float32)

            cos = tf.cos(thetas)
            sin = tf.sin(thetas)
            nsin = tf.neg(sin)

            thetas_concat = tf.concat(0, [cos, sin, nsin])

            gathered_values = tf.squeeze(tf.gather(thetas_concat, values_idxs))
            shape = tf.constant([n, n], dtype=tf.int64)

            splt_values = tf.split(0, num_rots, gathered_values)
            splt_indices = tf.split(0, num_rots, indices)

            shape = tf.constant([n, n], dtype=tf.int64)
            for i in range(num_rots):
                curr_indices = splt_indices[i]
                curr_values = splt_values[i]

                sparse_rot = tf.SparseTensor(indices=curr_indices, values=curr_values, shape=shape)
                x = tf.sparse_tensor_dense_matmul(sparse_rot, x)
            outputs.append(x)
    return outputs
def lat_long_to_xyz(S, T):
    x = tf.cos(T) * tf.sin(S)
    y = tf.sin(T)
    z = tf.cos(T) * tf.cos(S)
    return x, y, z
def backproject(S, T, depth):
    # Convert to Cartesian for modified depth input.
    # depth = sqrt(x^2 + z^2).
    x = depth * tf.sin(S)
    y = depth * tf.tan(T)
    z = depth * tf.cos(S)
    return x, y, z
def add_timing_signal(x, min_timescale=1.0, max_timescale=1.0e4, name=None):
    """
    This function adds a bunch of sinusoids of different frequencies to a
    Tensor. See paper: Attention is all you need

    :param x: A tensor with shape [batch, length, channels]
    :param min_timescale: A floating point number
    :param max_timescale: A floating point number
    :param name: An optional string

    :returns: a Tensor the same shape as x.
    """

    with tf.name_scope(name, default_name="add_timing_signal", values=[x]):
        length = tf.shape(x)[1]
        channels = tf.shape(x)[2]
        position = tf.to_float(tf.range(length))
        num_timescales = channels // 2

        log_timescale_increment = (
            math.log(float(max_timescale) / float(min_timescale)) /
            (tf.to_float(num_timescales) - 1)
        )
        inv_timescales = min_timescale * tf.exp(
            tf.to_float(tf.range(num_timescales)) * -log_timescale_increment
        )

        scaled_time = (tf.expand_dims(position, 1) *
                       tf.expand_dims(inv_timescales, 0))
        signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
        signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
        signal = tf.reshape(signal, [1, length, channels])

        return x + signal
def cos(x):
    """Computes cos of x element-wise.

    # Arguments
        x: Tensor or variable.

    # Returns
        A tensor.
    """
    return tf.cos(x)
def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16):
    """Adds a bunch of sinusoids of different frequencies to a Tensor.

    This allows attention to learn to use absolute and relative positions.
    The timing signal should be added to some precursor of both the source
    and the target of the attention.

    The use of relative position is possible because sin(x+y) and cos(x+y)
    can be expressed in terms of y, sin(x) and cos(x).

    In particular, we use a geometric sequence of timescales starting with
    min_timescale and ending with max_timescale. For each timescale, we
    generate the two sinusoidal signals sin(timestep/timescale) and
    cos(timestep/timescale). All of these sinusoids are concatenated in
    the depth dimension, padded with zeros to be the same depth as the input,
    and added into input.

    Args:
        x: a Tensor with shape [?, length, ?, depth]
        min_timescale: a float
        max_timescale: a float
        num_timescales: an int <= depth/2

    Returns:
        a Tensor the same shape as x.
    """
    length = shape_list(x)[1]
    depth = shape_list(x)[3]
    signal = get_timing_signal(length, min_timescale, max_timescale,
                               num_timescales)
    padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]])
    return x + tf.reshape(padded_signal, [1, length, 1, depth])
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    """Adds a bunch of sinusoids of different frequencies to a Tensor.

    Each channel of the input Tensor is incremented by a sinusoid of a
    different frequency and phase.

    This allows attention to learn to use absolute and relative positions.
    Timing signals should be added to some precursors of both the query and
    the memory inputs to attention.

    The use of relative position is possible because sin(x+y) and cos(x+y)
    can be expressed in terms of y, sin(x) and cos(x).

    In particular, we use a geometric sequence of timescales starting with
    min_timescale and ending with max_timescale. The number of different
    timescales is equal to channels / 2. For each timescale, we generate the
    two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale).
    All of these sinusoids are concatenated in the channels dimension.

    Args:
        x: a Tensor with shape [batch, length, channels]
        min_timescale: a float
        max_timescale: a float

    Returns:
        a Tensor the same shape as x.
    """
    length = common_layers.shape_list(x)[1]
    channels = common_layers.shape_list(x)[2]
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal
def add_timing_signal_1d_given_position(x,
                                        position,
                                        min_timescale=1.0,
                                        max_timescale=1.0e4):
    """Adds sinusoids of diff frequencies to a Tensor, with timing position given.

    Args:
        x: a Tensor with shape [batch, length, channels]
        position: a Tensor with shape [batch, length]
        min_timescale: a float
        max_timescale: a float

    Returns:
        a Tensor the same shape as x.
    """
    channels = common_layers.shape_list(x)[2]
    num_timescales = channels // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (tf.to_float(num_timescales) - 1))
    inv_timescales = min_timescale * tf.exp(
        tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
    scaled_time = (
        tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
            tf.expand_dims(inv_timescales, 0), 0))
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
    signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
    return x + signal
def learning_rate_decay(hparams, num_worker_replicas=1, num_train_steps=1):
    """Inverse-decay learning rate until warmup_steps, then decay."""
    warmup_steps = tf.to_float(
        hparams.learning_rate_warmup_steps * num_worker_replicas)
    step = tf.to_float(tf.train.get_or_create_global_step())
    if hparams.learning_rate_decay_scheme == "noam":
        return 5000.0 * hparams.hidden_size**-0.5 * tf.minimum(
            (step + 1) * warmup_steps**-1.5, (step + 1)**-0.5)
    elif hparams.learning_rate_decay_scheme == "exp100k":
        return 0.94**(step // 100000)
    elif hparams.learning_rate_decay_scheme == "cosine":
        cycle_steps = hparams.learning_rate_cosine_cycle_steps
        return 0.5 * (1 + tf.cos(np.pi * (step % cycle_steps) / cycle_steps))
    elif hparams.learning_rate_decay_scheme == "cyclelinear10x":
        # Cycle the rate linearly by 10x every warmup_steps, up and down.
        cycle_steps = hparams.learning_rate_warmup_steps
        cycle_position = step % (2 * cycle_steps)
        cycle_position = tf.to_float(  # Normalize to the interval [-1, 1].
            cycle_position - cycle_steps) / float(cycle_steps)
        cycle_position = 1.0 - tf.abs(cycle_position)  # 0 to 1 and back to 0.
        return (cycle_position + 0.1) * 3.0  # 10x difference each cycle (0.3-3).

    inv_base = tf.exp(tf.log(0.01) / warmup_steps)
    inv_decay = inv_base**(warmup_steps - step)
    if hparams.learning_rate_decay_scheme == "sqrt":
        decay = _sqrt_decay(step - warmup_steps)
    elif hparams.learning_rate_decay_scheme == "exp10k":
        decay = _exp_decay_after(step - warmup_steps, 0.9995,
                                 num_train_steps - warmup_steps - 10000)
    elif hparams.learning_rate_decay_scheme == "exp50k":
        decay = _exp_decay_after(step - warmup_steps, 0.99995,
                                 num_train_steps - warmup_steps - 50000)
    elif hparams.learning_rate_decay_scheme == "exp500k":
        decay = _exp_decay_after(step - warmup_steps, 0.9999955,
                                 num_train_steps - warmup_steps - 500000)
    elif hparams.learning_rate_decay_scheme == "none":
        decay = tf.constant(1.0)
    else:
        raise ValueError("Unrecognized learning rate decay scheme: %s" %
                         hparams.learning_rate_decay_scheme)
    return tf.where(step < warmup_steps, inv_decay, decay)
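The "cosine" branch above is a cosine-annealing schedule. Isolated as a small hypothetical helper (my naming, assuming the same imports as the function above), the multiplier decays smoothly from 1.0 toward 0.0 over each cycle and then restarts:

def cosine_decay_factor(step, cycle_steps):
    # Mirrors the "cosine" branch of learning_rate_decay above.
    return 0.5 * (1 + tf.cos(np.pi * (step % cycle_steps) / cycle_steps))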
def test_Cos(self):
    t = tf.cos(self.random(4, 3))
    self.check(t)
def ripple(tensor, shape, freq, displacement=1.0, kink=1.0, reference=None, spline_order=3):
    """
    Apply displacement from pixel radian values.

    :param Tensor tensor: An image tensor.
    :param list[int] shape:
    :param list[int] freq: Displacement frequency
    :param float displacement:
    :param float kink:
    :param Tensor reference: An optional displacement map.
    :param int spline_order: Ortho offset spline point count. 0=Constant, 1=Linear, 2=Cosine, 3=Bicubic
    :return: Tensor
    """
    height, width, channels = shape

    x0_index = row_index(shape)
    y0_index = column_index(shape)

    value_shape = [shape[0], shape[1], 1]

    if reference is None:
        reference = resample(tf.random_uniform([freq[0], freq[1], 1]), value_shape, spline_order=spline_order)
        # reference = derivative(reference, [shape[0], shape[1], 1], with_normalize=False)

    # Twist index, borrowed from worms. TODO merge me.
    index = value_map(reference, shape) * 360.0 * math.radians(1) * kink

    reference_x = (tf.cos(index) * displacement * width) % width
    reference_y = (tf.sin(index) * displacement * height) % height

    # Bilinear interpolation of midpoints, borrowed from refract(). TODO merge me
    x0_offsets = (tf.cast(reference_x, tf.int32) + x0_index) % width
    x1_offsets = (x0_offsets + 1) % width
    y0_offsets = (tf.cast(reference_y, tf.int32) + y0_index) % height
    y1_offsets = (y0_offsets + 1) % height

    x0_y0 = tf.gather_nd(tensor, tf.stack([y0_offsets, x0_offsets], 2))
    x1_y0 = tf.gather_nd(tensor, tf.stack([y0_offsets, x1_offsets], 2))
    x0_y1 = tf.gather_nd(tensor, tf.stack([y1_offsets, x0_offsets], 2))
    x1_y1 = tf.gather_nd(tensor, tf.stack([y1_offsets, x1_offsets], 2))

    x_fract = tf.reshape(reference_x - tf.floor(reference_x), [height, width, 1])
    y_fract = tf.reshape(reference_y - tf.floor(reference_y), [height, width, 1])

    x_y0 = blend(x0_y0, x1_y0, x_fract)
    x_y1 = blend(x0_y1, x1_y1, x_fract)

    return blend(x_y0, x_y1, y_fract)
def lat_long_to_rectilinear_uv(K, S, T):
    # Convert to Cartesian.
    x = tf.cos(T) * tf.sin(S)
    y = tf.sin(T)
    z = tf.cos(T) * tf.cos(S)

    argmax = tf.argmax(tf.abs([x, y, z]), axis=0)

    # Check which face the ray lies on.
    front_check = tf.logical_and(
        tf.equal(argmax, 2),
        tf.greater_equal(z, 0.0)
    )
    back_check = tf.logical_and(
        tf.equal(argmax, 2),
        tf.less(z, 0.0)
    )
    left_check = tf.logical_and(
        tf.equal(argmax, 0),
        tf.less(x, 0.0)
    )
    right_check = tf.logical_and(
        tf.equal(argmax, 0),
        tf.greater_equal(x, 0.0)
    )
    up_check = tf.logical_and(
        tf.equal(argmax, 1),
        tf.less(y, 0.0)
    )
    down_check = tf.logical_and(
        tf.equal(argmax, 1),
        tf.greater_equal(y, 0.0)
    )

    def project_u(x, y, z, offset):
        return offset + 0.5 + (K[2] + K[0] * x / z) / 2.0

    def project_v(x, y, z):
        return 0.5 + (K[3] + K[1] * y / z) / 2.0

    # Calculate UV coordinates.
    u = tf.where(front_check, project_u(x, y, z, 0.0), tf.zeros_like(x))
    u = tf.where(back_check, project_u(x, -y, z, 1.0), u)
    u = tf.where(left_check, project_u(z, y, -x, 2.0), u)
    u = tf.where(right_check, project_u(-z, y, x, 3.0), u)
    u = tf.where(up_check, project_u(x, z, -y, 4.0), u)
    u = tf.where(down_check, project_u(x, -z, y, 5.0), u)
    u = u / 6.0

    v = tf.where(front_check, project_v(x, y, z), tf.zeros_like(y))
    v = tf.where(back_check, project_v(x, -y, z), v)
    v = tf.where(left_check, project_v(z, y, -x), v)
    v = tf.where(right_check, project_v(-z, y, x), v)
    v = tf.where(up_check, project_v(x, z, -y), v)
    v = tf.where(down_check, project_v(x, -z, y), v)

    return u, v
def lat_long_to_cube_uv(S, T):
    # Convert to Cartesian.
    x = tf.cos(T) * tf.sin(S)
    y = tf.sin(T)
    z = tf.cos(T) * tf.cos(S)

    argmax = tf.argmax(tf.abs([x, y, z]), axis=0)
    max = tf.reduce_max(tf.abs([x, y, z]), axis=0)

    # Check which face the ray lies on.
    front_check = tf.logical_and(
        tf.equal(argmax, 2),
        tf.greater_equal(z, 0.0)
    )
    back_check = tf.logical_and(
        tf.equal(argmax, 2),
        tf.less(z, 0.0)
    )
    left_check = tf.logical_and(
        tf.equal(argmax, 0),
        tf.less(x, 0.0)
    )
    right_check = tf.logical_and(
        tf.equal(argmax, 0),
        tf.greater_equal(x, 0.0)
    )
    up_check = tf.logical_and(
        tf.equal(argmax, 1),
        tf.less(y, 0.0)
    )
    down_check = tf.logical_and(
        tf.equal(argmax, 1),
        tf.greater_equal(y, 0.0)
    )

    # Normalize coordinates.
    x = x / max
    y = y / max
    z = z / max

    # Calculate UV coordinates.
    u = tf.where(front_check, 0.5 + x / 2.0, tf.zeros_like(x))
    u = tf.where(back_check, 1.0 + (0.5 - x / 2.0), u)
    u = tf.where(left_check, 2.0 + (0.5 + z / 2.0), u)
    u = tf.where(right_check, 3.0 + (0.5 - z / 2.0), u)
    u = tf.where(up_check, 4.0 + (0.5 + x / 2.0), u)
    u = tf.where(down_check, 5.0 + (0.5 + x / 2.0), u)
    u = u / 6.0

    v = tf.where(front_check, (1.0 + y) / 2.0, tf.zeros_like(y))
    v = tf.where(back_check, (1.0 + y) / 2.0, v)
    v = tf.where(left_check, (1.0 + y) / 2.0, v)
    v = tf.where(right_check, (1.0 + y) / 2.0, v)
    v = tf.where(up_check, (1.0 + z) / 2.0, v)
    v = tf.where(down_check, (1.0 - z) / 2.0, v)

    return u, v