We extracted the following 8 code examples from open-source Python projects to illustrate how to use tensorflow.conj().
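Before the extracted examples, here is a minimal sketch (written for this page, assuming TensorFlow 1.x graph mode) of what tf.conj() computes: it negates the imaginary part of a complex tensor, and passes real tensors through unchanged.

import tensorflow as tf

x = tf.constant([1 + 2j, 3 - 4j], dtype=tf.complex64)
with tf.Session() as sess:
    print(sess.run(tf.conj(x)))  # [1.-2.j 3.+4.j]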
def __init__(self, x_op, y_op, sess, remove_bias=False):
    # Save parameters
    self.x_op = x_op
    self.y_op = y_op
    self.sess = sess
    self.remove_bias = remove_bias

    # Get dimensions and data types
    self.shape0 = x_op.get_shape()
    self.shape1 = y_op.get_shape()
    self.dtype0 = x_op.dtype
    self.dtype1 = y_op.dtype

    # Create the ops for the gradient. If the linear operator is y = F(x),
    # then z = y'*F(x). Therefore, dz/dx = F'(y).
    self.ytr_op = tf.placeholder(self.dtype1, self.shape1)
    self.z_op = tf.reduce_sum(tf.multiply(tf.conj(self.ytr_op), self.y_op))
    self.zgrad_op = tf.gradients(self.z_op, self.x_op)[0]

    # Compute output at zero to subtract
    if self.remove_bias:
        xzero = np.zeros(self.shape0)
        self.y_bias = self.sess.run(self.y_op, feed_dict={self.x_op: xzero})
    else:
        self.y_bias = 0
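The interesting trick in this constructor is recovering the adjoint of a linear operator from tf.gradients. Below is a self-contained sketch of that trick (the matrix A and all names are mine, not from the project; assumes TensorFlow 1.x): for y = A x, the gradient of z = sum(conj(ytr) * y) with respect to x evaluates to A^H ytr, i.e. the adjoint applied to ytr.

import numpy as np
import tensorflow as tf

A_np = (np.random.randn(3, 2) + 1j * np.random.randn(3, 2)).astype(np.complex64)
A = tf.constant(A_np)
x_op = tf.placeholder(tf.complex64, [2, 1])
y_op = tf.matmul(A, x_op)                                   # the linear operator F
ytr = tf.placeholder(tf.complex64, [3, 1])
z = tf.reduce_sum(tf.multiply(tf.conj(ytr), y_op))          # z = <ytr, F(x)>
adjoint = tf.gradients(z, x_op)[0]                          # should be A^H ytr

y0 = (np.random.randn(3, 1) + 1j * np.random.randn(3, 1)).astype(np.complex64)
with tf.Session() as sess:
    g = sess.run(adjoint,
                 feed_dict={x_op: np.zeros((2, 1), np.complex64), ytr: y0})
print(np.allclose(g, A_np.conj().T @ y0, atol=1e-5))        # True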
def _CplxMatMulGrad(op, grad):
    inp0 = tf.conj(op.inputs[0])
    inp1 = tf.conj(op.inputs[1])
    t_a = op.get_attr("transpose_a")
    t_b = op.get_attr("transpose_b")
    if not t_a and not t_b:
        return (math_ops.matmul(grad, inp1, transpose_b=True),
                math_ops.matmul(inp0, grad, transpose_a=True))
    elif not t_a and t_b:
        return (math_ops.matmul(grad, inp1),
                math_ops.matmul(grad, inp0, transpose_a=True))
    elif t_a and not t_b:
        return (math_ops.matmul(inp1, grad, transpose_b=True),
                math_ops.matmul(inp0, grad))
    elif t_a and t_b:
        return (math_ops.matmul(inp1, grad, transpose_a=True, transpose_b=True),
                math_ops.matmul(grad, inp0, transpose_a=True, transpose_b=True))
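In the no-transpose branch, the returned pair is (grad @ conj(b)^T, conj(a)^T @ grad), which is the same rule TensorFlow's built-in MatMul gradient applies to complex inputs. A quick numerical check of that equivalence (a sketch, assuming TF 1.x, where tf.matmul's registered gradient conjugates its inputs in the same way):

import numpy as np
import tensorflow as tf

a = tf.constant((np.random.randn(2, 3) + 1j * np.random.randn(2, 3)).astype(np.complex64))
b = tf.constant((np.random.randn(3, 4) + 1j * np.random.randn(3, 4)).astype(np.complex64))
g = tf.constant((np.random.randn(2, 4) + 1j * np.random.randn(2, 4)).astype(np.complex64))
c = tf.matmul(a, b)
auto_ga, auto_gb = tf.gradients(c, [a, b], grad_ys=g)       # TF's own MatMul gradient
man_ga = tf.matmul(g, tf.conj(b), transpose_b=True)         # grad @ conj(b)^T
man_gb = tf.matmul(tf.conj(a), g, transpose_a=True)         # conj(a)^T @ grad
with tf.Session() as sess:
    res = sess.run([auto_ga, man_ga, auto_gb, man_gb])
print(np.allclose(res[0], res[1]), np.allclose(res[2], res[3]))  # True True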
def sparse_hermitian_product(emb, tuples):
    """Compute the Hermitian inner product between selected complex embeddings.

    This corresponds to the usual dot product applied to the conjugate of the
    first vector: <conj(x), y>, where conj is the complex conjugate (obtained
    by negating the imaginary part).

    We consider that the embedding dimension is twice the rank: the real part
    is in embeddings[:, :rk] and the imaginary part is in embeddings[:, rk:].

    It computes S[i] = <conj(E[I[i, 1]]), E[I[i, 2]]>

    Usage:
        S = sparse_hermitian_product(E, I)

    :param emb: embedding matrix of size [n_emb, 2 * r] containing float
        numbers, where r is the complex rank
    :param tuples: tuple matrix of size [n_t, 2] containing integers that
        correspond to the indices of the embeddings
    :return: a pair containing the real and imaginary parts of the Hermitian
        dot products
    """
    rk = emb.get_shape()[1].value // 2
    emb_re = emb[:, :rk]
    emb_im = emb[:, rk:]
    emb_sel_a_re = tf.gather(emb_re, tuples[:, 0])
    emb_sel_a_im = tf.gather(emb_im, tuples[:, 0])
    emb_sel_b_re = tf.gather(emb_re, tuples[:, 1])
    emb_sel_b_im = tf.gather(emb_im, tuples[:, 1])
    pred_re = tf.reduce_sum(tf.mul(emb_sel_a_re, emb_sel_b_re)
                            + tf.mul(emb_sel_a_im, emb_sel_b_im), 1)
    pred_im = tf.reduce_sum(tf.mul(emb_sel_a_re, emb_sel_b_im)
                            - tf.mul(emb_sel_a_im, emb_sel_b_re), 1)
    return pred_re, pred_im
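A hypothetical usage sketch (embedding sizes and names are mine). Note the snippet targets the pre-1.0 TensorFlow API, where tf.mul was later renamed tf.multiply, so it runs as-is only on that version:

import numpy as np
import tensorflow as tf

emb = tf.constant(np.random.randn(5, 4).astype(np.float32))  # n_emb=5, complex rank r=2
tuples = tf.constant([[0, 1], [2, 3], [4, 0]])               # n_t=3 index pairs
s_re, s_im = sparse_hermitian_product(emb, tuples)
with tf.Session() as sess:
    print(sess.run([s_re, s_im]))  # two [3]-vectors: Re and Im of the scores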
def __init__(self, name, num_units):
    self.num_units = num_units
    self.re = tf.Variable(tf.random_uniform([num_units], minval=-1, maxval=1),
                          name=name + "_re")
    self.im = tf.Variable(tf.random_uniform([num_units], minval=-1, maxval=1),
                          name=name + "_im")
    self.v = tf.complex(self.re, self.im)  # [num_units]
    # self.v = normalize(self.v)
    self.vstar = tf.conj(self.v)  # [num_units]

# [batch_sz, num_units]
def mul(self, z):
    v = tf.expand_dims(self.v, 1)                 # [num_units, 1]
    vstar = tf.conj(v)                            # [num_units, 1]
    vstar_z = tf.matmul(z, vstar)                 # [batch_size, 1]
    sq_norm = tf.reduce_sum(tf.abs(self.v) ** 2)  # [1]
    factor = (2 / tf.complex(sq_norm, 0.0))
    return z - factor * tf.matmul(vstar_z, tf.transpose(v))

# Permutation unitary matrix
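Together, the constructor above and mul implement a complex Householder reflection z ↦ z(I − 2 v̄ vᵀ/‖v‖²), which is unitary and therefore norm-preserving. A quick sanity check (a sketch; the class name Reflection is my assumption, the source shows only the two methods):

import numpy as np
import tensorflow as tf

refl = Reflection("r0", num_units=4)  # hypothetical class name
z = tf.complex(tf.random_normal([2, 4]), tf.random_normal([2, 4]))
out = refl.mul(z)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    z_np, out_np = sess.run([z, out])
print(np.allclose(np.linalg.norm(z_np, axis=1),
                  np.linalg.norm(out_np, axis=1), atol=1e-4))  # True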
def sparse_dot_product0(emb, tuples, use_matmul=True, output_type='real'):
    """Compute the dot product of complex vectors.

    It uses complex vectors, but TensorFlow does not optimize in the complex
    space (or there is a bug in the gradient propagation with complex
    numbers...)

    :param emb: embeddings
    :param tuples: indices at which we compute dot products
    :return: scores (dot products)
    """
    n_t = tuples.get_shape()[0].value
    rk = emb.get_shape()[1].value
    emb_sel_a = tf.gather(emb, tuples[:, 0])
    emb_sel_b = tf.gather(emb, tuples[:, 1])
    if use_matmul:
        pred_cplx = tf.squeeze(tf.batch_matmul(
            tf.reshape(emb_sel_a, [n_t, rk, 1]),
            tf.reshape(emb_sel_b, [n_t, rk, 1]),
            adj_x=True))
    else:
        pred_cplx = tf.reduce_sum(tf.mul(tf.conj(emb_sel_a), emb_sel_b), 1)
    if output_type == 'complex':
        return pred_cplx
    elif output_type == 'real':
        return tf.real(pred_cplx) + tf.imag(pred_cplx)
    elif output_type == 'abs':
        return tf.abs(pred_cplx)
    elif output_type == 'angle':
        raise NotImplementedError('No argument or inverse-tanh function for '
                                  'complex numbers in TensorFlow')
    else:
        raise NotImplementedError()
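A hypothetical usage sketch for the element-wise path (names are mine; like the previous example, the snippet targets the pre-1.0 API, where tf.batch_matmul and tf.mul became tf.matmul and tf.multiply):

import numpy as np
import tensorflow as tf

emb = tf.constant((np.random.randn(5, 3)
                   + 1j * np.random.randn(5, 3)).astype(np.complex64))
tuples = tf.constant([[0, 1], [2, 4]])  # static shape [2, 2], as get_shape() requires
scores = sparse_dot_product0(emb, tuples, use_matmul=False, output_type='real')
with tf.Session() as sess:
    print(sess.run(scores))  # shape [2]: Re + Im of each Hermitian dot product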
def refl_c(in_, normal_):
    normal_rk2 = tf.expand_dims(normal_, 1)
    scale = 2 * tf.matmul(in_, tf.conj(normal_rk2))
    return in_ - tf.matmul(scale, tf.transpose(normal_rk2))

# get complex variable
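refl_c computes in_ − 2 (in_ · conj(normal_)) normal_ᵀ row-wise; unlike the mul method above, there is no division by ‖normal_‖², so it is a true norm-preserving reflection only when the normal vector has unit norm. A sketch (values are mine) that normalizes first and checks this:

import numpy as np
import tensorflow as tf

n = tf.complex(tf.constant([1.0, 2.0]), tf.constant([0.0, -1.0]))
n = n / tf.complex(tf.sqrt(tf.reduce_sum(tf.abs(n) ** 2)), 0.0)  # unit norm
x = tf.complex(tf.random_normal([3, 2]), tf.random_normal([3, 2]))
y = refl_c(x, n)
with tf.Session() as sess:
    x_np, y_np = sess.run([x, y])
print(np.allclose(np.linalg.norm(x_np, axis=1),
                  np.linalg.norm(y_np, axis=1), atol=1e-4))  # True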
def test_Conj(self):
    t = tf.conj(self.random(3, 4, complex=True))
    self.check(t)
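A standalone equivalent of this test (self.random and self.check are helpers from the surrounding test harness, not shown here): evaluate tf.conj on random complex input and compare against NumPy's conjugate.

import numpy as np
import tensorflow as tf

x = (np.random.rand(3, 4) + 1j * np.random.rand(3, 4)).astype(np.complex64)
with tf.Session() as sess:
    out = sess.run(tf.conj(tf.constant(x)))
print(np.allclose(out, np.conj(x)))  # True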