The following 32 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.complex().
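As a quick orientation before the project examples, here is a minimal sketch of the call itself: tf.complex pairs a real tensor and an imaginary tensor of the same shape and float dtype into a single complex tensor. The sketch assumes the TensorFlow 1.x API that the examples below use (tf.Session, tf.real, tf.imag); the tensor names are illustrative only.

import tensorflow as tf

# Two float32 tensors of the same shape become one complex64 tensor.
real_part = tf.constant([1.0, 2.0], dtype=tf.float32)
imag_part = tf.constant([3.0, 4.0], dtype=tf.float32)
z = tf.complex(real_part, imag_part)   # dtype: complex64

magnitude = tf.abs(z)                  # |z| = sqrt(real^2 + imag^2)
recovered = (tf.real(z), tf.imag(z))   # split back into the two float tensors

with tf.Session() as sess:
    print(sess.run(z))          # [1.+3.j 2.+4.j]
    print(sess.run(magnitude))  # [3.1622777 4.472136 ]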
def __call__(self, inputs, state, scope=None):
    zero_initer = tf.constant_initializer(0.)
    with tf.variable_scope(scope or type(self).__name__):
        # nick: there are these two matrix multiplications and they are used to convert
        # regular input sizes to complex outputs -- makes sense -- we can further modify
        # this for lstm configurations
        mat_in = tf.get_variable('W_in', [self.input_size, self.state_size*2])
        mat_out = tf.get_variable('W_out', [self.state_size*2, self.output_size])

        in_proj = tf.matmul(inputs, mat_in)
        in_proj_c = tf.complex(in_proj[:, :self.state_size], in_proj[:, self.state_size:])
        out_state = modrelu_c(in_proj_c + ulinear_c(state, transform=self.transform),
                              tf.get_variable(name='B', dtype=tf.float32,
                                              shape=[self.state_size],
                                              initializer=zero_initer))
        out_bias = tf.get_variable(name='B_out', dtype=tf.float32,
                                   shape=[self.output_size], initializer=zero_initer)
        out = tf.matmul(tf.concat(1, [tf.real(out_state), tf.imag(out_state)]), mat_out) + out_bias

    return out, out_state
def transform_spec_from_raw(raw):
    '''
    Read raw features from TFRecords and shape them into spectrograms
    '''
    spec = tf.decode_raw(raw, tf.float32)
    spec.set_shape([EmbeddingConfig.num_time_frames * EmbeddingConfig.num_freq_bins * 2])
    spec = tf.reshape(spec, [-1, EmbeddingConfig.num_freq_bins * 2])
    real, imag = tf.split(spec, [EmbeddingConfig.num_freq_bins, EmbeddingConfig.num_freq_bins], axis=1)
    orig_spec = tf.complex(real, imag)
    # orig_spec = librosa.feature.melspectrogram(S=orig_spec, n_mels=150)
    return orig_spec  # shape: [time_frames, num_freq_bins]
def transform_spec_from_raw(raw):
    '''
    Read raw features from TFRecords and shape them into spectrograms
    '''
    spec = tf.decode_raw(raw, tf.float32)
    spec.set_shape([Config.num_time_frames * Config.num_freq_bins * 2])
    spec = tf.reshape(spec, [-1, Config.num_freq_bins * 2])
    real, imag = tf.split(spec, [Config.num_freq_bins, Config.num_freq_bins], axis=1)
    orig_spec = tf.complex(real, imag)
    return orig_spec  # [num_time_frames, num_freq_bins]
def __init__(self, name, num_units):
    init_w = tf.random_uniform([num_units], minval=-np.pi, maxval=np.pi)
    self.w = tf.Variable(init_w, name=name)
    self.vec = tf.complex(tf.cos(self.w), tf.sin(self.w))

# [batch_sz, num_units]
def __init__(self, name, num_units):
    self.num_units = num_units
    self.re = tf.Variable(tf.random_uniform([num_units], minval=-1, maxval=1), name=name+"_re")
    self.im = tf.Variable(tf.random_uniform([num_units], minval=-1, maxval=1), name=name+"_im")
    self.v = tf.complex(self.re, self.im)  # [num_units]
    # self.v = normalize(self.v)
    self.vstar = tf.conj(self.v)  # [num_units]

# [batch_sz, num_units]
def mul(self, z):
    v = tf.expand_dims(self.v, 1)                # [num_units, 1]
    vstar = tf.conj(v)                           # [num_units, 1]
    vstar_z = tf.matmul(z, vstar)                # [batch_size, 1]
    sq_norm = tf.reduce_sum(tf.abs(self.v)**2)   # [1]
    factor = (2 / tf.complex(sq_norm, 0.0))
    return z - factor * tf.matmul(vstar_z, tf.transpose(v))

# Permutation unitary matrix
def mul(self, z):
    return tf.transpose(tf.gather(tf.transpose(z), self.P))

# FFTs
# z: complex[batch_sz, num_units]
def normalize(z):
    norm = tf.sqrt(tf.reduce_sum(tf.abs(z)**2))
    factor = (norm + 1e-6)
    return tf.complex(tf.real(z) / factor, tf.imag(z) / factor)

# z: complex[batch_sz, num_units]
# bias: real[num_units]
def complex_mod_of_real(x):
    xshp = x.get_shape().as_list()
    assert xshp[1] % 2 == 0
    xcplx = tf.complex(x[:, 0:xshp[1]/2], x[:, xshp[1]/2:])
    return tf.complex_abs(xcplx)
def bound(x):
    bound = tf.maximum(tf.sqrt(tf.mul(tf.real(x), tf.real(x))
                               + tf.mul(tf.imag(x), tf.imag(x))), 1.0)
    return tf.complex(tf.real(x) / bound, tf.imag(x) / bound)
def _normalize(self, keys):
    # Normalize our keys to mod 1 if specified
    if self.complex_normalize:
        print 'normalizing via complex abs..'
        keys = HolographicMemory.normalize_real_by_complex_abs(keys)

    # Normalize our keys using the l2 norm
    if self.l2_normalize:
        print 'normalizing via l2..'
        keys = tf.nn.l2_normalize(keys, 1)

    return keys
def split_to_complex(x, xshp=None):
    xshp = x.get_shape().as_list() if xshp is None else xshp
    if len(xshp) == 2:
        assert xshp[1] % 2 == 0, \
            "Vector is not evenly divisible into complex: %d" % xshp[1]
        mid = xshp[1] / 2
        return tf.complex(x[:, 0:mid], x[:, mid:])
    else:
        assert xshp[0] % 2 == 0, \
            "Vector is not evenly divisible into complex: %d" % xshp[0]
        mid = xshp[0] / 2
        return tf.complex(x[0:mid], x[mid:])
def unsplit_from_complex_ir(x):
    # return tf.concat(1, [tf.imag(x), tf.abs(tf.real(x))])
    return tf.abs(tf.concat(1, [tf.imag(x), tf.real(x)]))

    # mag = tf.maximum(1.0, tf.complex_abs(x))
    # x = tf.complex(tf.real(x) / (mag + 1e-10), tf.imag(x) / (mag + 1e-10))
    # real = tf.concat(1, [tf.imag(x), tf.real(x)])
    # return tf.abs(HolographicMemory.normalize_real_by_complex_abs([real])[0])
def complex_mul_real(z, r):
    return tf.complex(tf.real(z)*r, tf.imag(z)*r)
def refl_c(in_, normal_):
    normal_rk2 = tf.expand_dims(normal_, 1)
    scale = 2*tf.matmul(in_, tf.conj(normal_rk2))
    return in_ - tf.matmul(scale, tf.transpose(normal_rk2))

# get complex variable
def get_variable_c(name, shape, initializer=None):
    re = tf.get_variable(name+'_re', shape=shape, initializer=initializer)
    im = tf.get_variable(name+'_im', shape=shape, initializer=initializer)
    return tf.complex(re, im, name=name)

# get unit complex numbers in polar form
def get_unit_variable_c(name, scope, shape):
    theta = tf.get_variable(name, shape=shape,
                            initializer=tf.random_uniform_initializer(-pi, pi))
    return tf.complex(tf.cos(theta), tf.sin(theta))
def modrelu_c(in_c, bias):
    if not in_c.dtype.is_complex:
        raise ValueError('modrelu_c: Argument in_c must be complex type')
    if bias.dtype.is_complex:
        raise ValueError('modrelu_c: Argument bias must be real type')

    n = tf.complex_abs(in_c)
    scale = 1./(n + 1e-5)
    return complex_mul_real(in_c, (tf.nn.relu(n + bias) * scale))
def __call__(self, inputs, state, scope=None):
    with tf.variable_scope(scope or type(self).__name__):
        unitary_hidden_state, secondary_cell_hidden_state = tf.split(1, 2, state)

        mat_in = tf.get_variable('mat_in', [self.input_size, self.state_size*2])
        mat_out = tf.get_variable('mat_out', [self.state_size*2, self.output_size])

        in_proj = tf.matmul(inputs, mat_in)
        in_proj_c = tf.complex(*tf.split(1, 2, in_proj))
        out_state = modReLU(in_proj_c + ulinear(unitary_hidden_state, self.state_size),
                            tf.get_variable(name='bias', dtype=tf.float32,
                                            shape=tf.shape(unitary_hidden_state),
                                            initializer=tf.constant_initializer(0.)),
                            scope=scope)

        with tf.variable_scope('unitary_output'):
            '''computes data linear, unitary linear and summation -- TODO: should be complex output'''
            unitary_linear_output_real = linear.linear([tf.real(out_state), tf.imag(out_state), inputs], True, 0.0)

        with tf.variable_scope('scale_nonlinearity'):
            modulus = tf.complex_abs(unitary_linear_output_real)
            rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-7)

        # transition to data shortcut connection
        # out_ = tf.matmul(tf.concat(1, [tf.real(out_state), tf.imag(out_state)]), mat_out) + out_bias

        # hidden state is complex but output is completely real
        return out_, out_state

# complex
def _SequentialBatchFFTGrad(op, grad):
    if (grad.dtype == tf.complex64):
        size = tf.cast(tf.shape(grad)[1], tf.float32)
        return (sequential_batch_ifft(grad, op.get_attr("compute_size"))
                * tf.complex(size, 0.))
    else:
        size = tf.cast(tf.shape(grad)[1], tf.float64)
        return (sequential_batch_ifft(grad, op.get_attr("compute_size"))
                * tf.complex(size, tf.zeros([], tf.float64)))
def _SequentialBatchIFFTGrad(op, grad):
    if (grad.dtype == tf.complex64):
        rsize = 1. / tf.cast(tf.shape(grad)[1], tf.float32)
        return (sequential_batch_fft(grad, op.get_attr("compute_size"))
                * tf.complex(rsize, 0.))
    else:
        rsize = 1. / tf.cast(tf.shape(grad)[1], tf.float64)
        return (sequential_batch_fft(grad, op.get_attr("compute_size"))
                * tf.complex(rsize, tf.zeros([], tf.float64)))
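The scale factors in the two gradient functions above come from the adjoint relationship between the forward and inverse transforms: because the inverse FFT (in TensorFlow as in NumPy) is normalized by 1/N, the adjoint of the FFT is N times the IFFT, and the adjoint of the IFFT is 1/N times the FFT. The sketch below is a small NumPy check of that identity; it deliberately avoids the custom sequential_batch_fft / sequential_batch_ifft ops, which belong to the surrounding project.

import numpy as np

n = 8
x = np.random.rand(n) + 1j * np.random.rand(n)
y = np.random.rand(n) + 1j * np.random.rand(n)

# <F x, y> == <x, F^H y>, where the adjoint F^H y is computed as n * ifft(y)
lhs = np.vdot(np.fft.fft(x), y)
rhs = np.vdot(x, n * np.fft.ifft(y))
assert np.allclose(lhs, rhs)

# Likewise, the adjoint of the (1/n)-normalized ifft is (1/n) * fft
lhs_inv = np.vdot(np.fft.ifft(x), y)
rhs_inv = np.vdot(x, np.fft.fft(y) / n)
assert np.allclose(lhs_inv, rhs_inv)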
def random(self, *shapes, **kwargs):
    if all(isinstance(i, int) for i in shapes):
        if kwargs.get("complex", False):
            return (self.random(*shapes) + 1j * self.random(*shapes)).astype(np.complex64)
        else:
            return np.random.rand(*shapes)
    else:
        return tuple(self.random(*shape) for shape in shapes)
def test_Svd(self):
    t = tf.svd(self.random(4, 5, 3, 2).astype("float32"))
    self.check(t, ndigits=4, abs=True)

#
# complex number ops
#
def test_Complex(self):
    t = tf.complex(*self.random((3, 4), (3, 4)))
    self.check(t)
def test_Conj(self):
    t = tf.conj(self.random(3, 4, complex=True))
    self.check(t)
def test_Imag(self):
    t = tf.imag(tf.Variable(self.random(3, 4, complex=True)))
    self.check(t)
def test_FFT2D(self):
    # only defined for gpu
    if DEVICE == GPU:
        t = tf.fft2d(self.random(3, 4, complex=True))
        self.check(t)
def test_IFFT2D(self):
    # only defined for gpu
    if DEVICE == GPU:
        t = tf.ifft2d(self.random(3, 4, complex=True))
        self.check(t)
def test_FFT3D(self):
    # only defined for gpu
    if DEVICE == GPU:
        t = tf.fft3d(self.random(3, 4, 5, complex=True))
        self.check(t)
def test_IFFT3D(self):
    # only defined for gpu
    if DEVICE == GPU:
        t = tf.ifft3d(self.random(3, 4, 5, complex=True))
        self.check(t)

#
# reduction
#
def call(self, inputs, state):
    """The most basic URNN cell.
    Args:
        inputs (Tensor - batch_sz x num_in): One batch of cell input.
        state (Tensor - batch_sz x num_units): Previous cell state: COMPLEX
    Returns:
        A tuple (outputs, state):
            outputs (Tensor - batch_sz x num_units*2): Cell outputs on the whole batch.
            state (Tensor - batch_sz x num_units): New state of the cell.
    """
    # print("cell.call inputs:", inputs.shape, inputs.dtype)
    # print("cell.call state:", state.shape, state.dtype)

    # prepare input linear combination
    inputs_mul = tf.matmul(inputs, tf.transpose(self.w_ih))  # [batch_sz, 2*num_units]
    inputs_mul_c = tf.complex(inputs_mul[:, :self._num_units],
                              inputs_mul[:, self._num_units:])  # [batch_sz, num_units]

    # prepare state linear combination (always complex!)
    state_c = tf.complex(state[:, :self._num_units],
                         state[:, self._num_units:])

    state_mul = self.D1.mul(state_c)
    state_mul = FFT(state_mul)
    state_mul = self.R1.mul(state_mul)
    state_mul = self.P.mul(state_mul)
    state_mul = self.D2.mul(state_mul)
    state_mul = IFFT(state_mul)
    state_mul = self.R2.mul(state_mul)
    state_mul = self.D3.mul(state_mul)  # [batch_sz, num_units]

    # calculate preactivation
    preact = inputs_mul_c + state_mul  # [batch_sz, num_units]

    new_state_c = modReLU(preact, self.b_h)  # [batch_sz, num_units] C
    new_state = tf.concat([tf.real(new_state_c), tf.imag(new_state_c)], 1)  # [batch_sz, 2*num_units] R

    # outside network (last dense layer) is ready for 2*num_units -> num_out
    output = new_state

    # print("cell.call output:", output.shape, output.dtype)
    # print("cell.call new_state:", new_state.shape, new_state.dtype)

    return output, new_state
def create_network():
    dp = tflearn.data_preprocessing.DataPreprocessing()
    dp.add_featurewise_zero_center()
    dp.add_featurewise_stdnorm()
    # dp.add_samplewise_zero_center()
    # dp.add_samplewise_stdnorm()

    network = tflearn.input_data(shape=[None, chunk_size])  # , data_preprocessing=dp)

    # input is a real signal
    network = tf.complex(network, 0.0)

    # fft the input
    input_fft = tf.fft(network)
    input_orig_fft = input_fft
    input_fft = tf.stack([tf.real(input_fft), tf.imag(input_fft)], axis=2)
    fft_size = int(input_fft.shape[1])
    network = input_fft
    print("fft shape: " + str(input_fft.get_shape()))

    omg = fft_size
    nn_reg = None

    mask = network
    mask = tflearn.layers.fully_connected(mask, omg*2, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)
    mask = tflearn.layers.fully_connected(mask, omg, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)
    mask = tflearn.layers.fully_connected(mask, omg/2, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)
    # mask = tflearn.layers.fully_connected(mask, omg/4, activation="tanh")
    mask = tflearn.reshape(mask, [-1, 1, omg/2])
    mask = tflearn.layers.recurrent.lstm(mask, omg/4)
    mask = tflearn.layers.fully_connected(mask, omg/2, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)
    mask = tflearn.layers.fully_connected(mask, omg, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)
    mask = tflearn.layers.fully_connected(mask, omg*2, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)
    mask = tflearn.layers.fully_connected(mask, omg, activation="sigmoid", regularizer=nn_reg)

    real = tf.multiply(tf.real(input_orig_fft), mask)
    imag = tf.multiply(tf.imag(input_orig_fft), mask)
    network = tf.real(tf.ifft(tf.complex(real, imag)))
    print("final shape: " + str(network.get_shape()))

    network = tflearn.regression(network, optimizer="adam", learning_rate=learning_rate, loss="mean_square")
    return network