The following are 50 code examples, extracted from open source Python projects, showing how to use chainer.functions.crelu().
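Before the project examples, here is a minimal standalone sketch (assuming only NumPy and Chainer are installed) of what F.crelu computes: it concatenates relu(x) and relu(-x) along the given axis, so that axis doubles in size.

    import numpy as np
    import chainer.functions as F

    x = np.array([[-1.0, 2.0],
                  [3.0, -4.0]], dtype=np.float32)
    y = F.crelu(x, axis=1)  # concat(relu(x), relu(-x)) along axis 1
    print(y.data)
    # [[0. 2. 1. 0.]
    #  [3. 0. 0. 4.]]
    print(y.shape)  # (2, 4): the chosen axis is doubled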
def decode(self, z):
    # pdb.set_trace()
    a = self.a_enc
    # If this function is coming from the sampling call, the batch size of z
    # and a won't match. Manually handle that here.
    if a.shape[0] != z.shape[0]:
        a.volatile = 'ON'
        batch_size = z.shape[0]
        a.data = a.data[0:batch_size, :]
    net_input = F.concat((z, a), axis=1)
    h = F.crelu(self.plinx0(net_input))
    for i in range(self.num_layers - 1):
        layer_name = 'plinx' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.pmu = self.plinx_mu(h)
    self.pln_var = self.plinx_ln_var(h)
    return self.pmu, self.pln_var
def encode_a(self, x):
    a_params = self.qlina0(x)
    a_params = self.qlina_batch_norm_0(a_params)
    a_params = F.crelu(a_params)
    for i in range(self.num_layers - 1):
        layer_name = 'qlina' + str(i + 1)
        a_params = self[layer_name](a_params)
        layer_name = 'qlina_batch_norm_' + str(i + 1)
        a_params = self[layer_name](a_params)
        a_params = F.crelu(a_params)
    self.qmu_a = self.qlina_mu(a_params)
    self.qln_var_a = self.qlina_ln_var(a_params)
    return self.qmu_a, self.qln_var_a
def encode_z(self, x, a):
    # a = F.gaussian(self.qmu_a, self.qln_var_a)
    # This should be outside the encoding function. Pass the function a.
    net_input = F.concat((x, a), axis=1)
    h = self.qlinz0(net_input)
    h = self.qlinz_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'qlinz' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'qlinz_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.qmu_z = self.qlinz_mu(h)
    self.qln_var_z = self.qlinz_ln_var(h)
    return self.qmu_z, self.qln_var_z
def decode_a(self, z, x):
    net_input = F.concat((x, z), axis=1)
    h = self.plina0(net_input)
    h = self.plina_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'plina' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'plina_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.pmu_a = self.plina_mu(h)
    self.pln_var_a = self.plina_ln_var(h)
    return self.pmu_a, self.pln_var_a
def decode_a(self, z):
    # net_input = F.concat((x, z), axis=1)
    h = self.plina0(z)
    h = self.plina_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'plina' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'plina_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.pmu_a = self.plina_mu(h)
    self.pln_var_a = self.plina_ln_var(h)
    return self.pmu_a, self.pln_var_a
def encode(self, x):
    h = self.qlin0(x)
    h = self.qlin_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'qlin' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'qlin_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.qmu = self.qlin_mu(h)
    self.qln_var = self.qlin_ln_var(h)
    self.qh_vec_0 = self.qlin_h_vec_0(h)
    return self.qmu, self.qln_var, self.qh_vec_0
def decode(self, z):
    h = self.plin0(z)
    h = self.plin_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'plin' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'plin_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.p_ber_prob_logit = self.plin_ber_prob(h)
    return self.p_ber_prob_logit
def to_function(self):
    if self.nonlinearity.lower() == "clipped_relu":
        return clipped_relu()
    if self.nonlinearity.lower() == "crelu":
        return crelu()
    if self.nonlinearity.lower() == "elu":
        return elu()
    if self.nonlinearity.lower() == "hard_sigmoid":
        return hard_sigmoid()
    if self.nonlinearity.lower() == "leaky_relu":
        return leaky_relu()
    if self.nonlinearity.lower() == "relu":
        return relu()
    if self.nonlinearity.lower() == "sigmoid":
        return sigmoid()
    if self.nonlinearity.lower() == "softmax":
        return softmax()
    if self.nonlinearity.lower() == "softplus":
        return softplus()
    if self.nonlinearity.lower() == "tanh":
        return tanh()
    if self.nonlinearity.lower() == "bst":
        return bst()
    raise NotImplementedError()
def to_function(self):
    if self.nonlinearity.lower() == "clipped_relu":
        return clipped_relu()
    if self.nonlinearity.lower() == "crelu":
        return crelu()
    if self.nonlinearity.lower() == "elu":
        return elu()
    if self.nonlinearity.lower() == "hard_sigmoid":
        return hard_sigmoid()
    if self.nonlinearity.lower() == "leaky_relu":
        return leaky_relu()
    if self.nonlinearity.lower() == "relu":
        return relu()
    if self.nonlinearity.lower() == "sigmoid":
        return sigmoid()
    if self.nonlinearity.lower() == "softmax":
        return softmax()
    if self.nonlinearity.lower() == "softplus":
        return softplus()
    if self.nonlinearity.lower() == "tanh":
        return tanh()
    raise NotImplementedError()
def encode(self, x):
    h = F.crelu(self.qlin0(x))
    for i in range(self.num_layers - 1):
        layer_name = 'qlin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.qmu = self.qlin_mu(h)
    self.qln_var = self.qlin_ln_var(h)
    self.qh = self.qlin_h(h)
def decode(self, z):
    h = F.crelu(self.plin0(z))
    for i in range(self.num_layers - 1):
        layer_name = 'plin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.p_ber_prob_logit = self.plin_ber_prob(h)
def iaf(self, z, h, lin1, lin2):
    ms = F.crelu(lin1(F.concat((z, h), axis=1)))
    ms = lin2(ms)
    m, s = F.split_axis(ms, 2, axis=1)
    s = F.sigmoid(s)
    z = s * z + (1 - s) * m
    # pdb.set_trace()
    return z, -F.sum(F.log(s), axis=1)
def encode_a(self, x):
    a_params = F.crelu(self.qlina0(x))
    for i in range(self.num_layers - 1):
        layer_name = 'qlina' + str(i + 1)
        a_params = F.crelu(self[layer_name](a_params))
    self.qmu_a = self.qlina_mu(a_params)
    self.qln_var_a = self.qlina_ln_var(a_params)
    return self.qmu_a, self.qln_var_a
def encode_z(self, x, a):
    # a = F.gaussian(self.qmu_a, self.qln_var_a)
    # This should be outside the encoding function. Pass the function a.
    net_input = F.concat((x, a), axis=1)
    h = F.crelu(self.qlinz0(net_input))
    for i in range(self.num_layers - 1):
        layer_name = 'qlinz' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.qmu_z = self.qlinz_mu(h)
    self.qln_var_z = self.qlinz_ln_var(h)
    return self.qmu_z, self.qln_var_z
def decode(self, z):
    h = F.crelu(self.plinx0(z))
    for i in range(self.num_layers - 1):
        layer_name = 'plinx' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.p_ber_prob_logit = self.plinx_ber_prob(h)
    return self.p_ber_prob_logit
def decode_a(self, z, x):
    net_input = F.concat((x, z), axis=1)
    h = F.crelu(self.plina0(net_input))
    for i in range(self.num_layers - 1):
        layer_name = 'plina' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.pmu_a = self.plina_mu(h)
    self.pln_var_a = self.plina_ln_var(h)
    return self.pmu_a, self.pln_var_a
def decode(self, z):
    h = F.crelu(self.plinx0(z))
    for i in range(self.num_layers - 1):
        layer_name = 'plinx' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.pmu = self.plinx_mu(h)
    self.pln_var = self.plinx_ln_var(h)
    return self.pmu, self.pln_var
def decode_a(self, z):
    # net_input = F.concat((x, z), axis=1)
    h = F.crelu(self.plina0(z))
    for i in range(self.num_layers - 1):
        layer_name = 'plina' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.pmu_a = self.plina_mu(h)
    self.pln_var_a = self.plina_ln_var(h)
    return self.pmu_a, self.pln_var_a
def decode(self, z):
    h = F.crelu(self.plin0(z))
    for i in range(self.num_layers - 1):
        layer_name = 'plin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.pmu = self.plin_mu(h)
    self.pln_var = self.plin_ln_var(h)
    return self.pmu, self.pln_var
def decode(self, z):
    h = self.plinx0(z)
    h = self.plinx_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'plinx' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'plinx_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.p_ber_prob_logit = self.plinx_ber_prob(h)
    return self.p_ber_prob_logit
def decode(self, z):
    h = F.crelu(self.plin0(z))
    for i in range(self.num_layers - 1):
        layer_name = 'plin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.pmu = self.plin_mu(h)
    self.pln_var = self.plin_ln_var(h)
    return self.pmu, self.pln_var
def encode(self, x):
    h = F.crelu(self.qlin0(x))
    for i in range(self.num_layers - 1):
        layer_name = 'qlin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.qmu = self.qlin_mu(h)
    self.qln_var = self.qlin_ln_var(h)
def decode(self, z):
    h = F.crelu(self.plin0(z))
    for i in range(self.num_layers - 1):
        layer_name = 'plin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.pmu = self.plin_mu(h)
    self.pln_var = self.plin_ln_var(h)
def encode(self, x):
    h = self.qlin0(x)
    h = self.qlin_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'qlin' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'qlin_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.qmu = self.qlin_mu(h)
    self.qln_var = self.qlin_ln_var(h)
def decode(self, z):
    h = self.plin0(z)
    h = self.plin_batch_norm_0(h)
    h = F.crelu(h)
    for i in range(self.num_layers - 1):
        layer_name = 'plin' + str(i + 1)
        h = self[layer_name](h)
        layer_name = 'plin_batch_norm_' + str(i + 1)
        h = self[layer_name](h)
        h = F.crelu(h)
    self.p_ber_prob_logit = self.plin_ber_prob(h)
def encode(self, x):
    h = F.crelu(self.qlin0(x))
    for i in range(self.num_layers - 1):
        layer_name = 'qlin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.qmu = self.qlin_mu(h)
    self.qln_var = self.qlin_ln_var(h)
    self.qh_vec_0 = self.qlin_h_vec_0(h)
    return self.qmu, self.qln_var, self.qh_vec_0
def decode(self, z):
    h = F.crelu(self.plin0(z))
    for i in range(self.num_layers - 1):
        layer_name = 'plin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.p_ber_prob_logit = self.plin_ber_prob(h)
    return self.p_ber_prob_logit
def encode(self, x):
    h = F.crelu(self.qlin0(x))
    for i in range(self.num_layers - 1):
        layer_name = 'qlin' + str(i + 1)
        h = F.crelu(self[layer_name](h))
    self.qmu = self.qlin_mu(h)
    self.qln_var = self.qlin_ln_var(h)
    return self.qmu, self.qln_var
def __call__(self, x):
    return functions.crelu(x, self.axis)
def __init__(self, axis=1):
    self._function = "crelu"
    self.axis = axis
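The last two fragments appear to come from a small activation-wrapper class. A self-contained sketch of how such a wrapper could be assembled and used follows; the class name CReLUWrapper and the test input are assumptions for illustration, not taken from the source project.

    import numpy as np
    from chainer import functions

    class CReLUWrapper(object):
        # Hypothetical container for the two methods above.
        def __init__(self, axis=1):
            self._function = "crelu"
            self.axis = axis

        def __call__(self, x):
            return functions.crelu(x, self.axis)

    x = np.random.randn(2, 3).astype(np.float32)
    print(CReLUWrapper(axis=1)(x).shape)  # (2, 6): axis 1 is doubled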