The following code examples, extracted from open source Python projects, illustrate how to use chainer.Parameter().
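Before the extracted examples, here is a minimal sketch of the two ways a chainer.Parameter is typically constructed: eagerly from a concrete array, or lazily from an initializer plus a shape. The Scale link below is a hypothetical illustration, not taken from any of the projects that follow.

import numpy as np
import chainer

class Scale(chainer.Link):
    def __init__(self):
        super(Scale, self).__init__()
        with self.init_scope():
            # Eager: the parameter is built from a concrete array.
            self.w = chainer.Parameter(np.ones((3,), dtype=np.float32))
            # Lazy: an initializer plus a shape; the array is filled on creation.
            self.b = chainer.Parameter(chainer.initializers.Zero(), (3,))

    def __call__(self, x):
        return x * self.w + self.b

link = Scale()
print(sorted(name for name, _ in link.namedparams()))  # ['/b', '/w']

Registering the parameters under init_scope() is what makes the link report them to optimizers; assigning them as plain attributes would not.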
def add_all_variable_images(self, last_var, exclude_params=True,
                            global_step=None, pattern='.*'):
    cp = re.compile(pattern)
    g = c.build_computational_graph(last_var)
    names = NodeName(g.nodes)
    for n in g.nodes:
        # Walk the graph; log every variable whose name matches the pattern,
        # skipping parameters when exclude_params is set.
        if isinstance(n, chainer.variable.VariableNode) and \
                (not exclude_params or
                 not isinstance(n._variable(), chainer.Parameter)) and \
                n.data is not None and \
                cp.match(names.name(n)):
            data = chainer.cuda.to_cpu(n.data)
            assert data.ndim < 5, \
                "'variable.data.ndim' must be less than 5; got %d." % data.ndim
            if data.ndim == 4:
                for i, d in enumerate(data):
                    img = make_grid(np.expand_dims(d, 1) if d.shape[0] != 3 else d)
                    self.add_image(names.name(n) + '/' + str(i), img, global_step)
            else:
                img = make_grid(np.expand_dims(data, 1) if data.shape[0] != 3 else data)
                self.add_image(names.name(n), img, global_step)
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
             initialV=None, nobias=False, cover_all=False):
    super(Convolution1D, self).__init__()
    ksize = conv_nd.as_tuple(ksize, 1)
    self.ksize = ksize
    self.nobias = nobias
    self.stride = stride
    self.pad = pad
    self.out_channels = out_channels
    self.in_channels = in_channels
    self.cover_all = cover_all
    self.initialV = initialV
    with self.init_scope():
        V_shape = (out_channels, in_channels) + ksize
        initialV = initializers._get_initializer(initialV)
        self.V = Parameter(initialV, V_shape)
        # g and the bias are created later, data-dependently,
        # in _initialize_params (next example).
        if nobias:
            self.b = None
def _initialize_params(self, t):
    xp = cuda.get_array_module(t)
    self.mean_t = xp.mean(t, axis=(0, 2))          # per-channel mean
    self.std_t = xp.sqrt(xp.var(t, axis=(0, 2)))   # per-channel stddev
    g = 1 / self.std_t
    b = -self.mean_t / self.std_t
    with self.init_scope():
        if not self.nobias:
            self.b = Parameter(b, b.shape)
        g_shape = (self.out_channels, 1) + (1,) * len(self.ksize)
        self.g = Parameter(g.reshape(g_shape), g_shape)
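The two snippets above implement a data-dependent initialization in the style of weight normalization: on the first batch, g and b are chosen so that the per-channel pre-activations start with zero mean and unit variance. A quick NumPy check of that arithmetic, with hypothetical shapes not taken from the project:

import numpy as np

t = np.random.randn(8, 4, 16) * 3.0 + 5.0   # (batch, channel, width)
mean_t = t.mean(axis=(0, 2))
std_t = t.std(axis=(0, 2))
g = 1.0 / std_t
b = -mean_t / std_t
normalized = g[None, :, None] * t + b[None, :, None]
print(normalized.mean(axis=(0, 2)))  # ~0 for every channel
print(normalized.std(axis=(0, 2)))   # ~1 for every channel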
def __init__(self, n_input_channels, action_size, n_hidden_layers=0,
             n_hidden_channels=None, min_action=None, max_action=None,
             bound_mean=False, var_type='spherical', nonlinearity=F.relu,
             mean_wscale=1):
    self.n_input_channels = n_input_channels
    self.action_size = action_size
    self.n_hidden_layers = n_hidden_layers
    self.n_hidden_channels = n_hidden_channels
    self.min_action = min_action
    self.max_action = max_action
    self.bound_mean = bound_mean
    self.nonlinearity = nonlinearity
    var_size = {'spherical': 1, 'diagonal': action_size}[var_type]

    layers = []
    layers.append(L.Linear(n_input_channels, n_hidden_channels))
    for _ in range(n_hidden_layers - 1):
        layers.append(self.nonlinearity)
        layers.append(L.Linear(n_hidden_channels, n_hidden_channels))
    layers.append(self.nonlinearity)
    # The last layer is used to compute the mean
    layers.append(
        L.Linear(n_hidden_channels, action_size,
                 initialW=LeCunNormal(mean_wscale)))
    if self.bound_mean:
        layers.append(lambda x: bound_by_tanh(
            x, self.min_action, self.max_action))

    super().__init__()
    with self.init_scope():
        self.hidden_layers = links.Sequence(*layers)
        self.var_param = chainer.Parameter(
            initializer=0.0, shape=(var_size,))
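The var_param above is a free Parameter: it does not depend on the input, yet the optimizer updates it like any other weight. A hypothetical illustration follows; mapping the raw parameter through softplus to keep the variance positive is an assumption for this sketch, not necessarily what this policy does.

import chainer
import chainer.functions as F

var_param = chainer.Parameter(initializer=0.0, shape=(1,))
var = F.softplus(var_param)   # keep the variance positive
print(float(var.array))       # ln(2) ~ 0.693 for an initial value of 0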
import numpy as np
import chainer

def create_simple_link():
    link = chainer.Link()
    with link.init_scope():
        link.param = chainer.Parameter(np.zeros(1))
    return link
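Usage sketch for the helper above: because the parameter is assigned under init_scope(), the plain chainer.Link picks it up automatically.

link = create_simple_link()
print(link.param.shape)                          # (1,)
print([name for name, _ in link.namedparams()])  # ['/param']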
def add_all_parameter_histograms(self, last_var, global_step=None,
                                 pattern='.*'):
    cp = re.compile(pattern)
    g = c.build_computational_graph(last_var)
    names = NodeName(g.nodes)
    for n in g.nodes:
        if isinstance(n, chainer.variable.VariableNode) and \
                isinstance(n._variable(), chainer.Parameter) and \
                cp.match(names.name(n)):
            data = chainer.cuda.to_cpu(n._variable().data)
            self.add_histogram(names.name(n), data, global_step)
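A minimal, self-contained sketch of the traversal above, assuming a single Linear link; the helpers NodeName and add_histogram are project-local and omitted here:

import numpy as np
import chainer
import chainer.links as L
from chainer import computational_graph as c

link = L.Linear(3, 2)
y = link(np.zeros((1, 3), dtype=np.float32))
g = c.build_computational_graph(y)
params = [n for n in g.nodes
          if isinstance(n, chainer.variable.VariableNode)
          and isinstance(n._variable(), chainer.Parameter)]
print(len(params))  # 2: the W and b of the Linear link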
def __init__(self, w, g):
    super(SimpleLink, self).__init__()
    with self.init_scope():
        self.param = chainer.Parameter(w)
        self.param.grad = g
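Usage sketch, assuming the full SimpleLink class and same-shape float32 arrays for w and g; the gradient array is attached directly to the freshly created parameter.

import numpy as np

w = np.arange(3, dtype=np.float32)
g = np.ones(3, dtype=np.float32)
link = SimpleLink(w, g)
print(link.param.data)  # [0. 1. 2.]
print(link.param.grad)  # [1. 1. 1.]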
def setInitAllParameters(self, optimizer, init_type="default",
                         init_scale=0.1):
    sys.stdout.write("############ Current Parameters BEGIN\n")
    self.printAllParameters(optimizer)
    sys.stdout.write("############ Current Parameters END\n")

    if init_type == "uniform":
        sys.stdout.write(
            "# initializer is [uniform] [%f]\n" % (init_scale))
        t_initializer = chainer.initializers.Uniform(init_scale)
        named_params = sorted(
            optimizer.target.namedparams(),
            key=lambda x: x[0])
        for n, p in named_params:
            with cuda.get_device(p.data):
                if args.chainer_version_check[0] == 2:
                    p.copydata(chainer.Parameter(
                        t_initializer, p.data.shape))
                else:
                    chainer.initializers.init_weight(p.data, t_initializer)
    elif init_type == "normal":
        sys.stdout.write("# initializer is [normal] [%f]\n" % (init_scale))
        t_initializer = chainer.initializers.Normal(init_scale)
        named_params = sorted(
            optimizer.target.namedparams(),
            key=lambda x: x[0])
        for n, p in named_params:
            with cuda.get_device(p.data):
                if args.chainer_version_check[0] == 2:
                    p.copydata(chainer.Parameter(
                        t_initializer, p.data.shape))
                else:
                    chainer.initializers.init_weight(p.data, t_initializer)
    else:  # "default"
        sys.stdout.write(
            "# initializer is [default] [%f]\n" % (init_scale))
        named_params = sorted(
            optimizer.target.namedparams(),
            key=lambda x: x[0])
        for n, p in named_params:
            with cuda.get_device(p.data):
                p.data *= args.init_scale

    self.printAllParameters(optimizer, init_type, init_scale)
    return 0
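The copydata() pattern from the Chainer v2 branch above is worth isolating: building a throwaway chainer.Parameter from an initializer and copying it into an existing parameter reinitializes the values while leaving shape and device untouched. A condensed, hypothetical sketch:

import chainer
import chainer.links as L

def reinit_uniform(link, scale=0.1):
    init = chainer.initializers.Uniform(scale)
    for name, p in sorted(link.namedparams()):
        # copydata() copies values element-wise into the existing parameter.
        p.copydata(chainer.Parameter(init, p.shape))

reinit_uniform(L.Linear(3, 2))  # hypothetical usage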