The following 24 code examples, extracted from open-source Python projects, illustrate how to use network.Network().
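Several of the snippets below construct a Network from a plain list of layer sizes; as a quick orientation, here is a minimal sketch of that pattern. The SGD call and the training_data/test_data variables are assumptions modelled on common usage of this constructor style, not code taken from any single project below.

import network

# Hypothetical usage: 784 inputs, one hidden layer of 30 units, 10 output classes.
# training_data and test_data are assumed to be prepared elsewhere by the project's loader.
net = network.Network([784, 30, 10])
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)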
def basic_conv(n=3, epochs=60):
    nets = []  # list of networks (for ensemble, if desired)
    for j in range(n):
        net = Network([
            ConvLayer(image_shape=(mini_batch_size, 1, 64, 512),
                      filter_shape=(20, 1, 3, 3),
                      stride=(1, 1),
                      activation_fn=relu),
            ConvPoolLayer(image_shape=(mini_batch_size, 20, 64, 512),
                          filter_shape=(40, 20, 3, 3),
                          stride=(1, 1),
                          poolsize=(2, 2),
                          activation_fn=relu),
            ConvPoolLayer(image_shape=(mini_batch_size, 40, 32, 256),
                          filter_shape=(80, 40, 3, 3),
                          stride=(1, 1),
                          poolsize=(2, 2),
                          activation_fn=relu),
            FullyConnectedLayer(n_in=80*16*128, n_out=100),
            SoftmaxLayer(n_in=100, n_out=2)],
            mini_batch_size, 50)
        net.SGD(train_data, epochs, mini_batch_size, 0.1,
                validation_data, test_data, lmbda=0.0)
        nets.append(net)  # Add current network to list
    return nets
def basic_conv(n=3, epochs=60):
    nets = []  # list of networks (for ensemble, if desired)
    for j in range(n):
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 3, 32, 32),
                          filter_shape=(32, 3, 3, 3),
                          stride=(1, 1),
                          poolsize=(2, 2),
                          activation_fn=relu),
            ConvPoolLayer(image_shape=(mini_batch_size, 32, 16, 16),
                          filter_shape=(80, 32, 3, 3),
                          stride=(1, 1),
                          poolsize=(2, 2),
                          activation_fn=relu),
            ConvPoolLayer(image_shape=(mini_batch_size, 80, 8, 8),
                          filter_shape=(128, 80, 3, 3),
                          stride=(1, 1),
                          poolsize=(2, 2),
                          activation_fn=relu),
            FullyConnectedLayer(n_in=128*4*4, n_out=100),
            SoftmaxLayer(n_in=100, n_out=10)],
            mini_batch_size)
        net.SGD(train_data, epochs, mini_batch_size, 0.01,
                validation_data, test_data)
        nets.append(net)  # Add current network to list
    return nets
def basic_conv(n=3, epochs=60):
    nets = []  # list of networks (for ensemble, if desired)
    for j in range(n):
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                          filter_shape=(20, 1, 5, 5),
                          stride=(1, 1),
                          poolsize=(2, 2),
                          activation_fn=relu),
            ConvPoolLayer(image_shape=(mini_batch_size, 20, 14, 14),
                          filter_shape=(40, 20, 5, 5),
                          stride=(1, 1),
                          poolsize=(2, 2),
                          activation_fn=relu),
            FullyConnectedLayer(n_in=40*7*7, n_out=100),
            SoftmaxLayer(n_in=100, n_out=10)],
            mini_batch_size)
        net.SGD(training_data, epochs, mini_batch_size, 0.1,
                validation_data, test_data)
        nets.append(net)  # Add current network to list
    return nets
def create_population(self, count):
    """Create a population of random networks.

    Args:
        count (int): Number of networks to generate, aka the size of
            the population

    Returns:
        (list): Population of network objects

    """
    pop = []
    for _ in range(0, count):
        # Create a random network.
        network = Network(self.nn_param_choices)
        network.create_random()

        # Add the network to our population.
        pop.append(network)

    return pop
def mutate(self, network):
    """Randomly mutate one part of the network.

    Args:
        network (dict): The network parameters to mutate

    Returns:
        (Network): A randomly mutated network object

    """
    # Choose a random key.
    mutation = random.choice(list(self.nn_param_choices.keys()))

    # Mutate one of the params.
    network.network[mutation] = random.choice(self.nn_param_choices[mutation])

    return network
def __init__(self, config):
    self.config = config
    self.data = DataSet(self.config)
    self.add_placeholders()
    self.summarizer = tf.summary
    self.net = Network(config)
    self.saver = tf.train.Saver()
    self.epoch_count, self.second_epoch_count = 0, 0
    self.outputs, self.prob = self.net.neural_search()
    self.hyperparams = self.net.gen_hyperparams(self.outputs)
    self.hype_list = [1 for i in range(self.config.hyperparams)]  # [7, 7, 24, 5, 5, 36, 3, 3, 48, 64]
    self.reinforce_loss = self.net.REINFORCE(self.prob)
    self.tr_cont_step = self.net.train_controller(self.reinforce_loss, self.val_accuracy)
    self.cNet, self.y_pred = self.init_child(self.hype_list)
    self.cross_loss, self.accuracy, self.tr_model_step = self.grow_child()
    self.init = tf.global_variables_initializer()
    self.local_init = tf.local_variables_initializer()
def BBPSO():
    # initialize the particles
    particles = [Particle(Network([784, 30, 10])) for i in range(SWARM_SIZE)]

    for it in range(ITERATIONS):
        # update global best with best of all particles
        gbest = particles[0].best
        gbest_score = particles[0].best_score
        for i in range(SWARM_SIZE):
            p = particles[i]
            if p.best_score > gbest_score:
                gbest = p.best
                gbest_score = p.best_score

        if it % 100 == 0:
            print(str(it) + ": global best score " + str(gbest_score))

        for i in range(SWARM_SIZE):
            p = particles[i]
            pmu = p.best.biases + gbest.biases, \
                  p.best.weights + gbest.weights
            pmu = pmu[0] / 2.0, pmu[1] / 2.0
            psigma = np.abs(p.best.biases - gbest.biases), \
                     np.abs(p.best.weights - gbest.weights)
            pos = Network([784, 30, 10], mu=pmu, sigma=psigma)
            p.pos = pos
            fit = pos.fitness(X, labels)
            if fit > p.best_score:
                p.best = pos
                p.best_score = fit
def BBPSO_cost(ITERATIONS, SWARM_SIZE):
    # initialize the particles
    particles = [Particle(Network([784, 30, 10])) for i in range(SWARM_SIZE)]
    for p in particles:
        p.best_score = 1e20
    gbest = None

    for it in range(ITERATIONS):
        # update global best with best of all particles
        gbest = particles[0].best
        gbest_score = particles[0].best_score
        for i in range(SWARM_SIZE):
            p = particles[i]
            if p.best_score < gbest_score:
                gbest = p.best
                gbest_score = p.best_score

        fit = gbest.fitness(X, labels)
        if it % 100 == 0:
            print(str(it) + ": global best score " + str(gbest_score) + " correct " + str(fit))

        for i in range(SWARM_SIZE):
            p = particles[i]
            pmu = p.best.biases + gbest.biases, \
                  p.best.weights + gbest.weights
            pmu = pmu[0] / 2.0, pmu[1] / 2.0
            psigma = np.abs(p.best.biases - gbest.biases), \
                     np.abs(p.best.weights - gbest.weights)
            pos = Network([784, 30, 10], mu=pmu, sigma=psigma)
            p.pos = pos
            c = pos.cost(X, labels)
            if c < p.best_score:
                p.best = pos
                p.best_score = c

    print("final best score " + str(gbest_score) + " correct " + str(fit))
    return gbest

# BBPSO()
def __init__(self, **hyperparas):
    self.hyperparas = hyperparas
    sizes = self.hyperparas.get('sizes', [784, 10])
    self.model = Network(sizes)
def build_network(self):
    print('Building network...')
    self.nn_network = Network(self.__options)
    return self.nn_network.build_training_graph(self.__options)
def __init__(self, network=0):
    if network == 0:
        self.network = Network(self.networkArchitecture)
    else:
        self.network = network
    self.genes = self.network.togenes()
def load(filename):
    """Load a neural network from the file ``filename``.  Returns an
    instance of Network.

    """
    f = open(filename, "r")
    data = json.load(f)
    f.close()
    net = Network(data["sizes"])
    net.weights = [np.array(w) for w in data["weights"]]
    net.biases = [np.array(b) for b in data["biases"]]
    return net
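A complementary save routine is not part of the extracted code; a minimal sketch that writes the fields load() reads back (assuming the Network keeps its layer sizes in a sizes attribute) could look like this:

import json

def save(net, filename):
    # Serialize the fields that load() expects: "sizes", "weights", "biases".
    data = {"sizes": net.sizes,
            "weights": [w.tolist() for w in net.weights],
            "biases": [b.tolist() for b in net.biases]}
    with open(filename, "w") as f:
        json.dump(data, f)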
def generateRandomGenome():
    net = Network(Config.networkArchitecture)
    genome = Genome(net)
    return genome
def generate_network_list(nn_param_choices):
    """Generate a list of all possible networks.

    Args:
        nn_param_choices (dict): The parameter choices

    Returns:
        networks (list): A list of network objects

    """
    networks = []

    # This is silly.
    for nbn in nn_param_choices['nb_neurons']:
        for nbl in nn_param_choices['nb_layers']:
            for a in nn_param_choices['activation']:
                for o in nn_param_choices['optimizer']:

                    # Set the parameters.
                    network = {
                        'nb_neurons': nbn,
                        'nb_layers': nbl,
                        'activation': a,
                        'optimizer': o,
                    }

                    # Instantiate a network object with set parameters.
                    network_obj = Network()
                    network_obj.create_set(network)

                    networks.append(network_obj)

    return networks
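For reference, a hypothetical nn_param_choices dictionary in the shape this function expects might look like the following; the concrete values are assumptions for illustration only.

nn_param_choices = {
    'nb_neurons': [64, 128, 256],
    'nb_layers': [1, 2, 3],
    'activation': ['relu', 'tanh'],
    'optimizer': ['adam', 'sgd'],
}

# Enumerates every combination: 3 * 3 * 2 * 2 = 36 Network objects.
networks = generate_network_list(nn_param_choices)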
def breed(self, mother, father):
    """Make two children as parts of their parents.

    Args:
        mother (dict): Network parameters
        father (dict): Network parameters

    Returns:
        (list): Two network objects

    """
    children = []
    for _ in range(2):

        child = {}

        # Loop through the parameters and pick params for the kid.
        for param in self.nn_param_choices:
            child[param] = random.choice(
                [mother.network[param], father.network[param]]
            )

        # Now create a network object.
        network = Network(self.nn_param_choices)
        network.create_set(child)

        children.append(network)

    return children
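A hypothetical driver loop shows how create_population(), mutate(), and breed() can fit together. The accuracy attribute, retention fraction, and mutation rate below are illustrative assumptions, not code from the projects above.

import random

def evolve(optimizer, population):
    # Rank networks by a fitness score assumed to be stored on each object
    # after training (hypothetical `accuracy` attribute).
    graded = sorted(population, key=lambda net: net.accuracy, reverse=True)

    # Keep the top half as parents.
    parents = graded[:len(graded) // 2]

    # Occasionally mutate a surviving parent in place.
    for net in parents:
        if random.random() < 0.2:
            optimizer.mutate(net)

    # Breed children until the population is back to full size.
    children = []
    while len(parents) + len(children) < len(population):
        mother, father = random.sample(parents, 2)
        children.extend(optimizer.breed(mother, father))

    return (parents + children)[:len(population)]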
def setUp(self):
    self.close_friends = set(["David", "Christy", "Carmela", "Evelyn"])
    self.not_so_close_friends = set(["Tony", "Ivan", "Alex", "Julie"])
    self.strangers = set(["UnknownGuy", "UnknownGirl"])
    self.dislike = set(["Marcus"])
    self.Group = namedtuple("Data", ["group_name", "value"])
    self.Data = namedtuple("Errors", ["error", "group"])
    self.new_group = self.Group("haters", set(["Willock"]))
    self.existing_group = self.Group("strangers", set(["J.Cole"]))
    self.bad_value_group = self.Group("love", None)
    self.bad_groups = [self.Data(NameError, self.existing_group),
                       self.Data(TypeError, self.bad_value_group)]
    self.a_group_name = "strangers"
    self.not_a_group_name = "power_rangers"
    self.network = Network(self.close_friends, self.not_so_close_friends,
                           self.strangers, self.dislike)
    self.print_network_lines = ["Social Network:",
                                "close_friends: {",
                                "people_i_dislike: {'Marcus'}",
                                "not_so_close_friends: {",
                                "strangers: {"]
def main(_):
    model_dir = util.get_model_dir(conf,
        ['data_dir', 'sample_dir', 'max_epoch', 'test_step', 'save_step',
         'is_train', 'random_seed', 'log_level', 'display', 'runtime_base_dir',
         'occlude_start_row', 'num_generated_images'])
    util.preprocess_conf(conf)
    validate_parameters(conf)

    data = 'mnist' if conf.data == 'color-mnist' else conf.data
    DATA_DIR = os.path.join(conf.runtime_base_dir, conf.data_dir, data)
    SAMPLE_DIR = os.path.join(conf.runtime_base_dir, conf.sample_dir, conf.data, model_dir)

    util.check_and_create_dir(DATA_DIR)
    util.check_and_create_dir(SAMPLE_DIR)

    dataset = get_dataset(DATA_DIR, conf.q_levels)

    with tf.Session() as sess:
        network = Network(sess, conf, dataset.height, dataset.width, dataset.channels)

        stat = Statistic(sess, conf.data, conf.runtime_base_dir, model_dir, tf.trainable_variables())
        stat.load_model()

        if conf.is_train:
            train(dataset, network, stat, SAMPLE_DIR)
        else:
            generate(network, dataset.height, dataset.width, SAMPLE_DIR)
def get_network(dens_scope):
    util.preprocess_conf(conf)
    network = Network(conf, 42, 42, 1, dens_scope)
    return network
def get_network():
    util.preprocess_conf(conf)
    network = Network(conf, 42, 42, 1)
    return network
def _get_network(self, traffic_port, index=None, reverse=False):
    interfaces = [self.clients['traffic'].get_interface(traffic_port)]
    interfaces.extend(self.worker.get_network_interfaces(index))
    return Network(interfaces, reverse)
def fit(self, sess, summarizer):
    sess.run(self.init)
    sess.run(self.local_init)
    max_epochs = self.config.max_epochs
    self.epoch_count, val_accuracy, reward = 0, 0.0, 1.0
    while self.epoch_count < max_epochs:
        # Creation of new Child Network from new Hyperparameters
        self.hype_list = sess.run(self.hyperparams)
        hyperfoo = {"Filter Row 1": self.hype_list[0], "Filter Column 1": self.hype_list[1],
                    "No Filter 1": self.hype_list[2], "Filter Row 2": self.hype_list[3],
                    "Filter Column 2": self.hype_list[4], "No Filter 2": self.hype_list[5],
                    "Filter Row 3": self.hype_list[6], "Filter Column 3": self.hype_list[7],
                    "No Filter 3": self.hype_list[8], "No Neurons": self.hype_list[9]}
        output = ""
        for key in hyperfoo:
            output += "{} : {}\n".format(key, hyperfoo[key])
        with open("../stdout/hyperparams.log", "a+") as f:
            f.write(output + "\n\n")
        print(sess.run(self.outputs))
        print(output + "\n")

        self.second_epoch_count = 0
        while self.second_epoch_count < max_epochs:
            average_loss, tr_step = self.run_model_epoch(sess, "train", summarizer['train'], self.second_epoch_count)
            if not self.config.debug:
                val_loss, val_accuracy = self.run_model_eval(sess, "validation", summarizer['val'], tr_step)
                reward = sum(val_accuracy[-5:]) ** 3
                output = "=> Training : Loss = {:.3f} | Validation : Loss = {:.3f}, Accuracy : {:.3f}".format(average_loss, val_loss, val_accuracy[-1])
                with open("../stdout/validation.log", "a+") as f:
                    f.write(output)
                print(output)
            self.second_epoch_count += 1

        _ = sess.run(self.tr_cont_step, feed_dict={self.val_accuracy: reward})
        test_loss, test_accuracy = self.run_model_eval(sess, "test", summarizer['test'], tr_step)
        self.epoch_count += 1
        self.cNet, self.y_pred = self.init_child(self.hype_list)
        self.cross_loss, self.accuracy, self.tr_model_step = self.grow_child()

    returnDict = {"test_loss": test_loss, "test_accuracy": test_accuracy}
    self.saver.save(sess, self.config.ckptdir_path + "/model_best.ckpt")
    return returnDict
def __init__(self, con=None):
    self._con = con
    self._dns = DNS(self)
    self._dhcp = DHCP(self)
    self._ftp = PureFTP(self)
    self._network = Network(self)
    self._firewall = Firewall(self)
def _getFactoryEnabledClasses(self):
    return (("", "UCI", UCI()), ("", "DNS", DNS()), ("", "DHCP", DHCP()),
            ("", "PureFTP", PureFTP()), ("", "Network", Network()),
            ("", "Firewall", Firewall()), ("", "OpenWRTManager", OpenWRTManager()))
def __init__(self, layer_sizes, n_samples, alpha, learning_rate, v_prior, batch_size,
             X_train, y_train, N_train):

    layer_sizes = copy.copy(layer_sizes)
    layer_sizes[0] = layer_sizes[0] + 1
    print(layer_sizes)
    self.batch_size = batch_size
    self.N_train = N_train
    self.X_train = X_train
    self.y_train = y_train
    self.rate = learning_rate

    # We create the network
    self.network = network.Network(layer_sizes, n_samples, v_prior, N_train)

    # index to a batch
    index = T.lscalar()
    self.indexes = T.vector('index', dtype='int32')
    indexes_train = theano.shared(value=np.array(range(0, N_train), dtype=np.int32), borrow=True)

    self.x = T.tensor3('x', dtype=theano.config.floatX)
    self.y = T.matrix('y', dtype=theano.config.floatX)
    self.lr = T.fscalar()

    # The logarithm of the values for the likelihood factors
    sampl = T.bscalar()
    self.fwpass = theano.function(outputs=self.network.output(self.x, False, samples=sampl, use_indices=False),
                                  inputs=[self.x, sampl], allow_input_downcast=True)

    ll_train = self.network.log_likelihood_values(self.x, self.y, self.indexes, 0.0, 1.0)

    self.estimate_marginal_ll = (-1.0 * N_train / (self.x.shape[1] * alpha) *
        T.sum(LogSumExp(alpha * (T.sum(ll_train, 2) - self.network.log_f_hat() - self.network.log_f_hat_z()), 0) +
              T.log(1.0 / n_samples)) -
        self.network.log_normalizer_q() -
        1.0 * N_train / self.x.shape[1] * self.network.log_normalizer_q_z() +
        self.network.log_Z_prior())

    # We create a theano function for updating q
    upd = adam(self.estimate_marginal_ll, self.network.params,
               indexes_train[index * batch_size:(index + 1) * batch_size],
               self.rate, rescale_local=np.float32(N_train / batch_size))

    self.process_minibatch = theano.function(
        [index], self.estimate_marginal_ll,
        updates=upd,
        givens={self.x: T.tile(self.X_train[index * batch_size:(index + 1) * batch_size], [n_samples, 1, 1]),
                self.y: self.y_train[index * batch_size:(index + 1) * batch_size],
                self.indexes: indexes_train[index * batch_size:(index + 1) * batch_size]})

    # We create a theano function for making predictions
    self.error_minibatch_train = theano.function(
        [index],
        T.sum((T.mean(self.network.output(self.x, self.indexes), 0, keepdims=True)[0, :, :] - self.y)**2) / layer_sizes[-1],
        givens={self.x: T.tile(self.X_train[index * batch_size:(index + 1) * batch_size], [n_samples, 1, 1]),
                self.y: self.y_train[index * batch_size:(index + 1) * batch_size],
                self.indexes: indexes_train[index * batch_size:(index + 1) * batch_size]})

    self.ll_minibatch_train = theano.function(
        [index],
        T.sum(LogSumExp(T.sum(ll_train, 2), 0) + T.log(1.0 / n_samples)),
        givens={self.x: T.tile(self.X_train[index * batch_size:(index + 1) * batch_size], [n_samples, 1, 1]),
                self.y: self.y_train[index * batch_size:(index + 1) * batch_size],
                self.indexes: indexes_train[index * batch_size:(index + 1) * batch_size]})