The following code examples, extracted from open-source Python projects, illustrate how to use keras.initializations.normal().
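In Keras 1.x, an initializer is simply a function that maps a weight shape to a backend variable, and it is passed to a layer through the init argument; keras.initializations.normal(shape, scale, name) draws the weights from a zero-mean Gaussian whose standard deviation is scale (0.05 by default). Note that this module exists only in Keras 1.x; Keras 2 replaced it with keras.initializers. A minimal usage sketch (the layer sizes here are arbitrary):

from keras import initializations
from keras.models import Sequential
from keras.layers import Dense

def my_normal(shape, name=None):
    # zero-mean Gaussian, std 0.05 (the Keras 1.x default scale)
    return initializations.normal(shape, scale=0.05, name=name)

model = Sequential()
model.add(Dense(64, init=my_normal, input_dim=100))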
def plotGeneratedImages(epoch, example=100, dim=(10, 10), figsize=(10, 10)):
    noise = np.random.normal(0, 1, size=(example, randomDim))
    generatedImage = generator.predict(noise)
    generatedImage = generatedImage.reshape(example, 28, 28)
    plt.figure(figsize=figsize)
    for i in range(example):
        plt.subplot(dim[0], dim[1], i + 1)
        plt.imshow(generatedImage[i], interpolation='nearest', cmap='gray')
        # drop the x and y axis
        plt.axis('off')
    plt.tight_layout()
    if not os.path.exists('generated_image'):
        os.mkdir('generated_image')
    plt.savefig('generated_image/wgan_generated_img_epoch_%d.png' % epoch)
def get_q_network(weights_path):
    model = Sequential()
    model.add(Dense(1024, init=lambda shape, name: normal(shape, scale=0.01, name=name),
                    input_shape=(25112,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1024, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(6, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('linear'))
    adam = Adam(lr=1e-6)
    model.compile(loss='mse', optimizer=adam)
    if weights_path != "0":
        model.load_weights(weights_path)
    return model
def initNormal(shape, name=None):
    return initializations.normal(shape, scale=0.2, name=name)
def test_normal(tensor_shape):
    _runner(initializations.normal, tensor_shape,
            target_mean=0., target_std=0.05)
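The _runner helper used above comes from the surrounding test module and is not shown here; presumably it materializes the initializer for the given shape and checks the sample statistics, roughly along these lines (a sketch; the exact signature and tolerance are assumptions):

from keras import backend as K

def _runner(init, shape, target_mean=None, target_std=None):
    # Materialize the initializer and pull its values back as a numpy array.
    variable = init(shape)
    output = K.get_value(variable)
    tolerance = 1e-2  # loose, since the statistics come from a single draw
    if target_mean is not None:
        assert abs(output.mean() - target_mean) < tolerance
    if target_std is not None:
        assert abs(output.std() - target_std) < tolerance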
def weights_init(shape, name=None, dim_ordering=None):
    return normal(shape, scale=0.01, name=name)
def conv2D_init(shape, dim_ordering='tf', name=None):
    return initializations.normal(shape, scale=0.02, dim_ordering=dim_ordering, name=name)
def unitary_svd_init(shape, name=None):
    assert shape[0] == shape[1]
    Re = initializations.normal(shape, scale=1.0, name=name).get_value()
    Im = initializations.normal(shape, scale=1.0, name=name).get_value()
    X = Re + 1j * Im
    U, S, V = np.linalg.svd(X)
    X = np.dot(U, V)
    ReX = np.real(X)
    ImX = np.imag(X)
    Xaug = np.concatenate([ReX, ImX], axis=0)
    return K.variable(Xaug, name=name)
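The trick above relies on a standard fact: given the SVD X = U·S·V, replacing the singular values with ones (i.e. forming U·V) yields a unitary matrix, because U and V are themselves unitary. A quick numpy check of that property, independent of Keras:

import numpy as np

rng = np.random.RandomState(0)
A = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
U, S, V = np.linalg.svd(A)
Q = np.dot(U, V)  # drop the singular values
# Q is unitary: Q^H Q = I
assert np.allclose(np.dot(Q.conj().T, Q), np.eye(4))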
def my_init(shape, name=None):
    return initializations.normal(shape, scale=0.1, name=name)

# Best val_loss: 0.0205 - val_acc: 0.9978 (tried only once)
# 30 minutes on Amazon EC2 g2.2xlarge (NVIDIA GRID K520)
def embedding_learning(train_data, user_dic, artist_dic, context_list, n_users, n_items):
    # User embeddings
    UC = np.random.normal(0.0, 0.01, (n_users, dim_num))
    # Item embeddings
    IC = np.random.normal(0.0, 0.01, (n_items, dim_num))
    try:
        for iteration in range(max_iters):
            print 'loading...iteration: %d' % iteration
            t = time.time()
            for each_data in train_data:
                u_i, i, w_i = each_data
                w_i = w_i ** dis_coef
                # print artist_dic[i]
                for u_j in context_list[u_i]:
                    IC[artist_dic[i]] += learning_rate * ((1 - sigmoid(w_i)) * 2 * alpha *
                        (UC[user_dic[u_i]] - UC[user_dic[u_j]]) - 2 * lamda * IC[artist_dic[i]])
                    UC[user_dic[u_i]] += learning_rate * ((1 - sigmoid(w_i)) * 2 * alpha *
                        (IC[artist_dic[i]] - UC[user_dic[u_i]]) - 2 * lamda * UC[user_dic[u_i]])
                    UC[user_dic[u_j]] += learning_rate * ((1 - sigmoid(w_i)) * 2 * alpha *
                        (IC[artist_dic[i]] - UC[user_dic[u_j]]) - 2 * lamda * UC[user_dic[u_j]])
                # print IC[artist_dic[i]]
            print 'Iter: %d elapsed: %f seconds' % (iteration, time.time() - t)
    finally:
        np.save(model_dir + 'Item_Emb', IC)
        np.save(model_dir + 'User_Emb', UC)
        np.savetxt(model_dir + 'Item_Emb.txt', IC)
        np.savetxt(model_dir + 'User_Emb.txt', UC)
        print 'Model saved...'
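This snippet depends on several module-level names that are not shown (sigmoid, dim_num, max_iters, learning_rate, dis_coef, alpha, lamda, model_dir). A plausible minimal setup might look like the following; every value here is an illustrative assumption, not taken from the original project:

import numpy as np

dim_num = 50          # embedding dimensionality (assumed)
max_iters = 30        # passes over the training data (assumed)
learning_rate = 0.01  # SGD step size (assumed)
dis_coef = 0.5        # exponent applied to the raw interaction weight w_i (assumed)
alpha = 1.0           # strength of the context term (assumed)
lamda = 0.01          # L2 regularization strength (assumed)
model_dir = './model/'

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))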
def init_normal(shape, name=None):
    return initializations.normal(shape, scale=0.01, name=name)
def my_init(shape, name=None):
    return initializations.normal(shape, scale=1.2, name=name)
def create_classifier(body, data, n_classes, l2_reg=0.):
    # Include last layers
    top = BatchNormalization(mode=0, axis=channel_idx, name="bn7")(body)
    top = Activation('relu', name="relu7")(top)
    top = AtrousConvolution2D(512, 3, 3, 'he_normal', atrous_rate=(12, 12),
                              border_mode='same', name="conv6a",
                              W_regularizer=l2(l2_reg))(top)
    top = Activation('relu', name="conv6a_relu")(top)

    name = "hyperplane_num_cls_%d_branch_%d" % (n_classes, 12)

    def my_init(shape, name=None, dim_ordering='th'):
        return initializations.normal(shape, scale=0.01, name=name)

    top = AtrousConvolution2D(n_classes, 3, 3, my_init, atrous_rate=(12, 12),
                              border_mode='same', name=name,
                              W_regularizer=l2(l2_reg))(top)
    top = Deconvolution2D(n_classes, 16, 16, top._keras_shape, bilinear_init,
                          'linear', border_mode='valid', subsample=(8, 8),
                          bias=False, name="upscaling_" + str(n_classes),
                          W_regularizer=l2(l2_reg))(top)
    top = CropLayer2D(data, name='score')(top)
    top = NdSoftmax()(top)
    return top

# Create model of basic segnet
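The Deconvolution2D layer in create_classifier is initialized with bilinear_init, which is defined elsewhere in that project. A common choice for upsampling layers is a bilinear-interpolation kernel; a sketch of such an initializer in the Keras 1.x style might look like this (the project's actual implementation may differ):

import numpy as np
from keras import backend as K

def bilinear_init(shape, name=None, dim_ordering='th'):
    # shape is (nb_filter, stack_size, nb_row, nb_col) for 'th' ordering
    nb_filter, stack_size, nb_row, nb_col = shape
    f = np.ceil(nb_row / 2.0)
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    kernel = np.zeros((nb_row, nb_col))
    for x in range(nb_row):
        for y in range(nb_col):
            kernel[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
    weights = np.zeros(shape)
    # put the same bilinear kernel on each channel's diagonal slot
    for i in range(min(nb_filter, stack_size)):
        weights[i, i, :, :] = kernel
    return K.variable(weights, name=name)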
def create_actor_network(self, state_size, action_dim):
    print("Now we build the model")
    S = Input(shape=[state_size])
    h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
    h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
    Steering = Dense(1, activation='tanh',
                     init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
    Acceleration = Dense(1, activation='sigmoid',
                         init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
    Brake = Dense(1, activation='sigmoid',
                  init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
    V = merge([Steering, Acceleration, Brake], mode='concat')
    model = Model(input=S, output=V)
    return model, model.trainable_weights, S
def train(epochs=1, batchsize=128):
    batchCount = X_train.shape[0] / batchsize
    print 'Epochs', epochs
    print 'Batch_size', batchsize
    print 'Batches per epoch', batchCount
    # range returns a list, while xrange returns a generator
    for e in xrange(1, epochs + 1):
        print '-' * 15, 'Epoch %d' % e, '-' * 15
        for _ in tqdm(xrange(batchCount)):
            # Get a random set of input noise and images
            noise = np.random.normal(0, 1, size=[batchsize, randomDim])
            imageBatch = X_train[np.random.randint(0, X_train.shape[0], size=batchsize)]
            # Generate fake MNIST images
            generatedImages = generator.predict(noise)
            # Default is axis=0, equivalent to vstack (stacks vertically)
            X = np.concatenate([imageBatch, generatedImages])
            # WGAN-style targets: -1 for the real images, +1 for the generated ones
            yDis = np.ones(2 * batchsize)
            yDis[:batchsize] = -1
            # Train discriminator
            discriminator.trainable = True
            dloss = discriminator.train_on_batch(X, yDis)
            # Train generator
            noise = np.random.normal(0, 1, size=[batchsize, randomDim])
            yGen = np.ones(batchsize) * -1
            discriminator.trainable = False
            gloss = gan.train_on_batch(noise, yGen)
            # Weight clipping (disabled in this version):
            # d_weight = discriminator.get_weights()
            # d_weight = clip_weight(d_weight, -0.01, 0.01)
            # discriminator.set_weights(d_weight)
        # Store loss of most recent batch from this epoch
        Dloss.append(dloss)
        Gloss.append(gloss)
        if e == 1 or e % 5 == 0:
            plotGeneratedImages(e)
            saveModels(e)
    plot_loss(e)