We extracted the following 12 code examples from open source Python projects to illustrate how to use keras.layers.GaussianNoise().
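Before the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below; the layer sizes and data are made up for illustration) of typical GaussianNoise usage. The layer adds zero-mean Gaussian noise with the given standard deviation and is only active during training; at inference time it behaves as an identity op.

# Minimal sketch, not from the projects below: GaussianNoise as input regularization.
import numpy as np
from keras.models import Sequential
from keras.layers import GaussianNoise, Dense

model = Sequential([
    GaussianNoise(0.1, input_shape=(20,)),  # corrupt inputs with stddev=0.1 during training only
    Dense(64, activation='relu'),
    Dense(1),
])
model.compile(optimizer='adam', loss='mse')

x = np.random.rand(32, 20).astype('float32')
y = np.random.rand(32, 1).astype('float32')
model.fit(x, y, epochs=1, verbose=0)   # noise is applied here
preds = model.predict(x)               # no noise is applied at inference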
def build_encoder(self,input_shape):
    return [Reshape((*input_shape,1)),
            GaussianNoise(self.parameters['noise']),
            BN(),
            *[Convolution2D(self.parameters['clayer'],(3,3),
                            activation=self.parameters['activation'],padding='same', use_bias=False),
              Dropout(self.parameters['dropout']),
              BN(),
              MaxPooling2D((2,2)),],
            *[Convolution2D(self.parameters['clayer'],(3,3),
                            activation=self.parameters['activation'],padding='same', use_bias=False),
              Dropout(self.parameters['dropout']),
              BN(),
              MaxPooling2D((2,2)),],
            flatten,
            Sequential([
                Dense(self.parameters['layer'], activation=self.parameters['activation'], use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['N']*self.parameters['M']),
            ])]
def build_encoder(self,input_shape):
    return [Reshape((*input_shape,1)),
            GaussianNoise(0.1),
            BN(),
            Convolution2D(self.parameters['clayer'],(3,3),
                          activation=self.parameters['activation'],padding='same', use_bias=False),
            Dropout(self.parameters['dropout']),
            BN(),
            MaxPooling2D((2,2)),
            Convolution2D(self.parameters['clayer'],(3,3),
                          activation=self.parameters['activation'],padding='same', use_bias=False),
            Dropout(self.parameters['dropout']),
            BN(),
            MaxPooling2D((2,2)),
            Convolution2D(self.parameters['clayer'],(3,3),
                          activation=self.parameters['activation'],padding='same', use_bias=False),
            Dropout(self.parameters['dropout']),
            BN(),
            MaxPooling2D((2,2)),
            flatten,]
def create_trainable_model(self, sequences, pred_length, proxy_layer=None,
                           need_noise_dropout=False, stddev=5., sample_stddev=None):
    from keras.layers import Input, GaussianNoise
    from keras.models import Model
    from pp_layer import HawkesLayer

    if self.sequence_weights is None:
        sys.stderr.write(str({
            'error info': 'unpretrained generator',
        }) + '\n')
        sys.stderr.flush()

    x = Input(batch_shape=(1,1), dtype='int32')
    hawkes_layer = HawkesLayer(sequences, pred_length,
                               sequence_weights=self.sequence_weights,
                               proxy_layer=proxy_layer,
                               sample_stddev=sample_stddev)
    y = hawkes_layer(x)
    if need_noise_dropout == True:
        y = GaussianNoise(stddev)(y)

    model = Model(inputs=[x], outputs=[y], name='hawkes_output')

    self.model = model
    self.hawkes_layer = hawkes_layer
    return model
def CNN(input_shape=None, classes=1000):
    inputs = Input(shape=input_shape)

    # Block 1
    x = GaussianNoise(0.3)(inputs)
    x = CBRD(x, 64)
    x = CBRD(x, 64)
    x = MaxPooling2D()(x)

    # Block 2
    x = CBRD(x, 128)
    x = CBRD(x, 128)
    x = MaxPooling2D()(x)

    # Block 3
    x = CBRD(x, 256)
    x = CBRD(x, 256)
    x = CBRD(x, 256)
    x = MaxPooling2D()(x)

    # Classification block
    x = Flatten(name='flatten')(x)
    x = DBRD(x, 4096)
    x = DBRD(x, 4096)
    x = Dense(classes, activation='softmax', name='predictions')(x)

    model = Model(inputs=inputs, outputs=x)
    return model
def build_encoder(self,input_shape):
    return [GaussianNoise(self.parameters['noise']),
            BN(),
            Dense(self.parameters['layer'], activation='relu', use_bias=False),
            BN(),
            Dropout(self.parameters['dropout']),
            Dense(self.parameters['layer'], activation='relu', use_bias=False),
            BN(),
            Dropout(self.parameters['dropout']),
            Dense(self.parameters['layer'], activation='relu', use_bias=False),
            BN(),
            Dropout(self.parameters['dropout']),
            Dense(self.parameters['N']*self.parameters['M']),]
def build_encoder(self,input_shape):
    last_convolution = np.array(input_shape) // 8
    self.parameters['clayer'] = 8
    self.parameters['N'] = int(np.prod(last_convolution)*self.parameters['clayer'] // self.parameters['M'])
    return [Reshape((*input_shape,1)),
            GaussianNoise(0.1),
            BN(),
            Convolution2D(16,(3,3),
                          activation=self.parameters['activation'],padding='same', use_bias=False),
            Dropout(self.parameters['dropout']),
            BN(),
            MaxPooling2D((2,2)),
            Convolution2D(64,(3,3),
                          activation=self.parameters['activation'],padding='same', use_bias=False),
            SpatialDropout2D(self.parameters['dropout']),
            BN(),
            MaxPooling2D((2,2)),
            Convolution2D(64,(3,3),
                          activation=self.parameters['activation'],padding='same', use_bias=False),
            SpatialDropout2D(self.parameters['dropout']),
            BN(),
            MaxPooling2D((2,2)),
            Convolution2D(64,(1,1),
                          activation=self.parameters['activation'],padding='same', use_bias=False),
            SpatialDropout2D(self.parameters['dropout']),
            BN(),
            Convolution2D(self.parameters['clayer'],(1,1),
                          padding='same'),
            flatten,
    ]

# mixin classes ###############################################################
# Now effectively 3 subclasses: GumbelSoftmax in the output, Convolution, Gaussian.
# There are 4 more results of mixins:
def test_keras_import(self):
    model = Sequential()
    model.add(GaussianNoise(stddev=0.1, input_shape=(1, 16)))
    model.build()
    self.keras_param_test(model, 0, 1)
def test_keras_export(self):
    tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                              'keras_export_test.json'), 'r')
    response = json.load(tests)
    tests.close()
    net = yaml.safe_load(json.dumps(response['net']))
    net = {'l0': net['Input'], 'l1': net['GaussianNoise']}
    net['l0']['connection']['output'].append('l1')
    inp = data(net['l0'], '', 'l0')['l0']
    net = gaussian_noise(net['l1'], [inp], 'l1')
    model = Model(inp, net['l1'])
    self.assertEqual(model.layers[1].__class__.__name__, 'GaussianNoise')
def gaussian_noise(layer, layer_in, layerId):
    stddev = layer['params']['stddev']
    out = {layerId: GaussianNoise(stddev=stddev)(*layer_in)}
    return out
def keepsize_256(nx, ny, noise, depth, activation='relu', n_filters=64, l2_reg=1e-7):
    """
    Deep residual network that keeps the size of the input throughout the whole network
    """

    def residual(inputs, n_filters):
        x = ReflectionPadding2D()(inputs)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = Activation(activation)(x)
        x = ReflectionPadding2D()(x)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = add([x, inputs])
        return x

    inputs = Input(shape=(nx, ny, 1))
    x = GaussianNoise(noise)(inputs)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x0 = Activation(activation)(x)

    x = residual(x0, n_filters)
    for i in range(depth-1):
        x = residual(x, n_filters)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = add([x, x0])

    # Upsampling for superresolution
    x = UpSampling2D()(x)
    x = ReflectionPadding2D()(x)
    x = Conv2D(4*n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x = Activation(activation)(x)

    final = Conv2D(1, (1, 1), padding='same', kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)

    return Model(inputs=inputs, outputs=final)
def test_delete_channels_noise(channel_index, data_format):
    layer_test_helper_flatten_2d(GaussianNoise(0.5), channel_index, data_format)
    layer_test_helper_flatten_2d(GaussianDropout(0.5), channel_index, data_format)
    layer_test_helper_flatten_2d(AlphaDropout(0.5), channel_index, data_format)
def keepsize(nx, ny, noise, depth, activation='relu', n_filters=64, l2_reg=1e-7):
    """
    Deep residual network that keeps the size of the input throughout the whole network
    """

    def residual(inputs, n_filters):
        x = ReflectionPadding2D()(inputs)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = Activation(activation)(x)
        x = ReflectionPadding2D()(x)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = add([x, inputs])
        return x

    inputs = Input(shape=(nx, ny, 1))
    x = GaussianNoise(noise)(inputs)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x0 = Activation(activation)(x)

    x = residual(x0, n_filters)
    for i in range(depth-1):
        x = residual(x, n_filters)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = add([x, x0])

    # Upsampling for superresolution
    x = UpSampling2D()(x)
    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x = Activation(activation)(x)

    final = Conv2D(1, (1, 1), padding='same', kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)

    return Model(inputs=inputs, outputs=final)