The following code examples, extracted from open-source Python projects, illustrate how to use keras.engine.Layer().
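Before the project examples, a minimal self-contained sketch of what subclassing keras.engine.Layer looks like may be useful. The layer below (a hypothetical trainable elementwise scale, not taken from any of the projects that follow) is written against the Keras 2 API, which most of the examples on this page also use:

import keras.backend as K
from keras.engine import Layer


class Scale(Layer):
    """Hypothetical example: multiplies the input by one trainable scalar."""

    def build(self, input_shape):
        # Weights are created here, once the input shape is known.
        self.alpha = self.add_weight(name='alpha', shape=(1,),
                                     initializer='ones', trainable=True)
        super(Scale, self).build(input_shape)  # marks the layer as built

    def call(self, x, mask=None):
        # The forward computation of the layer.
        return self.alpha * x

    def compute_output_shape(self, input_shape):
        # The output has the same shape as the input.
        return input_shape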
def build(self, input_shape):
    # Used purely for shape validation.
    if not isinstance(input_shape, list) or len(input_shape) != 2:
        raise ValueError('A `Match` layer should be called '
                         'on a list of 2 inputs.')
    self.shape1 = input_shape[0]
    self.shape2 = input_shape[1]
    if self.shape1[0] != self.shape2[0]:
        raise ValueError(
            'Dimension incompatibility %s != %s. ' % (self.shape1[0], self.shape2[0]) +
            'Layer shapes: %s, %s' % (self.shape1, self.shape2))
    if self.shape1[2] != self.shape2[2]:
        raise ValueError(
            'Dimension incompatibility %s != %s. ' % (self.shape1[2], self.shape2[2]) +
            'Layer shapes: %s, %s' % (self.shape1, self.shape2))
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, it needs to know '
                        'its batch size. Specify the batch size '
                        'of your input tensors: \n'
                        '- If using a Sequential model, '
                        'specify the batch size by passing '
                        'a `batch_input_shape` '
                        'argument to your first layer.\n'
                        '- If using the functional API, specify '
                        'the batch size by passing a '
                        '`batch_shape` argument to your Input layer.')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.output_dim))]
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, a complete '
                        'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.hidden_recurrent_dim)))
        K.set_value(self.states[1],
                    np.zeros((input_shape[0], self.input_dim)))
        K.set_value(self.states[2],
                    np.zeros((input_shape[0], self.hidden_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.hidden_recurrent_dim)),
                       K.zeros((input_shape[0], self.input_dim)),
                       K.zeros((input_shape[0], self.hidden_dim))]
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise ValueError('If a RNN is stateful, it needs to know '
                         'its batch size. Specify the batch size '
                         'of your input tensors: \n'
                         '- If using a Sequential model, '
                         'specify the batch size by passing '
                         'a `batch_input_shape` '
                         'argument to your first layer.\n'
                         '- If using the functional API, specify '
                         'the batch size by passing a '
                         '`batch_shape` argument to your Input layer.')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.input_dim)))
        K.set_value(self.states[1],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.input_dim)),
                       K.zeros((input_shape[0], self.output_dim))]
def build(self, input_shape):
    # Used purely for shape validation.
    if not isinstance(input_shape, list) or len(input_shape) != 2:
        raise ValueError('A `MatchTensor` layer should be called '
                         'on a list of 2 inputs.')
    shape1 = input_shape[0]
    shape2 = input_shape[1]
    if shape1[0] != shape2[0]:
        raise ValueError(
            'Dimension incompatibility %s != %s. ' % (shape1[0], shape2[0]) +
            'Layer shapes: %s, %s' % (shape1, shape2))
    if self.init_diag:
        if shape1[2] != shape2[2]:
            raise ValueError('Using `init_diag` requires both inputs '
                             'to have the same embedding size.')
        M_diag = np.float32(np.random.uniform(
            -0.05, 0.05, [self.channel, shape1[2], shape2[2]]))
        for i in range(self.channel):
            for j in range(shape1[2]):
                M_diag[i][j][j] = 1.0
        # `add_weight` expects an initializer, not a raw array, so the
        # pre-built matrix is wrapped in a Constant initializer
        # (assumes `from keras import initializers`).
        self.M = self.add_weight(
            name='M',
            shape=(self.channel, shape1[2], shape2[2]),
            initializer=initializers.Constant(M_diag),
            trainable=True)
    else:
        self.M = self.add_weight(
            name='M',
            shape=(self.channel, shape1[2], shape2[2]),
            initializer='uniform',
            trainable=True)
def call(self, x, mask=None):
    assert self.built, 'Layer must be built before being called'
    input_shape = K.int_shape(x)
    reduction_axes = list(range(len(input_shape)))
    del reduction_axes[self.axis]
    broadcast_shape = [1] * len(input_shape)
    broadcast_shape[self.axis] = input_shape[self.axis]
    if sorted(reduction_axes) == list(range(K.ndim(x)))[:-1]:
        x_normed = K.batch_normalization(
            x, self.running_mean, self.running_std,
            self.beta, self.gamma,
            epsilon=self.epsilon)
    else:
        # Need broadcasting.
        broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
        broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
        broadcast_beta = K.reshape(self.beta, broadcast_shape)
        broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
        x_normed = K.batch_normalization(
            x, broadcast_running_mean, broadcast_running_std,
            broadcast_beta, broadcast_gamma,
            epsilon=self.epsilon)
    return x_normed
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, a complete '
                        'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.output_dim)))
        K.set_value(self.states[1],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.output_dim)),
                       K.zeros((input_shape[0], self.output_dim))]
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, a complete '
                        'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.output_dim))]
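Every reset_states variant above assumes a fixed, known batch size. A brief usage sketch of the pattern they support, here with the stock keras.layers.LSTM rather than the custom recurrent layers these methods come from (all shapes are illustrative):

import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense

batch_size, timesteps, features = 32, 10, 8

model = Sequential()
# A stateful RNN must know its batch size up front, hence batch_input_shape.
model.add(LSTM(16, stateful=True,
               batch_input_shape=(batch_size, timesteps, features)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')

x = np.random.rand(batch_size, timesteps, features)
model.predict(x, batch_size=batch_size)
# Zero the carried-over hidden state before feeding an unrelated sequence.
model.reset_states()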
def get_h_given_x_layer(self, as_initial_layer=False):
    """
    Generates a new Dense layer that computes the mean of the Bernoulli
    distribution p(h|x), i.e. p(h=1|x).
    """
    if as_initial_layer:
        layer = Dense(input_dim=self.input_dim, output_dim=self.hidden_dim,
                      activation=self.activation,
                      weights=[self.W.get_value(), self.bh.get_value()])
    else:
        layer = Dense(output_dim=self.hidden_dim, activation=self.activation,
                      weights=[self.W.get_value(), self.bh.get_value()])
    return layer
def get_x_given_h_layer(self, as_initial_layer=False):
    """
    Generates a new Dense layer that computes the mean of the Bernoulli
    distribution p(x|h), i.e. p(x=1|h).
    """
    if as_initial_layer:
        layer = Dense(input_dim=self.hidden_dim, output_dim=self.input_dim,
                      activation='sigmoid',
                      weights=[self.W.get_value().T, self.bx.get_value()])
    else:
        layer = Dense(output_dim=self.input_dim, activation='sigmoid',
                      weights=[self.W.get_value().T, self.bx.get_value()])
    return layer
def get_x_given_h_layer(self, as_initial_layer=False):
    """
    Generates a new Dense layer that computes the mean of the Gaussian
    distribution p(x|h).
    """
    if not as_initial_layer:
        layer = Dense(output_dim=self.input_dim, activation='linear',
                      weights=[self.W.get_value().T, self.bx.get_value()])
    else:
        layer = Dense(input_dim=self.hidden_dim, output_dim=self.input_dim,
                      activation='linear',
                      weights=[self.W.get_value().T, self.bx.get_value()])
    return layer
def get_h_given_x_layer(self, as_initial_layer=False):
    """
    Generates a new Dense layer that computes the mean of the Bernoulli
    distribution p(h|x), i.e. p(h=1|x).
    """
    if as_initial_layer:
        layer = Dense(input_dim=self.input_dim, output_dim=self.hidden_dim,
                      activation="relu",
                      weights=[self.W.get_value(), self.bh.get_value()])
    else:
        layer = Dense(output_dim=self.hidden_dim, activation="relu",
                      weights=[self.W.get_value(), self.bh.get_value()])
    return layer
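The four generator methods above export trained RBM parameters as ordinary Dense layers via the weights argument. A sketch of the underlying mechanism, with W and bh as stand-ins for a trained weight matrix and hidden bias (the real methods read these from self.W and self.bh):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

input_dim, hidden_dim = 784, 128
# Stand-ins for trained RBM parameters.
W = np.random.normal(scale=0.01, size=(input_dim, hidden_dim)).astype('float32')
bh = np.zeros(hidden_dim, dtype='float32')

# Equivalent in spirit to get_h_given_x_layer(as_initial_layer=True):
# a Dense layer initialised with the RBM's weights, computing p(h=1|x).
model = Sequential()
model.add(Dense(hidden_dim, input_dim=input_dim, activation='sigmoid',
                weights=[W, bh]))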
def __init__(self, patch_size=5, dim_ordering='tf', border_mode='valid',
             stride=(2, 2), activation=None, **kwargs):
    if border_mode != 'valid':
        raise ValueError('Invalid border mode for Correlation Layer '
                         '(only "valid" is supported):', border_mode)
    self.kernel_size = patch_size
    self.subsample = stride
    self.dim_ordering = dim_ordering
    self.border_mode = border_mode
    self.activation = activations.get(activation)
    super(Normalized_Correlation_Layer, self).__init__(**kwargs)
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, a complete '
                        'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[1], input_shape[0], self.output_dim)))
        K.set_value(self.states[1],
                    np.zeros((input_shape[1], input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[1], input_shape[0], self.output_dim)),
                       K.zeros((input_shape[1], input_shape[0], self.output_dim))]
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, a complete '
                        'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.output_dim)))
        K.set_value(self.states[1],
                    np.zeros((input_shape[1], input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.output_dim)),
                       K.zeros((input_shape[1], input_shape[0], self.output_dim))]
def return_custom():
    import keras.backend as K
    from keras.engine import Layer

    class Dropout_cust(Layer):  # pragma: no cover
        '''Applies Dropout to the input.'''

        def __init__(self, p, **kwargs):
            self.p = p
            if 0. < self.p < 1.:
                self.uses_learning_phase = True
            self.supports_masking = True
            super(Dropout_cust, self).__init__(**kwargs)

        def call(self, x, mask=None):
            if 0. < self.p < 1.:
                x = K.in_train_phase(K.dropout(x, level=self.p), x)
            return x

        def get_config(self):
            config = {'p': self.p}
            base_config = super(Dropout_cust, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))

    return Dropout_cust
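Since the class is created inside return_custom, it has to be retrieved before use; a short usage sketch (the surrounding model is illustrative):

from keras.models import Sequential
from keras.layers import Dense

Dropout_cust = return_custom()

model = Sequential()
model.add(Dense(64, input_dim=100, activation='relu'))
# Drops 30% of activations during training; acts as identity at test time.
model.add(Dropout_cust(0.3))
model.add(Dense(10, activation='softmax'))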
def create_predictive_net(self) -> Sequential:
    """Returns the part of the net that predicts grapheme probabilities
    given a spectrogram. A loss operation is not contained.
    As described here: https://arxiv.org/pdf/1609.03193v2.pdf
    """
    def convolution(name: str, filter_count: int, filter_length: int, strides: int = 1,
                    activation: str = self.activation, input_dim: int = None,
                    never_dropout: bool = False) -> List[Layer]:
        return ([] if self.dropout is None or never_dropout else [
            Dropout(self.dropout, input_shape=(None, input_dim),
                    name="dropout_before_{}".format(name))]) + [
            Conv1D(filters=filter_count, kernel_size=filter_length, strides=strides,
                   activation=activation, name=name, input_shape=(None, input_dim),
                   padding="same")]

    main_filter_count = 250

    def input_convolutions() -> List[Conv1D]:
        raw_wave_convolution_if_needed = convolution(
            "wave_conv", filter_count=main_filter_count, filter_length=250,
            strides=160,
            input_dim=self.input_size_per_time_step) if self.use_raw_wave_input else []
        return raw_wave_convolution_if_needed + convolution(
            "striding_conv", filter_count=main_filter_count, filter_length=48, strides=2,
            input_dim=None if self.use_raw_wave_input else self.input_size_per_time_step)

    def inner_convolutions() -> List[Conv1D]:
        return [layer for i in range(1, 8) for layer in
                convolution("inner_conv_{}".format(i),
                            filter_count=main_filter_count, filter_length=7)]

    def output_convolutions() -> List[Conv1D]:
        out_filter_count = 2000
        return [layer for conv in [
            convolution("big_conv_1", filter_count=out_filter_count, filter_length=32,
                        never_dropout=True),
            convolution("big_conv_2", filter_count=out_filter_count, filter_length=1,
                        never_dropout=True),
            convolution("output_conv",
                        filter_count=self.grapheme_encoding.grapheme_set_size,
                        filter_length=1, activation=self.output_activation,
                        never_dropout=True)
        ] for layer in conv]

    layers = input_convolutions() + inner_convolutions() + output_convolutions()

    if self.frozen_layer_count > 0:
        log("All but {} layers frozen.".format(len(layers) - self.frozen_layer_count))

    for layer in layers[:self.frozen_layer_count]:
        layer.trainable = False

    return Sequential(layers)
def reset_states(self):
    print("begin reset_states(self)")
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    self.depth = 0
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, a complete '
                        'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
        # previous inner memory
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.controller_output_dim)))
        # previous inner cell
        K.set_value(self.states[1],
                    np.zeros((input_shape[0], self.controller_output_dim)))
        # previous memory
        K.set_value(self.states[2],
                    np.zeros((input_shape[0], self.memory_dim * self.memory_size)))
        # previous writing addresses
        K.set_value(self.states[3],
                    np.zeros((input_shape[0], self.num_write_head * self.memory_size)))
        # previous reading addresses
        K.set_value(self.states[4],
                    np.zeros((input_shape[0], self.num_read_head * self.memory_size)))
        # previous reading content
        K.set_value(self.states[5],
                    np.zeros((input_shape[0], self.num_read_head * self.memory_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.controller_output_dim)),  # h_tm1
                       K.zeros((input_shape[0], self.controller_output_dim)),  # c_tm1
                       K.zeros((input_shape[0], self.memory_dim * self.memory_size)),
                       K.zeros((input_shape[0], self.num_write_head * self.memory_size)),
                       K.zeros((input_shape[0], self.num_read_head * self.memory_size)),
                       K.zeros((input_shape[0], self.num_read_head * self.memory_dim))]
    print("end reset_states(self)\n")
def call(self, x, mask=None):
    if self.mode == 0 or self.mode == 2:
        assert self.built, 'Layer must be built before being called'
        input_shape = self.input_spec[0].shape
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        # case: train mode (uses stats of the current batch)
        mean = K.mean(x, axis=reduction_axes)
        broadcast_mean = K.reshape(mean, broadcast_shape)
        std = K.mean(K.square(x - broadcast_mean) + self.epsilon,
                     axis=reduction_axes)
        std = K.sqrt(std)
        broadcast_std = K.reshape(std, broadcast_shape)
        mean_update = self.momentum * self.running_mean + (1 - self.momentum) * mean
        std_update = self.momentum * self.running_std + (1 - self.momentum) * std

        if self.mode == 2:
            x_normed = (x - broadcast_mean) / (broadcast_std + self.epsilon)
            out = (K.reshape(self.gamma, broadcast_shape) * x_normed +
                   K.reshape(self.beta, broadcast_shape))
        else:
            # mode 0
            self.called_with = x
            self.updates = [(self.running_mean, mean_update),
                            (self.running_std, std_update)]
            x_normed = (x - broadcast_mean) / (broadcast_std + self.epsilon)

            # case: test mode (uses running averages)
            broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
            broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
            x_normed_running = ((x - broadcast_running_mean) /
                                (broadcast_running_std + self.epsilon))

            # pick the normalized form of x corresponding to the training phase
            x_normed = K.in_train_phase(x_normed, x_normed_running)
            out = (K.reshape(self.gamma, broadcast_shape) * x_normed +
                   K.reshape(self.beta, broadcast_shape))
    elif self.mode == 1:
        # sample-wise normalization
        m = K.mean(x, axis=-1, keepdims=True)
        std = K.sqrt(K.var(x, axis=-1, keepdims=True) + self.epsilon)
        x_normed = (x - m) / (std + self.epsilon)
        out = self.gamma * x_normed + self.beta
    return out
def build(self, input_shape):
    if (input_shape[2] % self.shared_pool[0] != 0 or
            input_shape[3] % self.shared_pool[1] != 0):
        raise Exception('Layer only works if input dimensions '
                        'can be divided by shared pool dimensions')
    nb_x_pools = int(input_shape[2] / self.shared_pool[0])
    nb_y_pools = int(input_shape[3] / self.shared_pool[1])
    output_shape = self.get_output_shape_for(input_shape)
    if self.dim_ordering == 'th':
        _, nb_filter, output_row, output_col = output_shape
        input_filter = input_shape[1]
    elif self.dim_ordering == 'tf':
        _, output_row, output_col, nb_filter = output_shape
        input_filter = input_shape[3]
    else:
        raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
    self.output_row = output_row
    self.output_col = output_col
    # (output_row * output_col, self.nb_row * self.nb_col * input_filter, nb_filter)
    self.W_shape = (nb_filter, input_filter, nb_x_pools, nb_y_pools)
    self.W = self.init(self.W_shape, name='{}_W'.format(self.name))
    if self.bias:
        self.b = K.zeros((nb_filter, nb_x_pools, nb_y_pools),
                         name='{}_b'.format(self.name))
        self.trainable_weights = [self.W, self.b]
    else:
        self.trainable_weights = [self.W]

    self.regularizers = []
    if self.W_regularizer:
        self.W_regularizer.set_param(self.W)
        self.regularizers.append(self.W_regularizer)
    if self.bias and self.b_regularizer:
        self.b_regularizer.set_param(self.b)
        self.regularizers.append(self.b_regularizer)
    if self.activity_regularizer:
        self.activity_regularizer.set_layer(self)
        self.regularizers.append(self.activity_regularizer)

    self.constraints = {}
    if self.W_constraint:
        self.constraints[self.W] = self.W_constraint
    if self.bias and self.b_constraint:
        self.constraints[self.b] = self.b_constraint

    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
def build(self, input_shape):
    if (input_shape[2] % self.nb_row != 0 or
            input_shape[3] % self.nb_col != 0):
        raise Exception('Layer only works if input dimensions '
                        'can be divided by filter/stride dimensions')
    output_shape = self.get_output_shape_for(input_shape)
    if self.dim_ordering == 'th':
        _, nb_filter, output_row, output_col = output_shape
        input_filter = input_shape[1]
    elif self.dim_ordering == 'tf':
        _, output_row, output_col, nb_filter = output_shape
        input_filter = input_shape[3]
    else:
        raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
    self.output_row = output_row
    self.output_col = output_col
    # (output_row * output_col, self.nb_row * self.nb_col * input_filter, nb_filter)
    self.W_shape = (nb_filter, input_filter, input_shape[2], input_shape[3])
    self.W = self.init(self.W_shape, name='{}_W'.format(self.name))
    if self.bias:
        self.b = K.zeros((output_row, output_col, nb_filter),
                         name='{}_b'.format(self.name))
        self.trainable_weights = [self.W, self.b]
    else:
        self.trainable_weights = [self.W]

    self.regularizers = []
    if self.W_regularizer:
        self.W_regularizer.set_param(self.W)
        self.regularizers.append(self.W_regularizer)
    if self.bias and self.b_regularizer:
        self.b_regularizer.set_param(self.b)
        self.regularizers.append(self.b_regularizer)
    if self.activity_regularizer:
        self.activity_regularizer.set_layer(self)
        self.regularizers.append(self.activity_regularizer)

    self.constraints = {}
    if self.W_constraint:
        self.constraints[self.W] = self.W_constraint
    if self.bias and self.b_constraint:
        self.constraints[self.b] = self.b_constraint

    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights