The following code examples, extracted from open-source Python projects, illustrate how to use keras.initializations.identity().
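keras.initializations.identity() is part of the Keras 1.x API (in Keras 2.x the module was replaced by keras.initializers, where the equivalent initializer is Identity). It accepts only a square 2-D shape and returns a backend variable holding a scaled identity matrix. Before the project examples, here is a minimal sketch of the call itself, assuming Keras 1.x is installed; the variable name W and the 4x4 shape are chosen only for illustration:

import numpy as np
from keras import backend as K
from keras import initializations

# identity() only accepts a square 2-D shape; anything else raises an Exception.
W = initializations.identity((4, 4), name='W')

print(K.get_value(W))                          # 4x4 identity matrix
print(np.allclose(K.get_value(W), np.eye(4)))  # True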
def buildAttention(self):
    q_relu = self.tensors['q_relu']
    a_relu = self.tensors['a_relu']
    with tf.name_scope("attention"):
        W = identity([self.params['nb_filter'], self.params['nb_filter']], name='W')
        batch = tf.shape(q_relu)[0]
        q_matmul = tf.batch_matmul(q_relu, tf.tile(tf.expand_dims(W, [0]),
                                                   tf.pack([batch, tf.constant(1), tf.constant(1)])))
        qa_attention = tf.batch_matmul(q_matmul, a_relu, adj_x=False, adj_y=True, name="attention")
        # shape = (batch, q_length, 1)
        qa_attention = tf.tanh(qa_attention)
        q_max = tf.reduce_max(qa_attention, reduction_indices=[2], keep_dims=True, name='q_max')
        # shape = (batch, 1, a_length)
        a_max = tf.reduce_max(qa_attention, reduction_indices=[1], keep_dims=True, name='a_max')
        # shape = (batch, q_length, 1)
        q_softmax = tf.expand_dims(tf.nn.softmax(tf.squeeze(q_max, [2])), -1)
        # shape = (batch, a_length, 1)
        a_softmax = tf.expand_dims(tf.nn.softmax(tf.squeeze(a_max, [1])), -1)
        # https://www.tensorflow.org/versions/r0.9/api_docs/python/math_ops.html#batch_matmul
        # shape = (batch, NUM_FILTERS, 1)
        q_feature = tf.batch_matmul(q_relu, q_softmax, adj_x=True, adj_y=False)
        a_feature = tf.batch_matmul(a_relu, a_softmax, adj_x=True, adj_y=False)
        self.tensors['q_feature'] = q_feature
        self.tensors['a_feature'] = a_feature
        self.tensors.setdefault('weights', []).append(W)
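The example above targets the pre-1.0 TensorFlow API: tf.batch_matmul, tf.pack, reduction_indices, and keep_dims were all renamed or removed in later releases. The core computation is attentive pooling over a bilinear score matrix G = tanh(Q · W · Aᵀ), where W starts out as an identity matrix so that the attention initially reduces to a plain dot product between question and answer features. The sketch below re-expresses that step with tf.einsum under current TensorFlow; the function name, tensor names, and shapes are assumptions carried over from the example, not code from the original project:

import tensorflow as tf

def attentive_pooling(q_relu, a_relu, W):
    # q_relu: (batch, q_length, nb_filter), a_relu: (batch, a_length, nb_filter)
    # W: (nb_filter, nb_filter), e.g. initialized to the identity matrix.
    # Bilinear attention scores G = tanh(Q . W . A^T), shape (batch, q_length, a_length).
    scores = tf.tanh(tf.einsum('bqf,fg,bag->bqa', q_relu, W, a_relu))
    # Max-pool the score matrix along each axis, then softmax to get attention weights.
    q_weights = tf.nn.softmax(tf.reduce_max(scores, axis=2))  # (batch, q_length)
    a_weights = tf.nn.softmax(tf.reduce_max(scores, axis=1))  # (batch, a_length)
    # Weighted sums give one pooled feature vector per side, shape (batch, nb_filter).
    q_feature = tf.einsum('bqf,bq->bf', q_relu, q_weights)
    a_feature = tf.einsum('baf,ba->bf', a_relu, a_weights)
    return q_feature, a_feature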
def test_identity(tensor_shape):
    if len(tensor_shape) > 2:
        with pytest.raises(Exception):
            _runner(initializations.identity, tensor_shape,
                    target_mean=1./SHAPE[0], target_max=1.)
    else:
        _runner(initializations.identity, tensor_shape,
                target_mean=1./SHAPE[0], target_max=1.)
def test_identity(tensor_shape):
    if len(tensor_shape) > 2:
        with pytest.raises(Exception):
            _runner(initializations.identity, tensor_shape,
                    target_mean=1. / SHAPE[0], target_max=1.)
    else:
        _runner(initializations.identity, tensor_shape,
                target_mean=1. / SHAPE[0], target_max=1.)
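Both test functions come from Keras's own test suite for the initializations module. The rank check exists because identity() supports only square 2-D shapes, so any tensor_shape with more than two dimensions is expected to raise; for a square shape the resulting matrix has a maximum of 1 and a mean of 1/SHAPE[0]. _runner is a helper defined in that test file and its exact body is not shown here, so the stand-in below is a hypothetical reconstruction (the SHAPE constant and the tolerance are assumptions) meant only to illustrate the kind of statistics being asserted:

import numpy as np
from keras import backend as K
from keras import initializations

SHAPE = (100, 100)  # assumed square test shape; the identity's mean is then 1/100

def _runner(init, shape, target_mean=None, target_max=None):
    # Hypothetical stand-in for the helper used above: build the variable,
    # pull its value out of the backend, and check summary statistics.
    variable = init(shape)
    output = K.get_value(variable)
    if target_mean is not None:
        assert abs(output.mean() - target_mean) < 1e-4
    if target_max is not None:
        assert abs(output.max() - target_max) < 1e-4

# For a square shape this passes; for a 3-D shape identity() itself raises,
# which is what pytest.raises(Exception) captures in the tests above.
_runner(initializations.identity, SHAPE, target_mean=1. / SHAPE[0], target_max=1.)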