The following 5 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.math_ops.rsqrt().
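Before the project code, a quick orientation: rsqrt(x) computes the elementwise reciprocal square root, 1 / sqrt(x). This is a minimal sketch of my own, assuming TensorFlow 1.x (which all the examples below use):

import tensorflow as tf

x = tf.constant([1.0, 4.0, 16.0])
y = tf.rsqrt(x)  # elementwise 1 / sqrt(x)

with tf.Session() as sess:
    print(sess.run(y))  # [1.   0.5  0.25]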
def _sample_n(self, n, seed=None):
  # The sampling method comes from the fact that if:
  #   X ~ Normal(0, 1)
  #   Z ~ Chi2(df)
  #   Y = X / sqrt(Z / df)
  # then:
  #   Y ~ StudentT(df).
  shape = array_ops.concat([[n], self.batch_shape()], 0)
  normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
  df = self.df * array_ops.ones(self.batch_shape(), dtype=self.dtype)
  gamma_sample = random_ops.random_gamma(
      [n],
      0.5 * df,
      beta=0.5,
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, salt="student_t"))
  samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
  return samples * self.scale + self.loc  # Abs(scale) not wanted.
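The identity in the comment is easy to sanity-check outside TensorFlow. The NumPy sketch below (my illustration, not project code) draws Student-t samples the same way, using Gamma(df/2, rate 1/2) for the Chi2(df) draw, just as random_gamma with beta=0.5 does above:

import numpy as np

df, n = 5.0, 100000
x = np.random.standard_normal(n)                        # X ~ Normal(0, 1)
z = np.random.gamma(shape=0.5 * df, scale=2.0, size=n)  # Z ~ Chi2(df)
y = x / np.sqrt(z / df)                                 # Y ~ StudentT(df)

print(y.var())  # should approach df / (df - 2) = 5 / 3 for df = 5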
def _layer_norm_compute_python(x, epsilon, scale, bias):
  """Layer norm raw computation."""
  mean = tf.reduce_mean(x, axis=[-1], keep_dims=True)
  variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keep_dims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  return norm_x * scale + bias
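A hypothetical call, assuming TensorFlow 1.x and trainable scale/bias variables of the last-axis size (the variable names here are illustrative, not from the project):

import tensorflow as tf

x = tf.random_normal([8, 16])  # batch of 8 vectors, depth 16
scale = tf.get_variable("ln_scale", [16], initializer=tf.ones_initializer())
bias = tf.get_variable("ln_bias", [16], initializer=tf.zeros_initializer())
y = _layer_norm_compute_python(x, epsilon=1e-6, scale=scale, bias=bias)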
def setUp(self):
  super(CoreUnaryOpsTest, self).setUp()
  self.ops = [
      ('abs', operator.abs, math_ops.abs, core.abs_function),
      ('neg', operator.neg, math_ops.negative, core.neg),
      # TODO(shoyer): add unary + to core TensorFlow
      ('pos', None, None, None),
      ('sign', None, math_ops.sign, core.sign),
      ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
      ('square', None, math_ops.square, core.square),
      ('round', None, math_ops.round, core.round_function),
      ('sqrt', None, math_ops.sqrt, core.sqrt),
      ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
      ('log', None, math_ops.log, core.log),
      ('exp', None, math_ops.exp, core.exp),
      ('log', None, math_ops.log, core.log),
      ('ceil', None, math_ops.ceil, core.ceil),
      ('floor', None, math_ops.floor, core.floor),
      ('cos', None, math_ops.cos, core.cos),
      ('sin', None, math_ops.sin, core.sin),
      ('tan', None, math_ops.tan, core.tan),
      ('acos', None, math_ops.acos, core.acos),
      ('asin', None, math_ops.asin, core.asin),
      ('atan', None, math_ops.atan, core.atan),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
      ('digamma', None, math_ops.digamma, core.digamma),
      ('erf', None, math_ops.erf, core.erf),
      ('erfc', None, math_ops.erfc, core.erfc),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
  ]
  total_size = np.prod([v.size for v in self.original_lt.axes.values()])
  self.test_lt = core.LabeledTensor(
      math_ops.cast(self.original_lt, dtypes.float32) / total_size,
      self.original_lt.axes)
def _variance_scale_term(self):
  """Helper to `_covariance` and `_variance` which computes a shared scale."""
  return math_ops.rsqrt(1. + self.alpha_sum[..., None])
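Numerically this helper is just 1 / sqrt(1 + alpha_sum), with a trailing axis added so it broadcasts against the event dimension. A quick NumPy check of mine:

import numpy as np

alpha_sum = np.array([1.0, 3.0, 8.0])
scale = 1.0 / np.sqrt(1.0 + alpha_sum[..., None])  # shape (3, 1)
print(scale.ravel())  # [0.70710678 0.5        0.33333333]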
def __call__(self, query, previous_alignments):
  """Score the query based on the keys and values.

  Args:
    query: Tensor of dtype matching `self.values` and shape
      `[batch_size, query_depth]`.
    previous_alignments: Tensor of dtype matching `self.values` and shape
      `[batch_size, alignments_size]` (`alignments_size` is memory's
      `max_time`).

  Returns:
    alignments: Tensor of dtype matching `self.values` and shape
      `[batch_size, alignments_size]` (`alignments_size` is memory's
      `max_time`).
  """
  with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
    processed_query = self.query_layer(query) if self.query_layer else query
    # Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
    processed_query = array_ops.expand_dims(processed_query, 1)
    keys = self._keys
    dtype = query.dtype
    v = variable_scope.get_variable(
        "attention_v", [self._num_units], dtype=dtype)
    if self._normalize:
      # Scalar used in weight normalization.
      g = variable_scope.get_variable(
          "attention_g", dtype=dtype,
          initializer=math.sqrt((1. / self._num_units)))
      # Bias added prior to the nonlinearity.
      b = variable_scope.get_variable(
          "attention_b", [self._num_units], dtype=dtype,
          initializer=init_ops.zeros_initializer())
      # normed_v = g * v / ||v||
      normed_v = g * v * math_ops.rsqrt(
          math_ops.reduce_sum(math_ops.square(v)))
      score = math_ops.reduce_sum(
          normed_v * math_ops.tanh(keys + processed_query + b), [2])
    else:
      score = math_ops.reduce_sum(
          v * math_ops.tanh(keys + processed_query), [2])
    alignments = self._probability_fn(score, previous_alignments)
    return alignments, self.mask_func(score)
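Here rsqrt implements the weight-normalization step normed_v = g * v / ||v||_2 for the attention vector. A standalone sketch of just that step (my own, illustrative values only):

import tensorflow as tf

v = tf.constant([3.0, 4.0])
g = tf.constant(1.0)
normed_v = g * v * tf.rsqrt(tf.reduce_sum(tf.square(v)))  # g * v / ||v||

with tf.Session() as sess:
    print(sess.run(normed_v))  # [0.6 0.8], since ||v|| = 5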