The following 21 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.pad().
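array_ops.pad is the internal op behind the public tf.pad: paddings is an [n, 2] list giving the number of values to add before and after each of the n dimensions. A minimal sketch of the shared semantics (values are illustrative):

import tensorflow as tf

x = tf.constant([[1, 2],
                 [3, 4]])
# One row before, two rows after; one column on each side.
y = tf.pad(x, paddings=[[1, 2], [1, 1]])
# y has shape [5, 4]; the new entries are zeros (the default mode='CONSTANT').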
def conv_input_length(output_length, filter_size, padding, stride):
  """Determines input length of a convolution given output length.

  Arguments:
      output_length: integer.
      filter_size: integer.
      padding: one of "same", "valid", "full".
      stride: integer.

  Returns:
      The input length (integer).
  """
  if output_length is None:
    return None
  assert padding in {'same', 'valid', 'full'}
  if padding == 'same':
    pad = filter_size // 2
  elif padding == 'valid':
    pad = 0
  elif padding == 'full':
    pad = filter_size - 1
  return (output_length - 1) * stride - 2 * pad + filter_size
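A quick sanity check of the arithmetic above, in plain Python (values chosen for illustration): with 'same' padding, filter_size=3 and stride=2, an output length of 5 maps back to an input length of 9.

# 'same' padding: pad = 3 // 2 = 1
output_length, filter_size, stride, pad = 5, 3, 2, 1
input_length = (output_length - 1) * stride - 2 * pad + filter_size
assert input_length == 9  # (5 - 1) * 2 - 2 * 1 + 3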
def _forward(self, x):
  # Pad the last dim with a zeros vector. We need this because it lets us
  # infer the scale in the inverse function.
  y = array_ops.expand_dims(x, dim=-1) if self._static_event_ndims == 0 else x
  ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
           else array_ops.rank(y))
  y = array_ops.pad(y, paddings=array_ops.concat(0, (
      array_ops.zeros((ndims - 1, 2), dtype=dtypes.int32),
      [[0, 1]])))

  # Set shape hints.
  if x.get_shape().ndims is not None:
    shape = x.get_shape().as_list()
    if self._static_event_ndims == 0:
      shape += [2]
    elif shape[-1] is not None:
      shape[-1] += 1
    shape = tensor_shape.TensorShape(shape)
    y.get_shape().assert_is_compatible_with(shape)
    y.set_shape(shape)

  # Since we only support event_ndims in [0, 1] and we do padding, we always
  # reduce over the last dimension, i.e., dim=-1 (which is the default).
  return nn_ops.softmax(y)
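The reason a single zero suffices: for a scalar logit (event_ndims == 0), softmax over the padded pair [x, 0] reduces to the sigmoid of x, so the padded coordinate implicitly carries the normalization. A NumPy sketch of that identity:

import numpy as np

x = 0.3                                # a scalar logit
y = np.array([x, 0.0])                 # pad the last dim with a zero
p = np.exp(y) / np.exp(y).sum()        # softmax over the padded pair
assert np.allclose(p[0], 1.0 / (1.0 + np.exp(-x)))  # == sigmoid(x)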
def resize_audio_with_crop_or_pad(image, target_height, target_width,
                                  dynamic_shape=False):
  image = tf.convert_to_tensor(image, name='audio')
  original_height, _ = _ImageDimensions(image, dynamic_shape=dynamic_shape)

  if target_height <= 0:
    raise ValueError('target_height must be > 0.')

  if dynamic_shape:
    max_ = math_ops.maximum
    min_ = math_ops.minimum
  else:
    max_ = max
    min_ = min

  height_diff = target_height - original_height
  offset_crop_height = max_(-height_diff // 2, 0)
  offset_pad_height = max_(height_diff // 2, 0)

  # Maybe crop if needed.
  cropped = crop_to_1d_bounding_box(image, offset_crop_height,
                                    min_(target_height, original_height),
                                    dynamic_shape=dynamic_shape)

  # Maybe pad if needed.
  resized = pad_to_1d_bounding_box(cropped, offset_pad_height, target_height,
                                   dynamic_shape=dynamic_shape)

  if resized.get_shape().ndims is None:
    raise ValueError('resized contains no shape.')
  if not resized.get_shape()[0].is_compatible_with(target_height):
    raise ValueError('resized height is not correct.')
  return resized
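The offset arithmetic above centres both the crop and the pad: a positive height_diff triggers padding, a negative one triggers cropping, and the floor divisions split the difference evenly. A worked example with hypothetical sizes:

# Target shorter than original -> centred crop, no padding.
original_height, target_height = 10, 6
height_diff = target_height - original_height   # -4
offset_crop_height = max(-height_diff // 2, 0)  # 2 rows cropped at the top
offset_pad_height = max(height_diff // 2, 0)    # 0
assert (offset_crop_height, offset_pad_height) == (2, 0)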
def temporal_padding(x, padding=(1, 1)):
  """Pads the middle dimension of a 3D tensor.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 2 integers, how many zeros to add at the start and
          end of dim 1.

  Returns:
      A padded 3D tensor.
  """
  assert len(padding) == 2
  pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
  return array_ops.pad(x, pattern)
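Usage sketch (shape arithmetic only; the import path matches the one used throughout this page):

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.zeros([2, 3, 4])                # (batch, steps, features)
y = array_ops.pad(x, [[0, 0], [1, 1], [0, 0]])
# y.shape == (2, 5, 4): only the middle (temporal) dimension grew.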
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
  """Pads the 2nd and 3rd dimensions of a 4D tensor.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 2 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 4D tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` nor
          `channels_first`.
  """
  assert len(padding) == 2
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  if data_format == 'channels_first':
    pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
  else:
    pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
  return array_ops.pad(x, pattern)
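The only difference between the two branches is where the no-op [0, 0] pairs sit: batch and channel dimensions are never padded. A sketch for channels_last (NHWC):

import tensorflow as tf

x = tf.zeros([1, 4, 4, 3])
pattern = [[0, 0], [1, 1], [2, 2], [0, 0]]  # height +1 each side, width +2
y = tf.pad(x, pattern)
# y.shape == (1, 6, 8, 3); batch and channels are untouched.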
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
  """Pads 5D tensor with zeros along the depth, height, width dimensions.

  Pads these dimensions with respectively "padding[0]", "padding[1]" and
  "padding[2]" zeros left and right.

  For 'channels_last' data_format, the 2nd, 3rd and 4th dimension will be
  padded. For 'channels_first' data_format, the 3rd, 4th and 5th dimension
  will be padded.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 3 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 5D tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` nor
          `channels_first`.
  """
  assert len(padding) == 3
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  assert len(padding[2]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  if data_format == 'channels_first':
    pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
               [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
  else:
    pattern = [[0, 0], [padding[0][0], padding[0][1]],
               [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]],
               [0, 0]]
  return array_ops.pad(x, pattern)
def _strict_1d_cumsum(tensor, len_tensor):
  """Cumsum of a 1D tensor with defined shape by padding and convolving."""
  # Assumes tensor shape is fully defined.
  with ops.name_scope('strict_1d_cumsum', values=[tensor]):
    if len_tensor == 0:
      return constant_op.constant([])
    len_pad = len_tensor - 1
    x = array_ops.pad(tensor, [[len_pad, 0]])
    h = array_ops.ones_like(x)
    return _strict_conv1d(x, h)[:len_tensor]


# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# See: https://github.com/tensorflow/tensorflow/issues/813
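The padding trick: left-pad the sequence with len - 1 zeros, so a length-len sliding window (an all-ones 'valid' correlation) at position i sums exactly the first i + 1 elements. A NumPy sketch of the same idea:

import numpy as np

t = np.array([1.0, 2.0, 3.0])
len_t = t.shape[0]
x = np.pad(t, (len_t - 1, 0), mode='constant')  # [0, 0, 1, 2, 3]
h = np.ones(len_t)                              # all-ones kernel
out = np.correlate(x, h, mode='valid')          # window i sums x[i:i + len_t]
assert np.allclose(out, np.cumsum(t))           # [1, 3, 6]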
def test_name(self):
  pad_lt = ops.pad(self.original_lt,
                   {'x': (1, 1),
                    'channel': ([], ['alpha'])})
  self.assertIn('lt_pad', pad_lt.name)
def test(self):
  pad_lt = ops.pad(self.original_lt,
                   {'x': (1, 1),
                    'channel': ([], ['alpha'])})

  golden_op = array_ops.pad(self.original_lt.tensor,
                            [[1, 1], [0, 1], [0, 0], [0, 0]])
  golden_axes = [('x', self.x_size + 2),
                 ('channel', ['red', 'green', 'blue', 'alpha']),
                 self.a2, self.a3]
  golden_lt = core.LabeledTensor(golden_op, golden_axes)
  self.assertLabeledTensorsEqual(pad_lt, golden_lt)
def test_invalid_input(self):
  with self.assertRaisesRegexp(ValueError, 'are not contained in the set'):
    ops.pad(self.original_lt,
            {'foo': (1, 1),
             'channel': ([], ['alpha'])})
def _infer_batch_ndims(self):
  """Return batch_ndims."""
  if self._is_only_identity_multiplier:
    return 0
  # The real batch ndims is one less when we pad in the case of
  # event_ndims = 1 and the underlying scale has rank 2. This allows us to
  # have non-negative sample dims.
  return (self._scale.rank() - 2 -
          array_ops.where(self._rank_two_event_ndims_one, 1, 0))
def _forward_log_det_jacobian(self, x):
  if self._is_only_identity_multiplier:
    # TODO(jvdillon): We don't pad in this case and instead let the fldj be
    # applied via broadcast.
    d = math_ops.cast(array_ops.shape(x)[-1], dtype=self._scale.dtype)
    return math_ops.log(math_ops.abs(self._scale)) * array_ops.where(
        math_ops.equal(self.shaper.event_ndims, 0), 1., d)
  fldj = self._scale.sqrt_log_abs_det()
  # We need to squeeze off the padded dimension.
  start = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
  return array_ops.reshape(fldj, array_ops.shape(fldj)[start:])
def _forward(self, x):
  # Pad the last dim with a zeros vector. We need this because it lets us
  # infer the scale in the inverse function.
  y = array_ops.expand_dims(x, dim=-1) if self._static_event_ndims == 0 else x
  ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
           else array_ops.rank(y))
  y = array_ops.pad(y,
                    paddings=array_ops.concat(
                        (array_ops.zeros((ndims - 1, 2), dtype=dtypes.int32),
                         [[0, 1]]), 0))

  # Set shape hints.
  if x.get_shape().ndims is not None:
    shape = x.get_shape().as_list()
    if self._static_event_ndims == 0:
      shape += [2]
    elif shape[-1] is not None:
      shape[-1] += 1
    shape = tensor_shape.TensorShape(shape)
    y.get_shape().assert_is_compatible_with(shape)
    y.set_shape(shape)

  # Since we only support event_ndims in [0, 1] and we do padding, we always
  # reduce over the last dimension, i.e., dim=-1 (which is the default).
  return nn_ops.softmax(y)
def testPad(self):
  for dtype in self.numeric_types:
    self._testBinary(
        array_ops.pad,
        np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
        np.array([[1, 2], [2, 1]], dtype=np.int32),
        expected=np.array(
            [[0, 0, 0, 0, 0, 0],
             [0, 0, 1, 2, 3, 0],
             [0, 0, 4, 5, 6, 0],
             [0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0]],
            dtype=dtype))
def pad_to_1d_bounding_box(image, offset_height, target_height,
                           dynamic_shape=False):
  """Pad `image` with zeros to the specified `target_height`.

  Adds `offset_height` rows of zeros on top, then pads the image on the
  bottom with zeros until it has height `target_height`. This op does
  nothing if `offset_height` is zero and the image already has height
  `target_height`.

  Args:
    image: 2-D tensor with shape `[height, depth]`
    offset_height: Number of rows of zeros to add on top.
    target_height: Height of output image.
    dynamic_shape: Whether the input image has undetermined shape. If set to
      `True`, shape information will be retrieved at run time. Defaults to
      `False`.

  Returns:
    2-D tensor of shape `[target_height, depth]`

  Raises:
    ValueError: If the shape of `image` is incompatible with the
      `offset_height` or `target_height` arguments, and `dynamic_shape` is
      set to `False`.
  """
  image = tf.convert_to_tensor(image, name='image')
  height, depth = _ImageDimensions(image, dynamic_shape=dynamic_shape)

  after_padding_height = target_height - offset_height - height

  if not dynamic_shape:
    if target_height < height:
      raise ValueError('target_height must be >= height')
    if after_padding_height < 0:
      raise ValueError('target_height not possible given '
                       'offset_height and image height')

  # Do not pad on the depth dimension.
  if (dynamic_shape or offset_height or after_padding_height):
    paddings = array_ops.reshape(
        array_ops.pack([offset_height, after_padding_height, 0, 0]),
        [2, 2])
    padded = array_ops.pad(image, paddings)
    if not dynamic_shape:
      padded.set_shape([target_height, depth])
  else:
    padded = image

  return padded
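array_ops.pack is the pre-1.0 name for what is now tf.stack; the reshape turns the four scalars into the [2, 2] paddings matrix that pad expects, with the second row [0, 0] leaving the depth dimension alone. An equivalent sketch with current public APIs (values are illustrative):

import tensorflow as tf

offset_height, after_padding_height = 2, 3
paddings = tf.reshape(
    tf.stack([offset_height, after_padding_height, 0, 0]), [2, 2])
# paddings == [[2, 3], [0, 0]]: pad the height axis only.
padded = tf.pad(tf.zeros([5, 4]), paddings)  # shape (10, 4)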
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
  """Pads a tensor.

  See tf.pad.

  Args:
    labeled_tensor: The input tensor.
    paddings: A mapping where the keys are axis names and the values are
      tuples where the first element is the padding to insert at the
      beginning of the axis and the second is the padding to insert at the
      end of the axis.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
    name: Optional op name.

  Returns:
    A tensor with the indicated axes padded, optionally with those axes
    extended with the provided labels.

  Raises:
    ValueError: If the padded axes are not axes in the input tensor.
  """
  with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('pad axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (paddings.keys(), labeled_tensor.axes))

    new_axes = []
    padding_pairs = []
    for name, axis in labeled_tensor.axes.items():
      if name in paddings:
        padding_before, padding_after = paddings[name]
        axis_before = core.Axis(name, padding_before)
        axis_after = core.Axis(name, padding_after)
        new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
        padding_pairs.append((len(axis_before), len(axis_after)))
      else:
        new_axes.append(axis)
        padding_pairs.append((0, 0))

    pad_op = array_ops.pad(labeled_tensor.tensor,
                           padding_pairs,
                           mode,
                           name=scope)

    return core.LabeledTensor(pad_op, new_axes)
def _process_scale(self, scale, event_ndims):
  """Helper to __init__ which gets scale in batch-ready form.

  This function expands dimensions of `scale` according to the following
  table:

                       event_ndims
  scale.ndims      0                1
          0    [1]+S+[1,1]     "silent error"
          1    [ ]+S+[1,1]     "silent error"
          2    [ ]+S+[1,1]     [1]+S+[ ]
          3    [ ]+S+[1,1]     [ ]+S+[ ]
        ...    (same)          (same)

  The idea is that we want to convert `scale` into something which can
  always work for, say, the left-hand argument of `batch_matmul`.

  Args:
    scale: `Tensor`.
    event_ndims: `Tensor` (0D, `int32`).

  Returns:
    scale: `Tensor` with dims expanded according to [above] table.
    batch_ndims: `Tensor` (0D, `int32`). The ndims of the `batch` portion.
  """
  ndims = array_ops.rank(scale)
  left = math_ops.select(
      math_ops.reduce_any([
          math_ops.reduce_all([
              math_ops.equal(ndims, 0),
              math_ops.equal(event_ndims, 0)
          ]),
          math_ops.reduce_all([
              math_ops.equal(ndims, 2),
              math_ops.equal(event_ndims, 1)
          ])]), 1, 0)
  right = math_ops.select(math_ops.equal(event_ndims, 0), 2, 0)
  pad = array_ops.concat(0, (
      array_ops.ones([left], dtype=dtypes.int32),
      array_ops.shape(scale),
      array_ops.ones([right], dtype=dtypes.int32)))
  scale = array_ops.reshape(scale, pad)
  batch_ndims = ndims - 2 + right
  # For safety, explicitly zero-out the upper triangular part.
  scale = array_ops.matrix_band_part(scale, -1, 0)
  if self.validate_args:
    # matrix_band_part will fail if scale is not at least rank 2.
    shape = array_ops.shape(scale)
    assert_square = check_ops.assert_equal(
        shape[-2], shape[-1],
        message="Input must be a (batch of) square matrix.")
    # Assuming lower-triangular means we only need to check diag != 0.
    diag = array_ops.matrix_diag_part(scale)
    is_non_singular = math_ops.logical_not(
        math_ops.reduce_any(
            math_ops.equal(diag,
                           ops.convert_to_tensor(0, dtype=diag.dtype))))
    assert_non_singular = control_flow_ops.Assert(
        is_non_singular, ["Singular matrix encountered", diag])
    scale = control_flow_ops.with_dependencies(
        [assert_square, assert_non_singular], scale)
  return scale, batch_ndims
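The concat of ones/shape/ones builds a new shape that prepends `left` singleton dims and appends `right` singleton dims without moving any data. A NumPy sketch of the first row of the table (ndims == 0, event_ndims == 0, so left = 1 and right = 2):

import numpy as np

scale = np.array(2.0, dtype=np.float32)       # scalar: S == ()
left, right = 1, 2
new_shape = (1,) * left + scale.shape + (1,) * right
expanded = scale.reshape(new_shape)
assert expanded.shape == (1, 1, 1)            # [1] + S + [1, 1]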
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
  """Strided 2-D convolution with 'SAME' padding.

  When stride > 1, then we do explicit zero-padding, followed by conv2d with
  'VALID' padding.

  Note that

     net = conv2d_same(inputs, num_outputs, 3, stride=stride)

  is equivalent to

     net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=1,
                                    padding='SAME')
     net = subsample(net, factor=stride)

  whereas

     net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=stride,
                                    padding='SAME')

  is different when the input's height or width is even, which is why we add
  the current function. For more details, see
  ResnetUtilsTest.testConv2DSameEven().

  Args:
    inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
    num_outputs: An integer, the number of output filters.
    kernel_size: An int with the kernel_size of the filters.
    stride: An integer, the output stride.
    rate: An integer, rate for atrous convolution.
    scope: Scope.

  Returns:
    output: A 4-D tensor of size [batch, height_out, width_out, channels]
      with the convolution output.
  """
  if stride == 1:
    return layers_lib.conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=1,
        rate=rate,
        padding='SAME',
        scope=scope)
  else:
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = array_ops.pad(
        inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    return layers_lib.conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=stride,
        rate=rate,
        padding='VALID',
        scope=scope)
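The explicit-padding arithmetic, worked for a 3x3 kernel with atrous rate 2 (values chosen for illustration): the dilated kernel spans 5 input pixels, so 4 total pixels of padding are needed, split 2 and 2.

kernel_size, rate = 3, 2
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)  # 5
pad_total = kernel_size_effective - 1                                 # 4
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2         # 2, 2
assert (pad_beg, pad_end) == (2, 2)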
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
  """Pads a tensor.

  See tf.pad.

  Args:
    labeled_tensor: The input tensor.
    paddings: A mapping where the keys are axis names and the values are
      tuples where the first element is the padding to insert at the
      beginning of the axis and the second is the padding to insert at the
      end of the axis.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
    name: Optional op name.

  Returns:
    A tensor with the indicated axes padded, optionally with those axes
    extended with the provided labels.

  Raises:
    ValueError: If the padded axes are not axes in the input tensor.
  """
  with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('pad axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (paddings.keys(), labeled_tensor.axes))

    new_axes = []
    padding_pairs = []
    for name, axis in labeled_tensor.axes.items():
      if name in paddings:
        padding_before, padding_after = paddings[name]
        axis_before = core.Axis(name, padding_before)
        axis_after = core.Axis(name, padding_after)
        new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
        padding_pairs.append((len(axis_before), len(axis_after)))
      else:
        new_axes.append(axis)
        padding_pairs.append((0, 0))

    pad_op = array_ops.pad(labeled_tensor.tensor, padding_pairs, mode,
                           name=scope)

    return core.LabeledTensor(pad_op, new_axes)