We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.python.framework.tensor_shape.TensorShape().
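Before the extracted examples, here is a minimal standalone sketch (our own illustration, not taken from any of the projects below) of the TensorShape API the examples rely on: construction from a list, unknown (None) dimensions, slicing, and shape concatenation.

from tensorflow.python.framework import tensor_shape

# A TensorShape may mix known and unknown (None) dimensions.
s = tensor_shape.TensorShape([None, 28, 28, 3])
print(s.as_list())           # [None, 28, 28, 3]
print(s.is_fully_defined())  # False -- the batch dimension is unknown
print(s[1:].num_elements())  # 2352, i.e. 28 * 28 * 3
print(s.concatenate([10]))   # a new 5-D shape: (None, 28, 28, 3, 10)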
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if self.data_format == 'channels_first':
    rows = input_shape[2]
    cols = input_shape[3]
  else:
    rows = input_shape[1]
    cols = input_shape[2]
  rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
                                       self.padding, self.strides[0])
  cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
                                       self.padding, self.strides[1])
  if self.data_format == 'channels_first':
    return tensor_shape.TensorShape(
        [input_shape[0], self.filters, rows, cols])
  else:
    return tensor_shape.TensorShape(
        [input_shape[0], rows, cols, self.filters])
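The example above delegates the per-dimension arithmetic to conv_utils.conv_output_length. As a rough sketch of what that helper computes for 'valid' and 'same' padding (our own simplified reimplementation, ignoring dilation):

def conv_output_length_sketch(input_length, filter_size, padding, stride):
  # None propagates: an unknown input dimension stays unknown.
  if input_length is None:
    return None
  if padding == 'same':
    output_length = input_length
  elif padding == 'valid':
    output_length = input_length - filter_size + 1
  return (output_length + stride - 1) // stride

print(conv_output_length_sketch(28, 3, 'valid', 1))  # 26
print(conv_output_length_sketch(28, 3, 'same', 2))   # 14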
def _transpose_batch_time(x):
  """Transpose the batch and time dimensions of a Tensor.

  Retains as much of the static shape information as possible.

  Args:
    x: A tensor of rank 2 or higher.

  Returns:
    x transposed along the first two dimensions.

  Raises:
    ValueError: if `x` is rank 1 or lower.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
    raise ValueError(
        "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
        (x, x_static_shape))
  x_rank = array_ops.rank(x)
  x_t = array_ops.transpose(
      x, array_ops.concat(([1, 0], math_ops.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tensor_shape.TensorShape(
          [x_static_shape[1].value, x_static_shape[0].value]).concatenate(
              x_static_shape[2:]))
  return x_t
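A hypothetical usage sketch (assuming a TF 1.x graph-mode environment, which matches the Dimension .value accessor used above, and assuming the function definition is in scope):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[32, None, 128])  # [batch, time, depth]
y = _transpose_batch_time(x)
print(y.get_shape())  # (?, 32, 128): batch and time swapped, depth retained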
def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  param_shape = input_shape[1:]
  self.param_broadcast = [False] * len(param_shape)
  if self.shared_axes is not None:
    for i in self.shared_axes:
      param_shape[i - 1] = 1
      self.param_broadcast[i - 1] = True
  self.alpha = self.add_weight(
      shape=param_shape,
      name='alpha',
      initializer=self.alpha_initializer,
      regularizer=self.alpha_regularizer,
      constraint=self.alpha_constraint)
  # Set input spec.
  axes = {}
  if self.shared_axes:
    for i in range(1, len(input_shape)):
      if i not in self.shared_axes:
        axes[i] = input_shape[i]
  self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
  self.built = True
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if self.data_format == 'channels_first':
    height = self.size[0] * input_shape[
        2] if input_shape[2] is not None else None
    width = self.size[1] * input_shape[
        3] if input_shape[3] is not None else None
    return tensor_shape.TensorShape(
        [input_shape[0], input_shape[1], height, width])
  else:
    height = self.size[0] * input_shape[
        1] if input_shape[1] is not None else None
    width = self.size[1] * input_shape[
        2] if input_shape[2] is not None else None
    return tensor_shape.TensorShape(
        [input_shape[0], height, width, input_shape[3]])
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if self.data_format == 'channels_first':
    dim1 = self.size[0] * input_shape[
        2] if input_shape[2] is not None else None
    dim2 = self.size[1] * input_shape[
        3] if input_shape[3] is not None else None
    dim3 = self.size[2] * input_shape[
        4] if input_shape[4] is not None else None
    return tensor_shape.TensorShape(
        [input_shape[0], input_shape[1], dim1, dim2, dim3])
  else:
    dim1 = self.size[0] * input_shape[
        1] if input_shape[1] is not None else None
    dim2 = self.size[1] * input_shape[
        2] if input_shape[2] is not None else None
    dim3 = self.size[2] * input_shape[
        3] if input_shape[3] is not None else None
    return tensor_shape.TensorShape(
        [input_shape[0], dim1, dim2, dim3, input_shape[4]])
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if self.data_format == 'channels_first':
    if input_shape[2] is not None:
      rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]
    else:
      rows = None
    if input_shape[3] is not None:
      cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]
    else:
      cols = None
    return tensor_shape.TensorShape(
        [input_shape[0], input_shape[1], rows, cols])
  elif self.data_format == 'channels_last':
    if input_shape[1] is not None:
      rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]
    else:
      rows = None
    if input_shape[2] is not None:
      cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]
    else:
      cols = None
    return tensor_shape.TensorShape(
        [input_shape[0], rows, cols, input_shape[3]])
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  # pylint: disable=invalid-unary-operand-type
  if self.data_format == 'channels_first':
    return tensor_shape.TensorShape([
        input_shape[0], input_shape[1],
        input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
        if input_shape[2] else None,
        input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
        if input_shape[3] else None
    ])
  else:
    return tensor_shape.TensorShape([
        input_shape[0],
        input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
        if input_shape[1] else None,
        input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
        if input_shape[2] else None, input_shape[3]
    ])
  # pylint: enable=invalid-unary-operand-type
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if self.data_format == 'channels_first':
    rows = input_shape[2]
    cols = input_shape[3]
  elif self.data_format == 'channels_last':
    rows = input_shape[1]
    cols = input_shape[2]
  rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
                                       self.padding, self.strides[0])
  cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
                                       self.padding, self.strides[1])
  if self.data_format == 'channels_first':
    return tensor_shape.TensorShape(
        [input_shape[0], self.filters, rows, cols])
  elif self.data_format == 'channels_last':
    return tensor_shape.TensorShape(
        [input_shape[0], rows, cols, self.filters])
def build(self, input_shape):
  # Used purely for shape validation.
  if not isinstance(input_shape, list):
    raise ValueError('`Concatenate` layer should be called '
                     'on a list of inputs')
  if all([shape is None for shape in input_shape]):
    return
  reduced_inputs_shapes = [
      tensor_shape.TensorShape(shape).as_list() for shape in input_shape
  ]
  shape_set = set()
  for i in range(len(reduced_inputs_shapes)):
    del reduced_inputs_shapes[i][self.axis]
    shape_set.add(tuple(reduced_inputs_shapes[i]))
  if len(shape_set) > 1:
    raise ValueError('`Concatenate` layer requires '
                     'inputs with matching shapes '
                     'except for the concat axis. '
                     'Got inputs shapes: %s' % (input_shape))
  self.built = True
def build(self, input_shape):
  # Used purely for shape validation.
  if not isinstance(input_shape, list) or len(input_shape) != 2:
    raise ValueError('A `Dot` layer should be called '
                     'on a list of 2 inputs.')
  shape1 = tensor_shape.TensorShape(input_shape[0]).as_list()
  shape2 = tensor_shape.TensorShape(input_shape[1]).as_list()
  if shape1 is None or shape2 is None:
    return
  if isinstance(self.axes, int):
    if self.axes < 0:
      axes = [self.axes % len(shape1), self.axes % len(shape2)]
    else:
      axes = [self.axes] * 2
  else:
    axes = self.axes
  if shape1[axes[0]] != shape2[axes[1]]:
    raise ValueError('Dimension incompatibility '
                     '%s != %s. ' % (shape1[axes[0]], shape2[axes[1]]) +
                     'Layer shapes: %s, %s' % (shape1, shape2))
  self.built = True
def _compute_output_shape(self, input_shape):
  if not isinstance(input_shape, list) or len(input_shape) != 2:
    raise ValueError('A `Dot` layer should be called '
                     'on a list of 2 inputs.')
  shape1 = tensor_shape.TensorShape(input_shape[0]).as_list()
  shape2 = tensor_shape.TensorShape(input_shape[1]).as_list()
  if isinstance(self.axes, int):
    if self.axes < 0:
      axes = [self.axes % len(shape1), self.axes % len(shape2)]
    else:
      axes = [self.axes] * 2
  else:
    axes = self.axes
  shape1.pop(axes[0])
  shape2.pop(axes[1])
  shape2.pop(0)
  output_shape = shape1 + shape2
  if len(output_shape) == 1:
    output_shape += [1]
  return tensor_shape.TensorShape(output_shape)
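To see the shape arithmetic above in isolation, consider a hypothetical Dot layer with axes=(2, 1) applied to inputs of shapes (None, 3, 4) and (None, 4, 5). The plain-Python equivalent of the pops and concatenation is:

shape1 = [None, 3, 4]
shape2 = [None, 4, 5]
axes = (2, 1)
shape1.pop(axes[0])     # drop the contracted axis of the first input
shape2.pop(axes[1])     # drop the contracted axis of the second input
shape2.pop(0)           # drop the batch dimension of the second input
print(shape1 + shape2)  # [None, 3, 5]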
def _tensor_shape_tensor_conversion_function(s, dtype=None, name=None,
                                             as_ref=False):
  _ = as_ref
  if not s.is_fully_defined():
    raise ValueError(
        "Cannot convert a partially known TensorShape to a Tensor: %s" % s)
  s_list = s.as_list()
  int64_value = 0
  for dim in s_list:
    if dim >= 2**31:
      int64_value = dim
      break
  if dtype is not None:
    if dtype not in (dtypes.int32, dtypes.int64):
      raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
    if dtype == dtypes.int32 and int64_value:
      raise ValueError("Cannot convert a TensorShape to dtype int32; "
                       "a dimension is too large (%s)" % int64_value)
  else:
    dtype = dtypes.int64 if int64_value else dtypes.int32
  if name is None:
    name = "shape_as_tensor"
  return constant(s_list, dtype=dtype, name=name)
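Because this function is registered as the tensor-conversion function for TensorShape, a fully defined shape can be passed anywhere a Tensor is expected. A minimal sketch of the effect:

import tensorflow as tf

t = tf.convert_to_tensor(tf.TensorShape([2, 3]))
print(t.dtype)  # int32 (int64 is picked only when a dimension is >= 2**31)
# A partially known shape fails, per the check above:
# tf.convert_to_tensor(tf.TensorShape([None, 3]))  -> ValueError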
def first_dimension(shape, min_rank=1):
  """Returns the first dimension of shape while checking it has min_rank.

  Args:
    shape: A `TensorShape`.
    min_rank: Integer, minimum rank of shape.

  Returns:
    The value of the first dimension.

  Raises:
    ValueError: if inputs don't have at least min_rank dimensions, or if the
      first dimension value is not defined.
  """
  dims = shape.dims
  if dims is None:
    raise ValueError('dims of shape must be known but is None')
  if len(dims) < min_rank:
    raise ValueError('rank of shape must be at least %d not: %d' %
                     (min_rank, len(dims)))
  value = dims[0].value
  if value is None:
    raise ValueError('first dimension shape must be known but is None')
  return value
def last_dimension(shape, min_rank=1):
  """Returns the last dimension of shape while checking it has min_rank.

  Args:
    shape: A `TensorShape`.
    min_rank: Integer, minimum rank of shape.

  Returns:
    The value of the last dimension.

  Raises:
    ValueError: if inputs don't have at least min_rank dimensions, or if the
      last dimension value is not defined.
  """
  dims = shape.dims
  if dims is None:
    raise ValueError('dims of shape must be known but is None')
  if len(dims) < min_rank:
    raise ValueError('rank of shape must be at least %d not: %d' %
                     (min_rank, len(dims)))
  value = dims[-1].value
  if value is None:
    raise ValueError('last dimension shape must be known but is None')
  return value
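A short sketch of both helpers in action (assuming the two definitions above are in scope):

from tensorflow.python.framework import tensor_shape

shape = tensor_shape.TensorShape([32, 10, 256])
print(first_dimension(shape))             # 32
print(last_dimension(shape, min_rank=3))  # 256
# last_dimension(tensor_shape.TensorShape([None]), min_rank=2) raises
# ValueError: the shape has rank 1, below the requested minimum.
# first_dimension(tensor_shape.TensorShape([None, 5])) also raises,
# because the first dimension is not statically known.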
def __init__(self, shape, dtype, verify_pd=True, name="OperatorPDIdentity"):
  """Initialize an `OperatorPDIdentity`.

  Args:
    shape: `int32` rank 1 `Tensor` of length at least 2, and with the last
      two entries equal (since this is a square matrix).
    dtype: Data type of the matrix that this operator represents.
    verify_pd: `Boolean`, if `True`, asserts are added to the initialization
      args to ensure they define this operator as a square (batch) matrix.
    name: Name to prepend to `Ops`.
  """
  # Grab static shape if available now.
  with ops.name_scope(name):
    with ops.name_scope("init", values=[shape]):
      self._dtype = dtypes.as_dtype(dtype)
      self._verify_pd = verify_pd
      self._name = name

      # Store the static shape (if possible) right now before adding the
      # asserts, since the asserts prevent .constant_value from working.
      shape = ops.convert_to_tensor(shape, name="shape")
      self._get_shape = tensor_shape.TensorShape(
          tensor_util.constant_value(shape))
      self._shape_arg = self._check_shape(shape)
def _forward(self, x):
  # Pad the last dim with a zeros vector. We need this because it lets us
  # infer the scale in the inverse function.
  y = array_ops.expand_dims(x, dim=-1) if self._static_event_ndims == 0 else x
  ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
           else array_ops.rank(y))
  y = array_ops.pad(y, paddings=array_ops.concat(0, (
      array_ops.zeros((ndims - 1, 2), dtype=dtypes.int32), [[0, 1]])))

  # Set shape hints.
  if x.get_shape().ndims is not None:
    shape = x.get_shape().as_list()
    if self._static_event_ndims == 0:
      shape += [2]
    elif shape[-1] is not None:
      shape[-1] += 1
    shape = tensor_shape.TensorShape(shape)
    y.get_shape().assert_is_compatible_with(shape)
    y.set_shape(shape)

  # Since we only support event_ndims in [0, 1] and we do padding, we always
  # reduce over the last dimension, i.e., dim=-1 (which is the default).
  return nn_ops.softmax(y)
def random_positive_definite_matrix(shape, dtype,
                                    force_well_conditioned=False):
  """[batch] positive definite matrix.

  Args:
    shape: `TensorShape` or Python list. Shape of the returned matrix.
    dtype: `TensorFlow` `dtype` or Python dtype.
    force_well_conditioned: Python bool. If `True`, returned matrix has
      eigenvalues with modulus in `(1, 4)`. Otherwise, eigenvalues are
      chi-squared random variables.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  if not contrib_tensor_util.is_tensor(shape):
    shape = tensor_shape.TensorShape(shape)
    # Matrix must be square.
    shape[-1].assert_is_compatible_with(shape[-2])

  with ops.name_scope("random_positive_definite_matrix"):
    tril = random_tril_matrix(
        shape, dtype, force_well_conditioned=force_well_conditioned)
    return math_ops.matmul(tril, tril, adjoint_b=True)
def _shape(self):
  # Get final matrix shape.
  domain_dimension = self.operators[0].domain_dimension
  for operator in self.operators[1:]:
    domain_dimension.assert_is_compatible_with(operator.range_dimension)
    domain_dimension = operator.domain_dimension

  matrix_shape = tensor_shape.TensorShape(
      [self.operators[0].range_dimension,
       self.operators[-1].domain_dimension])

  # Get broadcast batch shape.
  # broadcast_shape checks for compatibility.
  batch_shape = self.operators[0].batch_shape
  for operator in self.operators[1:]:
    batch_shape = common_shapes.broadcast_shape(
        batch_shape, operator.batch_shape)

  return batch_shape.concatenate(matrix_shape)
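As a concrete illustration of the static bookkeeping above (hypothetical operators): composing an operator with matrix shape [5, 4] and batch shape [2] with one of matrix shape [4, 3] and empty batch shape yields the broadcast batch shape [2] concatenated with the matrix shape [5, 3]. The final step reduces to:

from tensorflow.python.framework import tensor_shape

batch_shape = tensor_shape.TensorShape([2])    # broadcast of [2] and []
matrix_shape = tensor_shape.TensorShape([5, 3])
print(batch_shape.concatenate(matrix_shape))   # (2, 5, 3)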
def testShapeGetters(self):
  with self.test_session():
    bijector = bijectors.Inline(
        forward_event_shape_fn=lambda x: array_ops.concat((x, [1]), 0),
        get_forward_event_shape_fn=lambda x: x.as_list() + [1],
        inverse_event_shape_fn=lambda x: x[:-1],
        get_inverse_event_shape_fn=lambda x: x[:-1],
        name="shape_only")
    x = tensor_shape.TensorShape([1, 2, 3])
    y = tensor_shape.TensorShape([1, 2, 3, 1])
    self.assertAllEqual(y, bijector.get_forward_event_shape(x))
    self.assertAllEqual(y.as_list(),
                        bijector.forward_event_shape(x.as_list()).eval())
    self.assertAllEqual(x, bijector.get_inverse_event_shape(y))
    self.assertAllEqual(x.as_list(),
                        bijector.inverse_event_shape(y.as_list()).eval())
def _GetFakeDistribution(self):

  class FakeDistribution(ds.Distribution):
    """Fake Distribution for testing _set_sample_static_shape."""

    def __init__(self, batch_shape=None, event_shape=None):
      self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
      self._static_event_shape = tensor_shape.TensorShape(event_shape)
      super(FakeDistribution, self).__init__(
          dtype=dtypes.float32,
          is_continuous=False,
          reparameterization_type=distributions.NOT_REPARAMETERIZED,
          validate_args=True,
          allow_nan_stats=True,
          name="DummyDistribution")

    def _get_batch_shape(self):
      return self._static_batch_shape

    def _get_event_shape(self):
      return self._static_event_shape

  return FakeDistribution
def output_size(self):
  """A (possibly nested tuple of...) integer[s] or `TensorShape` object[s]."""
  raise NotImplementedError
def _create_zero_outputs(size, dtype, batch_size):
  """Create a zero outputs Tensor structure."""

  def _t(s):
    return (s if isinstance(s, ops.Tensor) else constant_op.constant(
        tensor_shape.TensorShape(s).as_list(),
        dtype=dtypes.int32,
        name="zero_suffix_shape"))

  def _create(s, d):
    return array_ops.zeros(
        array_ops.concat(([batch_size], _t(s)), axis=0), dtype=d)

  return nest.map_structure(_create, size, dtype)
def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if len(input_shape) < 4:
    raise ValueError('Inputs to `SeparableConv2D` should have rank 4. '
                     'Received input shape:', str(input_shape))
  if self.data_format == 'channels_first':
    channel_axis = 1
  else:
    channel_axis = 3
  if input_shape[channel_axis] is None:
    raise ValueError('The channel dimension of the inputs to '
                     '`SeparableConv2D` '
                     'should be defined. Found `None`.')
  input_dim = int(input_shape[channel_axis])
  depthwise_kernel_shape = (self.kernel_size[0], self.kernel_size[1],
                            input_dim, self.depth_multiplier)

  self.depthwise_kernel = self.add_weight(
      shape=depthwise_kernel_shape,
      initializer=self.depthwise_initializer,
      name='depthwise_kernel',
      regularizer=self.depthwise_regularizer,
      constraint=self.depthwise_constraint)

  if self.use_bias:
    self.bias = self.add_weight(
        shape=(self.filters,),
        initializer=self.bias_initializer,
        name='bias',
        regularizer=self.bias_regularizer,
        constraint=self.bias_constraint)
  else:
    self.bias = None
  # Set input spec.
  self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
  self.built = True
def state_size(self):
  return AttentionWrapperState(
      cell_state=self._cell.state_size,
      time=tensor_shape.TensorShape([]),
      attention=self._attention_size,
      alignments=self._attention_mechanism.alignments_size,
      alignment_history=())  # alignment_history is sometimes a TensorArray
def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape)
  if self.data_format == 'channels_first':
    channel_axis = 1
  else:
    channel_axis = -1
  if input_shape[channel_axis].value is None:
    raise ValueError('The channel dimension of the inputs '
                     'should be defined. Found `None`.')
  input_dim = input_shape[channel_axis].value
  kernel_shape = self.kernel_size + (input_dim, self.filters)

  # dense kernel
  self.kernel_pre = self.add_variable(name='kernel_pre',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      trainable=True,
                                      dtype=self.dtype)
  conv_th = tf.ones_like(self.kernel_pre) * self.sparse_th
  conv_zero = tf.zeros_like(self.kernel_pre)
  cond = tf.less(tf.abs(self.kernel_pre), conv_th)
  self.kernel = tf.where(cond, conv_zero, self.kernel_pre, name='kernel')

  if self.use_bias:
    self.bias = self.add_variable(name='bias',
                                  shape=(self.filters,),
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  trainable=True,
                                  dtype=self.dtype)
  else:
    self.bias = None
  self.input_spec = base.InputSpec(ndim=self.rank + 2,
                                   axes={channel_axis: input_dim})
  self.built = True
def _compute_output_shape(self, input_shape):
  if isinstance(input_shape, list):
    input_shape = input_shape[0]
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if self.data_format == 'channels_first':
    rows = input_shape[3]
    cols = input_shape[4]
  elif self.data_format == 'channels_last':
    rows = input_shape[2]
    cols = input_shape[3]
  rows = conv_utils.conv_output_length(
      rows,
      self.kernel_size[0],
      padding=self.padding,
      stride=self.strides[0],
      dilation=self.dilation_rate[0])
  cols = conv_utils.conv_output_length(
      cols,
      self.kernel_size[1],
      padding=self.padding,
      stride=self.strides[1],
      dilation=self.dilation_rate[1])
  if self.return_sequences:
    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1], self.filters, rows, cols])
    elif self.data_format == 'channels_last':
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1], rows, cols, self.filters])
  else:
    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [input_shape[0], self.filters, rows, cols])
    elif self.data_format == 'channels_last':
      return tensor_shape.TensorShape(
          [input_shape[0], rows, cols, self.filters])
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  size = self.size * input_shape[1] if input_shape[1] is not None else None
  return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if self.data_format == 'channels_first':
    if input_shape[2] is not None:
      dim1 = input_shape[2] + 2 * self.padding[0][0]
    else:
      dim1 = None
    if input_shape[3] is not None:
      dim2 = input_shape[3] + 2 * self.padding[1][0]
    else:
      dim2 = None
    if input_shape[4] is not None:
      dim3 = input_shape[4] + 2 * self.padding[2][0]
    else:
      dim3 = None
    return tensor_shape.TensorShape(
        [input_shape[0], input_shape[1], dim1, dim2, dim3])
  elif self.data_format == 'channels_last':
    if input_shape[1] is not None:
      dim1 = input_shape[1] + 2 * self.padding[0][1]
    else:
      dim1 = None
    if input_shape[2] is not None:
      dim2 = input_shape[2] + 2 * self.padding[1][1]
    else:
      dim2 = None
    if input_shape[3] is not None:
      dim3 = input_shape[3] + 2 * self.padding[2][1]
    else:
      dim3 = None
    return tensor_shape.TensorShape(
        [input_shape[0], dim1, dim2, dim3, input_shape[4]])
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if input_shape[1] is not None:
    length = input_shape[1] - self.cropping[0] - self.cropping[1]
  else:
    length = None
  return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  # pylint: disable=invalid-unary-operand-type
  if self.data_format == 'channels_first':
    if input_shape[2] is not None:
      dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
    else:
      dim1 = None
    if input_shape[3] is not None:
      dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
    else:
      dim2 = None
    if input_shape[4] is not None:
      dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]
    else:
      dim3 = None
    return tensor_shape.TensorShape(
        [input_shape[0], input_shape[1], dim1, dim2, dim3])
  elif self.data_format == 'channels_last':
    if input_shape[1] is not None:
      dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
    else:
      dim1 = None
    if input_shape[2] is not None:
      dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
    else:
      dim2 = None
    if input_shape[3] is not None:
      dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]
    else:
      dim3 = None
    return tensor_shape.TensorShape(
        [input_shape[0], dim1, dim2, dim3, input_shape[4]])
  # pylint: enable=invalid-unary-operand-type
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  output_shape = copy.copy(input_shape)
  for i, dim in enumerate(self.dims):
    target_dim = input_shape[dim]
    output_shape[i + 1] = target_dim
  return tensor_shape.TensorShape(output_shape)
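For instance, a hypothetical Permute layer with dims=(2, 1) maps an input shape (None, 10, 64) to (None, 64, 10); the loop simply reads each target dimension from the input shape:

input_shape = [None, 10, 64]
dims = (2, 1)
output_shape = list(input_shape)
for i, dim in enumerate(dims):
  output_shape[i + 1] = input_shape[dim]
print(output_shape)  # [None, 64, 10]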
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if not all(input_shape[1:]):
    raise ValueError('The shape of the input to "Flatten" '
                     'is not fully defined '
                     '(got ' + str(input_shape[1:]) + '). '
                     'Make sure to pass a complete "input_shape" '
                     'or "batch_input_shape" argument to the first '
                     'layer in your model.')
  return tensor_shape.TensorShape([input_shape[0], np.prod(input_shape[1:])])
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]])
def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  self.embeddings = self.add_weight(
      shape=(self.input_dim, self.output_dim),
      initializer=self.embeddings_initializer,
      name='embeddings',
      regularizer=self.embeddings_regularizer,
      constraint=self.embeddings_constraint)
  self.built = True
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if not self.input_length:
    input_length = input_shape[1]
  else:
    input_length = self.input_length
  return tensor_shape.TensorShape(
      [input_shape[0], input_length, self.output_dim])
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  length = conv_utils.conv_output_length(input_shape[1], self.kernel_size[0],
                                         self.padding, self.strides[0])
  return tensor_shape.TensorShape([input_shape[0], length, self.filters])
def build(self, input_shape):
  # Used purely for shape validation.
  if not isinstance(input_shape, list):
    raise ValueError('A merge layer should be called '
                     'on a list of inputs.')
  if len(input_shape) < 2:
    raise ValueError('A merge layer should be called '
                     'on a list of at least 2 inputs. '
                     'Got ' + str(len(input_shape)) + ' inputs.')
  input_shape = [tensor_shape.TensorShape(s).as_list() for s in input_shape]
  batch_sizes = [s[0] for s in input_shape if s is not None]
  batch_sizes = set(batch_sizes)
  batch_sizes -= set([None])
  if len(batch_sizes) > 1:
    raise ValueError('Can not merge tensors with different '
                     'batch sizes. Got tensors with shapes : ' +
                     str(input_shape))
  if input_shape[0] is None:
    output_shape = None
  else:
    output_shape = input_shape[0][1:]
  for i in range(1, len(input_shape)):
    if input_shape[i] is None:
      shape = None
    else:
      shape = input_shape[i][1:]
    output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
  # If the inputs have different ranks, we have to reshape them
  # to make them broadcastable.
  if None not in input_shape and len(set(map(len, input_shape))) == 1:
    self._reshape_required = False
  else:
    self._reshape_required = True
  self.built = True
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  return tensor_shape.TensorShape([input_shape[0], input_shape[2]])
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if self.data_format == 'channels_last':
    return tensor_shape.TensorShape([input_shape[0], input_shape[3]])
  else:
    return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if self.data_format == 'channels_last':
    return tensor_shape.TensorShape([input_shape[0], input_shape[4]])
  else:
    return tensor_shape.TensorShape([input_shape[0], input_shape[1]])