Python tensorflow.python.framework.tensor_shape module: as_dimension() code examples

We have extracted the following 7 code examples from open-source Python projects to illustrate how to use tensorflow.python.framework.tensor_shape.as_dimension().
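
Before the project examples, here is a minimal sketch of the call itself (written against the TF 1.x API): as_dimension() coerces a plain int, None, or an existing Dimension into a tensor_shape.Dimension object, whose .value attribute is a concrete int, or None for a statically unknown size.

from tensorflow.python.framework import tensor_shape

# A concrete size becomes a Dimension with an integer .value.
dim = tensor_shape.as_dimension(32)
assert dim.value == 32

# None stands for a statically unknown size; .value stays None.
unknown = tensor_shape.as_dimension(None)
assert unknown.value is None

# An existing Dimension passes through unchanged.
assert tensor_shape.as_dimension(dim).value == 32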

Project: VAEGAN    Author: jlindsey15
from tensorflow.python.framework import tensor_shape


def get2d_deconv_output_size(input_height, input_width, filter_height,
                             filter_width, row_stride, col_stride, padding_type):
    """Returns the number of rows and columns in a 2-D deconvolution output."""
    input_height = tensor_shape.as_dimension(input_height)
    input_width = tensor_shape.as_dimension(input_width)
    filter_height = tensor_shape.as_dimension(filter_height)
    filter_width = tensor_shape.as_dimension(filter_width)
    row_stride = int(row_stride)
    col_stride = int(col_stride)

    # Compute the number of rows in the output, based on the padding.
    if input_height.value is None or filter_height.value is None:
        out_rows = None
    elif padding_type == "VALID":
        out_rows = (input_height.value - 1) * row_stride + filter_height.value
    elif padding_type == "SAME":
        out_rows = input_height.value * row_stride
    else:
        raise ValueError("Invalid value for padding: %r" % padding_type)

    # Compute the number of columns in the output, based on the padding.
    # (An invalid padding_type has already been rejected above.)
    if input_width.value is None or filter_width.value is None:
        out_cols = None
    elif padding_type == "VALID":
        out_cols = (input_width.value - 1) * col_stride + filter_width.value
    elif padding_type == "SAME":
        out_cols = input_width.value * col_stride

    return out_rows, out_cols
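
As a quick sanity check of the formulas above (a hypothetical call, not taken from the project source): with a 32x32 input, a 5x5 filter, and stride 2, VALID padding gives (32 - 1) * 2 + 5 = 67 output rows and columns, while SAME padding gives 32 * 2 = 64.

# Hypothetical usage of the function above.
rows, cols = get2d_deconv_output_size(32, 32, 5, 5, 2, 2, "VALID")
assert (rows, cols) == (67, 67)   # (32 - 1) * 2 + 5

rows, cols = get2d_deconv_output_size(32, 32, 5, 5, 2, 2, "SAME")
assert (rows, cols) == (64, 64)   # 32 * 2

# A statically unknown (None) size propagates as None.
rows, cols = get2d_deconv_output_size(None, 32, 5, 5, 2, 2, "SAME")
assert rows is None and cols == 64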
Project: VAE_tensorflow    Author: int8
from tensorflow.python.framework import tensor_shape


def get2d_deconv_output_size(input_height, input_width, filter_height,
                             filter_width, row_stride, col_stride, padding_type):
    """Returns the number of rows and columns in a 2-D deconvolution output."""
    input_height = tensor_shape.as_dimension(input_height)
    input_width = tensor_shape.as_dimension(input_width)
    filter_height = tensor_shape.as_dimension(filter_height)
    filter_width = tensor_shape.as_dimension(filter_width)
    row_stride = int(row_stride)
    col_stride = int(col_stride)

    # Compute the number of rows in the output, based on the padding.
    if input_height.value is None or filter_height.value is None:
        out_rows = None
    elif padding_type == "VALID":
        out_rows = (input_height.value - 1) * row_stride + filter_height.value
    elif padding_type == "SAME":
        out_rows = input_height.value * row_stride
    else:
        raise ValueError("Invalid value for padding: %r" % padding_type)

    # Compute the number of columns in the output, based on the padding.
    # (An invalid padding_type has already been rejected above.)
    if input_width.value is None or filter_width.value is None:
        out_cols = None
    elif padding_type == "VALID":
        out_cols = (input_width.value - 1) * col_stride + filter_width.value
    elif padding_type == "SAME":
        out_cols = input_width.value * col_stride

    return out_rows, out_cols
Project: BGAN    Author: htconquer
from tensorflow.python.framework import tensor_shape


def get2d_deconv_output_size(input_height, input_width, filter_height,
                             filter_width, row_stride, col_stride, padding_type):
    """Returns the number of rows and columns in a 2-D deconvolution output."""
    input_height = tensor_shape.as_dimension(input_height)
    input_width = tensor_shape.as_dimension(input_width)
    filter_height = tensor_shape.as_dimension(filter_height)
    filter_width = tensor_shape.as_dimension(filter_width)
    row_stride = int(row_stride)
    col_stride = int(col_stride)

    # Compute the number of rows in the output, based on the padding.
    if input_height.value is None or filter_height.value is None:
        out_rows = None
    elif padding_type == "VALID":
        out_rows = (input_height.value - 1) * row_stride + filter_height.value
    elif padding_type == "SAME":
        out_rows = input_height.value * row_stride
    else:
        raise ValueError("Invalid value for padding: %r" % padding_type)

    # Compute the number of columns in the output, based on the padding.
    # (An invalid padding_type has already been rejected above.)
    if input_width.value is None or filter_width.value is None:
        out_cols = None
    elif padding_type == "VALID":
        out_cols = (input_width.value - 1) * col_stride + filter_width.value
    elif padding_type == "SAME":
        out_cols = input_width.value * col_stride

    return out_rows, out_cols
Project: DP-VAE    Author: thudzj
from tensorflow.python.framework import tensor_shape


def get2d_deconv_output_size(input_height, input_width, filter_height,
                             filter_width, row_stride, col_stride, padding_type):
    """Returns the number of rows and columns in a 2-D deconvolution output."""
    input_height = tensor_shape.as_dimension(input_height)
    input_width = tensor_shape.as_dimension(input_width)
    filter_height = tensor_shape.as_dimension(filter_height)
    filter_width = tensor_shape.as_dimension(filter_width)
    row_stride = int(row_stride)
    col_stride = int(col_stride)

    # Compute the number of rows in the output, based on the padding.
    if input_height.value is None or filter_height.value is None:
        out_rows = None
    elif padding_type == "VALID":
        out_rows = (input_height.value - 1) * row_stride + filter_height.value
    elif padding_type == "SAME":
        out_rows = input_height.value * row_stride
    else:
        raise ValueError("Invalid value for padding: %r" % padding_type)

    # Compute the number of columns in the output, based on the padding.
    # (An invalid padding_type has already been rejected above.)
    if input_width.value is None or filter_width.value is None:
        out_cols = None
    elif padding_type == "VALID":
        out_cols = (input_width.value - 1) * col_stride + filter_width.value
    elif padding_type == "SAME":
        out_cols = input_width.value * col_stride

    return out_rows, out_cols
Project: divcolor    Author: aditya12agd5
def __encoder(self, scope, input_tensor, bn_is_training, keep_prob, in_nch=1, reuse=False):

        lf = self.layer_factory

        input_tensor2d = tf.reshape(input_tensor, [self.flags.batch_size, \
                self.flags.img_height, self.flags.img_width, in_nch])

        if self.nch == 1 and not reuse:
            tf.image_summary('summ_input_tensor2d', input_tensor2d, max_images=10)

        # as_dimension() normalizes the static channel entry of the shape to a
        # Dimension; .value extracts it as a plain int for the filter shapes below.
        nch = tensor_shape.as_dimension(input_tensor2d.get_shape()[3]).value

        if not reuse:
            W_conv1 = lf.weight_variable(name='W_conv1', shape=[5, 5, nch, 128])
            W_conv2 = lf.weight_variable(name='W_conv2', shape=[5, 5, 128, 256])
            W_conv3 = lf.weight_variable(name='W_conv3', shape=[5, 5, 256, 512])
            W_conv4 = lf.weight_variable(name='W_conv4', shape=[4, 4, 512, 1024])
            W_fc1 = lf.weight_variable(name='W_fc1', shape=[4*4*1024, self.flags.hidden_size * 2])

            b_conv1 = lf.bias_variable(name='b_conv1', shape=[128])
            b_conv2 = lf.bias_variable(name='b_conv2', shape=[256])
            b_conv3 = lf.bias_variable(name='b_conv3', shape=[512])
            b_conv4 = lf.bias_variable(name='b_conv4', shape=[1024])
            b_fc1 = lf.bias_variable(name='b_fc1', shape=[self.flags.hidden_size * 2])
        else:
            W_conv1 = lf.weight_variable(name='W_conv1')
            W_conv2 = lf.weight_variable(name='W_conv2')
            W_conv3 = lf.weight_variable(name='W_conv3')
            W_conv4 = lf.weight_variable(name='W_conv4')
            W_fc1 = lf.weight_variable(name='W_fc1')

            b_conv1 = lf.bias_variable(name='b_conv1')
            b_conv2 = lf.bias_variable(name='b_conv2')
            b_conv3 = lf.bias_variable(name='b_conv3')
            b_conv4 = lf.bias_variable(name='b_conv4')
            b_fc1 = lf.bias_variable(name='b_fc1')

        conv1 = tf.nn.relu(lf.conv2d(input_tensor2d, W_conv1, stride=2) + b_conv1)
        conv1_norm = lf.batch_norm_aiuiuc_wrapper(conv1, bn_is_training, \
            'BN1', reuse_vars=reuse)

        conv2 = tf.nn.relu(lf.conv2d(conv1_norm, W_conv2, stride=2) + b_conv2)
        conv2_norm = lf.batch_norm_aiuiuc_wrapper(conv2, bn_is_training, \
            'BN2', reuse_vars=reuse)

        conv3 = tf.nn.relu(lf.conv2d(conv2_norm, W_conv3, stride=2) + b_conv3)
        conv3_norm = lf.batch_norm_aiuiuc_wrapper(conv3, bn_is_training, \
            'BN3', reuse_vars=reuse)

        conv4 = tf.nn.relu(lf.conv2d(conv3_norm, W_conv4, stride=2) + b_conv4)
        conv4_norm = lf.batch_norm_aiuiuc_wrapper(conv4, bn_is_training, \
            'BN4', reuse_vars=reuse)

        dropout1 = tf.nn.dropout(conv4_norm, keep_prob)
        flatten1 = tf.reshape(dropout1, [-1, 4*4*1024])

        fc1 = tf.matmul(flatten1, W_fc1)+b_fc1

        return fc1
Project: divcolor    Author: aditya12agd5
def __cond_encoder(self, scope, input_tensor, bn_is_training, keep_prob, in_nch=1, reuse=False):

        lf = self.layer_factory
        input_tensor2d = tf.reshape(input_tensor, [self.flags.batch_size, \
            self.flags.img_height, self.flags.img_width, 1])
        # as_dimension() again recovers the static channel count as a plain int.
        nch = tensor_shape.as_dimension(input_tensor2d.get_shape()[3]).value
        nout = self.flags.hidden_size

        if not reuse:
            W_conv1 = lf.weight_variable(name='W_conv1_cond', shape=[5, 5, nch, 128])
            W_conv2 = lf.weight_variable(name='W_conv2_cond', shape=[5, 5, 128, 256])
            W_conv3 = lf.weight_variable(name='W_conv3_cond', shape=[5, 5, 256, 512])
            W_conv4 = lf.weight_variable(name='W_conv4_cond', shape=[4, 4, 512, self.flags.hidden_size])

            b_conv1 = lf.bias_variable(name='b_conv1_cond', shape=[128])
            b_conv2 = lf.bias_variable(name='b_conv2_cond', shape=[256])
            b_conv3 = lf.bias_variable(name='b_conv3_cond', shape=[512])
            b_conv4 = lf.bias_variable(name='b_conv4_cond', shape=[self.flags.hidden_size])
        else:
            W_conv1 = lf.weight_variable(name='W_conv1_cond')
            W_conv2 = lf.weight_variable(name='W_conv2_cond')
            W_conv3 = lf.weight_variable(name='W_conv3_cond')
            W_conv4 = lf.weight_variable(name='W_conv4_cond')

            b_conv1 = lf.bias_variable(name='b_conv1_cond')
            b_conv2 = lf.bias_variable(name='b_conv2_cond')
            b_conv3 = lf.bias_variable(name='b_conv3_cond')
            b_conv4 = lf.bias_variable(name='b_conv4_cond')

        conv1 = tf.nn.relu(lf.conv2d(input_tensor2d, W_conv1, stride=2) + b_conv1)
        conv1_norm = lf.batch_norm_aiuiuc_wrapper(conv1, bn_is_training, \
                'BN1_cond', reuse_vars=reuse)

        conv2 = tf.nn.relu(lf.conv2d(conv1_norm, W_conv2, stride=2) + b_conv2)
        conv2_norm = lf.batch_norm_aiuiuc_wrapper(conv2, bn_is_training, \
                'BN2_cond', reuse_vars=reuse)

        conv3 = tf.nn.relu(lf.conv2d(conv2_norm, W_conv3, stride=2) + b_conv3)
        conv3_norm = lf.batch_norm_aiuiuc_wrapper(conv3, bn_is_training, \
                'BN3_cond', reuse_vars=reuse)

        conv4 = tf.nn.relu(lf.conv2d(conv3_norm, W_conv4, stride=2) + b_conv4)
        conv4_norm = lf.batch_norm_aiuiuc_wrapper(conv4, bn_is_training, \
                'BN4_cond', reuse_vars=reuse)

        return conv1_norm, conv2_norm, conv3_norm, conv4_norm
Project: divcolor    Author: aditya12agd5
def __encoder(self, scope, input_tensor, bn_is_training, keep_prob, in_nch=2, reuse=False):

        lf = self.layer_factory

        input_tensor2d = tf.reshape(input_tensor, [self.flags.batch_size, \
                self.flags.img_height, self.flags.img_width, in_nch])

        nch = tensor_shape.as_dimension(input_tensor2d.get_shape()[3]).value

        if not reuse:
            W_conv1 = lf.weight_variable(name='W_conv1', shape=[5, 5, nch, 128])
            W_conv2 = lf.weight_variable(name='W_conv2', shape=[5, 5, 128, 256])
            W_conv3 = lf.weight_variable(name='W_conv3', shape=[5, 5, 256, 512])
            W_conv4 = lf.weight_variable(name='W_conv4', shape=[4, 4, 512, 1024])
            W_fc1 = lf.weight_variable(name='W_fc1', shape=[4*4*1024, self.flags.hidden_size * 2])

            b_conv1 = lf.bias_variable(name='b_conv1', shape=[128])
            b_conv2 = lf.bias_variable(name='b_conv2', shape=[256])
            b_conv3 = lf.bias_variable(name='b_conv3', shape=[512])
            b_conv4 = lf.bias_variable(name='b_conv4', shape=[1024])
            b_fc1 = lf.bias_variable(name='b_fc1', shape=[self.flags.hidden_size * 2])
        else:
            W_conv1 = lf.weight_variable(name='W_conv1')
            W_conv2 = lf.weight_variable(name='W_conv2')
            W_conv3 = lf.weight_variable(name='W_conv3')
            W_conv4 = lf.weight_variable(name='W_conv4')
            W_fc1 = lf.weight_variable(name='W_fc1')

            b_conv1 = lf.bias_variable(name='b_conv1')
            b_conv2 = lf.bias_variable(name='b_conv2')
            b_conv3 = lf.bias_variable(name='b_conv3')
            b_conv4 = lf.bias_variable(name='b_conv4')
            b_fc1 = lf.bias_variable(name='b_fc1')

        conv1 = tf.nn.relu(lf.conv2d(input_tensor2d, W_conv1, stride=2) + b_conv1)
        conv1_norm = lf.batch_norm_aiuiuc_wrapper(conv1, bn_is_training, \
            'BN1', reuse_vars=reuse)

        conv2 = tf.nn.relu(lf.conv2d(conv1_norm, W_conv2, stride=2) + b_conv2)
        conv2_norm = lf.batch_norm_aiuiuc_wrapper(conv2, bn_is_training, \
            'BN2', reuse_vars=reuse)

        conv3 = tf.nn.relu(lf.conv2d(conv2_norm, W_conv3, stride=2) + b_conv3)
        conv3_norm = lf.batch_norm_aiuiuc_wrapper(conv3, bn_is_training, \
            'BN3', reuse_vars=reuse)

        conv4 = tf.nn.relu(lf.conv2d(conv3_norm, W_conv4, stride=2) + b_conv4)
        conv4_norm = lf.batch_norm_aiuiuc_wrapper(conv4, bn_is_training, \
            'BN4', reuse_vars=reuse)

        dropout1 = tf.nn.dropout(conv4_norm, keep_prob)
        flatten1 = tf.reshape(dropout1, [-1, 4*4*1024])

        fc1 = tf.matmul(flatten1, W_fc1)+b_fc1

        return fc1