

Python utils.deconv_output_length Method Code Examples

This article collects typical usage examples of the tensorflow.python.layers.utils.deconv_output_length method in Python. If you are wondering how exactly to use utils.deconv_output_length, or are looking for working examples of it, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.python.layers.utils.


The following presents 5 code examples of the utils.deconv_output_length method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
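
Before the examples, it helps to know what the method computes: given the length of one spatial dimension of the input, the kernel size, the padding mode, and the stride, deconv_output_length returns the corresponding output length of a transposed convolution. The following is a minimal paraphrase of that arithmetic for the 'same' and 'valid' cases exercised below (an illustrative sketch, not the library source):

def deconv_output_length_sketch(input_length, filter_size, padding, stride):
    # A transposed convolution inverts the forward conv length formula:
    #   'same'  -> input_length * stride
    #   'valid' -> input_length * stride + max(filter_size - stride, 0)
    if input_length is None:
        return None  # an unknown static dimension stays unknown
    length = input_length * stride
    if padding == 'valid':
        length += max(filter_size - stride, 0)
    return length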

Example 1: _compute_output_shape

# Required module import: from tensorflow.python.layers import utils [as alias]
# Or alternatively: from tensorflow.python.layers.utils import deconv_output_length [as alias]
def _compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    output_shape = list(input_shape)
    if self.data_format == 'channels_first':
      c_axis, h_axis, w_axis = 1, 2, 3
    else:
      c_axis, h_axis, w_axis = 3, 1, 2

    kernel_h, kernel_w = self.kernel_size
    stride_h, stride_w = self.strides

    output_shape[c_axis] = self.filters
    output_shape[h_axis] = utils.deconv_output_length(
        output_shape[h_axis], kernel_h, self.padding, stride_h)
    output_shape[w_axis] = utils.deconv_output_length(
        output_shape[w_axis], kernel_w, self.padding, stride_w)
    return tensor_shape.TensorShape(output_shape) 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 19, Source file: convolutional.py
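
As a quick sanity check of Example 1's shape arithmetic, the helper can also be called directly with integer lengths (the numbers here are hypothetical, not from the project above):

from tensorflow.python.layers import utils

# input length 7, kernel 3, stride 2
print(utils.deconv_output_length(7, 3, 'same', 2))   # 14 = 7 * 2
print(utils.deconv_output_length(7, 3, 'valid', 2))  # 15 = 7 * 2 + max(3 - 2, 0)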

Example 2: conv2d_transpose

# Required module import: from tensorflow.python.layers import utils [as alias]
# Or alternatively: from tensorflow.python.layers.utils import deconv_output_length [as alias]
def conv2d_transpose(x, W, strides=(1, 1), padding="SAME"):
  strides = list(strides)

  # Compute output size, cf. tf.layers.Conv2DTranspose
  W_shape = tf.shape(W)
  inputs_shape = tf.shape(x)

  # Infer the dynamic output shape:
  # deconv_output_length expects the lowercase padding strings 'same'/'valid',
  # so normalize before the length computation (the uppercase form would
  # silently fall through to the 'same' formula for VALID padding)
  out_height = deconv_output_length(inputs_shape[1], W_shape[0], padding.lower(), strides[0])
  out_width = deconv_output_length(inputs_shape[2], W_shape[1], padding.lower(), strides[1])
  output_shape = (inputs_shape[0], out_height, out_width, W_shape[2])  # filter layout: [h, w, out_c, in_c]

  return tf.nn.conv2d_transpose(x, W, tf.stack(output_shape), strides=[1] + strides + [1], padding=padding) 
Developer ID: tobiasfshr, Project: MOTSFusion, Lines of code: 15, Source file: Util.py
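
A usage sketch for this helper, assuming TF 1.x graph mode as in the project (the placeholder shapes and variable name are illustrative only):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 16, 16, 8])  # NHWC input
W = tf.get_variable('W', [3, 3, 4, 8])             # [h, w, out_c, in_c], the layout tf.nn.conv2d_transpose expects
y = conv2d_transpose(x, W, strides=(2, 2), padding='SAME')
# With 'SAME' padding and stride 2, each spatial dim doubles: y is [N, 32, 32, 4]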

Example 3: call

# Required module import: from tensorflow.python.layers import utils [as alias]
# Or alternatively: from tensorflow.python.layers.utils import deconv_output_length [as alias]
def call(self, inputs):
    inputs_shape = array_ops.shape(inputs)
    batch_size = inputs_shape[0]
    if self.data_format == 'channels_first':
      c_axis, h_axis, w_axis = 1, 2, 3
    else:
      c_axis, h_axis, w_axis = 3, 1, 2

    height, width = inputs_shape[h_axis], inputs_shape[w_axis]
    kernel_h, kernel_w = self.kernel_size
    stride_h, stride_w = self.strides

    # Infer the dynamic output shape:
    out_height = utils.deconv_output_length(height,
                                            kernel_h,
                                            self.padding,
                                            stride_h)
    out_width = utils.deconv_output_length(width,
                                           kernel_w,
                                           self.padding,
                                           stride_w)
    if self.data_format == 'channels_first':
      output_shape = (batch_size, self.filters, out_height, out_width)
      strides = (1, 1, stride_h, stride_w)
    else:
      output_shape = (batch_size, out_height, out_width, self.filters)
      strides = (1, stride_h, stride_w, 1)

    output_shape_tensor = array_ops.stack(output_shape)
    outputs = nn.conv2d_transpose(
        inputs,
        self.kernel,
        output_shape_tensor,
        strides,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format, ndim=4))

    # Infer the static output shape:
    out_shape = inputs.get_shape().as_list()
    out_shape[c_axis] = self.filters
    out_shape[h_axis] = utils.deconv_output_length(out_shape[h_axis],
                                                   kernel_h,
                                                   self.padding,
                                                   stride_h)
    out_shape[w_axis] = utils.deconv_output_length(out_shape[w_axis],
                                                   kernel_w,
                                                   self.padding,
                                                   stride_w)
    outputs.set_shape(out_shape)

    if self.bias is not None:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 61, Source file: convolutional.py
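
In user code this call method is normally reached through the layer's functional wrapper rather than invoked directly. A minimal TF 1.x sketch with hypothetical shapes:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 14, 14, 64])
# 'same' padding with stride 2: deconv_output_length yields 14 * 2 = 28 per spatial dim
y = tf.layers.conv2d_transpose(x, filters=32, kernel_size=3, strides=2, padding='same')
print(y.shape)  # (?, 28, 28, 32), the static shape fixed by set_shape in call()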

Example 4: call

# Required module import: from tensorflow.python.layers import utils [as alias]
# Or alternatively: from tensorflow.python.layers.utils import deconv_output_length [as alias]
def call(self, inputs):
    inputs_shape = array_ops.shape(inputs)
    batch_size = inputs_shape[0]
    if self.data_format == 'channels_first':
      c_axis, h_axis, w_axis = 1, 2, 3
    else:
      c_axis, h_axis, w_axis = 3, 1, 2

    height, width = inputs_shape[h_axis], inputs_shape[w_axis]
    kernel_h, kernel_w = self.kernel_size
    stride_h, stride_w = self.strides

    # Infer the dynamic output shape:
    out_height = utils.deconv_output_length(height,
                                            kernel_h,
                                            self.padding,
                                            stride_h)
    out_width = utils.deconv_output_length(width,
                                           kernel_w,
                                           self.padding,
                                           stride_w)
    if self.data_format == 'channels_first':
      output_shape = (batch_size, self.filters, out_height, out_width)
      strides = (1, 1, stride_h, stride_w)
    else:
      output_shape = (batch_size, out_height, out_width, self.filters)
      strides = (1, stride_h, stride_w, 1)

    output_shape_tensor = array_ops.stack(output_shape)
    outputs = nn.conv2d_transpose(
        inputs,
        self.kernel,
        output_shape_tensor,
        strides,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format, ndim=4))

    if context.in_graph_mode():
      # Infer the static output shape:
      out_shape = inputs.get_shape().as_list()
      out_shape[c_axis] = self.filters
      out_shape[h_axis] = utils.deconv_output_length(out_shape[h_axis],
                                                     kernel_h,
                                                     self.padding,
                                                     stride_h)
      out_shape[w_axis] = utils.deconv_output_length(out_shape[w_axis],
                                                     kernel_w,
                                                     self.padding,
                                                     stride_w)
      outputs.set_shape(out_shape)

    if self.use_bias:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.activation is not None:
      return self.activation(outputs)
    return outputs 
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 62, Source file: convolutional.py
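
This later revision differs from Example 3 in two details visible above: static shape inference is only performed in graph mode (the context.in_graph_mode() guard, since eager tensors already carry concrete shapes), and the bias branch tests the use_bias flag rather than the bias variable itself.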

Example 5: style_swap

# Required module import: from tensorflow.python.layers import utils [as alias]
# Or alternatively: from tensorflow.python.layers.utils import deconv_output_length [as alias]
def style_swap(content, style, patch_size, stride):
    '''Efficiently swap content feature patches with nearest-neighbor style patches
       Original paper: https://arxiv.org/abs/1612.04337
       Adapted from: https://github.com/rtqichen/style-swap/blob/master/lib/NonparametricPatchAutoencoderFactory.lua
    '''
    nC = tf.shape(style)[-1]  # Num channels of input content feature and style-swapped output

    ### Extract patches from style image that will be used for conv/deconv layers
    style_patches = tf.extract_image_patches(style, [1,patch_size,patch_size,1], [1,stride,stride,1], [1,1,1,1], 'VALID')
    before_reshape = tf.shape(style_patches)  # [N, rows, cols, patch_size*patch_size*nC]
    style_patches = tf.reshape(style_patches, [before_reshape[1]*before_reshape[2],patch_size,patch_size,nC])
    style_patches = tf.transpose(style_patches, [1,2,3,0])  # [patch_size, patch_size, in_c, out_c]

    # Normalize each style patch
    style_patches_norm = tf.nn.l2_normalize(style_patches, dim=3)

    # Compute cross-correlation/nearest neighbors of patches by using style patches as conv filters
    ss_enc = tf.nn.conv2d(content,
                          style_patches_norm,
                          [1,stride,stride,1],
                          'VALID')

    # For each spatial position, find the index of the max along the channel/patch dim
    ss_argmax = tf.argmax(ss_enc, axis=3)
    encC = tf.shape(ss_enc)[-1]  # Num channels in intermediate conv output, same as # of patches
    
    # One-hot encode argmax with same size as ss_enc, with 1's in max channel idx for each spatial pos
    ss_oh = tf.one_hot(ss_argmax, encC, 1., 0., 3)

    # Calc size of transposed conv out
    deconv_out_H = utils.deconv_output_length(tf.shape(ss_oh)[1], patch_size, 'valid', stride)
    deconv_out_W = utils.deconv_output_length(tf.shape(ss_oh)[2], patch_size, 'valid', stride)
    deconv_out_shape = tf.stack([1,deconv_out_H,deconv_out_W,nC])

    # Deconv back to original content size with highest matching (unnormalized) style patch swapped in for each content patch
    ss_dec = tf.nn.conv2d_transpose(ss_oh,
                                    style_patches,
                                    deconv_out_shape,
                                    [1,stride,stride,1],
                                    'VALID')

    ### Interpolate to average overlapping patch locations
    ss_oh_sum = tf.reduce_sum(ss_oh, axis=3, keep_dims=True)

    filter_ones = tf.ones([patch_size,patch_size,1,1], dtype=tf.float32)
    
    deconv_out_shape = tf.stack([1,deconv_out_H,deconv_out_W,1])  # Same spatial size as ss_dec with 1 channel

    counting = tf.nn.conv2d_transpose(ss_oh_sum,
                                         filter_ones,
                                         deconv_out_shape,
                                         [1,stride,stride,1],
                                         'VALID')

    counting = tf.tile(counting, [1,1,1,nC])  # Repeat along channel dim to make same size as ss_dec

    interpolated_dec = tf.divide(ss_dec, counting)

    return interpolated_dec

Developer ID: eridgd, Project: WCT-TF, Lines of code: 63, Source file: ops.py
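
A usage sketch for style_swap with dummy feature maps (the shapes are illustrative, not taken from the WCT-TF project):

import tensorflow as tf

# Batch-of-1 feature maps, e.g. encoder activations for content and style images
content_feat = tf.placeholder(tf.float32, [1, 32, 32, 256])
style_feat = tf.placeholder(tf.float32, [1, 32, 32, 256])

# Swap each 3x3 content patch for its nearest-neighbor style patch.
# With patch_size=3 and stride=1, the 'valid' conv gives a 30x30 map and
# deconv_output_length(30, 3, 'valid', 1) = 32 restores the input size.
swapped = style_swap(content_feat, style_feat, patch_size=3, stride=1)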

