

Python spatial_transformer.transformer Method Code Examples

This article collects typical usage examples of the Python method spatial_transformer.transformer, drawn from open-source projects. If you are unsure what spatial_transformer.transformer does, how to call it, or how it is used in practice, the curated examples below may help. You can also explore further usage examples from the spatial_transformer module.


The following six code examples of the spatial_transformer.transformer method are shown, sorted by popularity by default.
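All of the examples follow the same pattern: a hidden layer is mapped to the six entries of a flat 2x3 affine matrix, which transformer then uses to warp an image. As a minimal sketch of the call itself (assuming the widely circulated spatial_transformer.py whose transformer(U, theta, out_size) signature these projects use, and assuming the TF 1.x imports import tensorflow as tf, import numpy as np, and tensorflow.contrib.slim behind the tf/np/slim names seen throughout this page), an identity transform returns the input unchanged:

import tensorflow as tf
from spatial_transformer import transformer

# Minimal sketch: warp a batch of images with an identity affine transform.
images = tf.placeholder(tf.float32, [8, 64, 64, 3])
# Flat 2x3 affine matrix [a, b, tx, c, d, ty]; this one is the identity.
identity = tf.constant([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], tf.float32)
theta = tf.tile(identity[None, :], [8, 1])  # one affine per batch element
warped = transformer(images, theta, (64, 64))  # same spatial size as images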

Example 1: stp_transformation

# Required module: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
def stp_transformation(prev_image, stp_input, num_masks):
  """Apply spatial transformer predictor (STP) to previous image.

  Args:
    prev_image: previous image to be transformed.
    stp_input: hidden layer to be used for computing STN parameters.
    num_masks: number of masks and hence the number of STP transformations.
  Returns:
    List of images transformed by the predicted STP parameters.
  """
  # Only import spatial transformer if needed.
  from spatial_transformer import transformer

  identity_params = tf.convert_to_tensor(
      np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
  transformed = []
  for i in range(num_masks - 1):
    params = slim.layers.fully_connected(
        stp_input, 6, scope='stp_params' + str(i),
        activation_fn=None) + identity_params
    transformed.append(transformer(prev_image, params))

  return transformed 
Author: ringringyi | Project: DOTA_models | Lines: 25 | Source: prediction_model.py
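A hypothetical driver for the function above (Examples 2 and 3 below are the same function taken from other projects), with illustrative names and shapes; in the real model, prev_image and the hidden layer come from the surrounding video-prediction network:

import tensorflow as tf

# Illustrative wiring only: names and shapes are made up for this sketch.
prev_image = tf.placeholder(tf.float32, [8, 64, 64, 3])
hidden = tf.placeholder(tf.float32, [8, 16, 16, 32])
stp_input = tf.reshape(hidden, [8, -1])  # flatten for the fully connected layer

transformed = stp_transformation(prev_image, stp_input, num_masks=10)
# num_masks - 1 warped candidates; a separate mask network decides how to
# composite them into the predicted next frame.
assert len(transformed) == 9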

Example 2: stp_transformation

# Required module: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
def stp_transformation(prev_image, stp_input, num_masks):
    """Apply spatial transformer predictor (STP) to previous image.

    Args:
        prev_image: previous image to be transformed.
        stp_input: hidden layer to be used for computing STN parameters.
        num_masks: number of masks and hence the number of STP transformations.
    Returns:
        List of images transformed by the predicted STP parameters.
    """
    # Only import spatial transformer if needed.
    from spatial_transformer import transformer

    identity_params = tf.convert_to_tensor(
        np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
    transformed = []
    for i in range(num_masks - 1):
        params = slim.layers.fully_connected(
            stp_input, 6, scope='stp_params' + str(i),
            activation_fn=None) + identity_params
        transformed.append(transformer(prev_image, params))

    return transformed 
Author: alexlee-gk | Project: video_prediction | Lines: 25 | Source: sv2p_model.py

Example 3: stp_transformation

# Required module: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
def stp_transformation(prev_image, stp_input, num_masks):
    """Apply spatial transformer predictor (STP) to previous image.

    Args:
        prev_image: previous image to be transformed.
        stp_input: hidden layer to be used for computing STN parameters.
        num_masks: number of masks and hence the number of STP transformations.
    Returns:
        List of images transformed by the predicted STP parameters.
     """
    # Only import spatial transformer if needed.
    from spatial_transformer import transformer

    identity_params = tf.convert_to_tensor(
        np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
    transformed = []
    for i in range(num_masks - 1):
        params = slim.layers.fully_connected(
            stp_input, 6, scope='stp_params' + str(i),
            activation_fn=None) + identity_params
        transformed.append(transformer(prev_image, params))

    return transformed 
Author: alexlee-gk | Project: video_prediction | Lines: 25 | Source: dna_model.py

Example 4: conv

# Required module: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, relu=True, padding=DEFAULT_PADDING, group=1, trainable=True):
        """
        k_h:    kernel height
        k_w:    kernel width
        c_o:    output channels
        s_h:    stride height
        s_w:    stride width
        """
        if isinstance(input, tuple):
            input = input[0]  # spatial transformer output; only the data tensor is needed

        self.validate_padding(padding)
        c_i = input.get_shape()[-1]  # input channels
        assert c_i % group == 0
        assert c_o % group == 0
        convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
        with tf.variable_scope(name) as scope:

            init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
            init_biases = tf.constant_initializer(0.0)
            # Integer division keeps the kernel shape integral under Python 3.
            kernel = self.make_var('weights', [k_h, k_w, c_i // group, c_o], init_weights, trainable)
            biases = self.make_var('biases', [c_o], init_biases, trainable)

            if group == 1:
                conv = convolve(input, kernel)
            else:
                # Grouped convolution: split the channels, convolve each group
                # independently, then concatenate the results.
                # tf.split/tf.concat take the tensor first and an axis keyword in TF >= 1.0.
                input_groups = tf.split(input, group, axis=3)
                kernel_groups = tf.split(kernel, group, axis=3)
                output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                conv = tf.concat(output_groups, axis=3)
            if relu:
                bias = tf.nn.bias_add(conv, biases)
                return tf.nn.relu(bias, name=scope.name)
            return tf.nn.bias_add(conv, biases, name=scope.name) 
Author: chenwuperth | Project: rgz_rcnn | Lines: 36 | Source: network.py
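The group argument implements grouped convolution in the AlexNet style: input channels and output kernels are split into group slices, each slice is convolved independently, and the results are concatenated back together. A standalone sketch of the same decomposition, assuming TF 1.x:

import tensorflow as tf

def grouped_conv(x, kernel, strides, padding, group):
    """Split/convolve/concat pattern from conv() above, as a free function."""
    if group == 1:
        return tf.nn.conv2d(x, kernel, strides, padding=padding)
    x_groups = tf.split(x, group, axis=3)       # each has c_i // group channels
    k_groups = tf.split(kernel, group, axis=3)  # each yields c_o // group maps
    y_groups = [tf.nn.conv2d(xg, kg, strides, padding=padding)
                for xg, kg in zip(x_groups, k_groups)]
    return tf.concat(y_groups, axis=3)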

Example 5: stp_transformation

# Required module: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
def stp_transformation(self, prev_image, stp_input, num_masks, reuse=None, suffix=''):
        """Apply spatial transformer predictor (STP) to previous image.

        Args:
          prev_image: previous image to be transformed.
          stp_input: hidden layer to be used for computing STN parameters.
          num_masks: number of masks and hence the number of STP transformations.
          reuse: whether to reuse the variables of the parameter layers.
          suffix: string appended to each parameter layer's variable scope
            (defaults to '' so the scope name below is always well formed).
        Returns:
          List of images transformed by the predicted STP parameters, and the
          list of the predicted parameter tensors themselves.
        """
        # Only import spatial transformer if needed.
        from spatial_transformer import transformer

        identity_params = tf.convert_to_tensor(
            np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
        transformed = []
        trafos = []
        for i in range(num_masks):
            params = slim.layers.fully_connected(
                stp_input, 6, scope='stp_params' + str(i) + suffix,
                activation_fn=None,
                reuse=reuse) + identity_params
            outsize = (prev_image.get_shape()[1], prev_image.get_shape()[2])
            transformed.append(transformer(prev_image, params, outsize))
            trafos.append(params)

        return transformed, trafos 
Author: alexlee-gk | Project: video_prediction | Lines: 29 | Source: sna_model.py
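Unlike Examples 1-3, this variant threads reuse and suffix through to the parameter layers, so the same STP weights can be shared across repeated calls (for instance across time steps), and it additionally returns the predicted parameters. A hypothetical sketch of such sharing; model and the inputs are illustrative names only:

out_t0, trafos_t0 = model.stp_transformation(
    image_t0, stp_input_t0, num_masks=10, reuse=None, suffix='_shared')
out_t1, trafos_t1 = model.stp_transformation(
    image_t1, stp_input_t1, num_masks=10, reuse=True, suffix='_shared')
# The second call reuses the 'stp_params{i}_shared' variables created first.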

Example 6: obj_ll

# Required module: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
def obj_ll(self, images, z_where):
        num_steps = self.conf.num_steps
        patch_h, patch_w = self.conf.patch_height, self.conf.patch_width
        n, scene_h, scene_w, chans = map(int, images.shape)

        # Extract object patches (also referred to as y)
        patches, object_scores = stn.batch_transformer(images, z_where,
                                                       [patch_h, patch_w])
        patches = tf.identity(patches, name='y')

        # Compute background score iteratively by 'cutting out' each object
        cur_bg_score = tf.ones_like(object_scores[:, 0])
        bg_maps = [cur_bg_score]
        obj_visible = []
        for step in range(num_steps):
            # Everything outside the scene is unobserved -> pad bg_score with zeros
            padded_bg_score = tf.pad(cur_bg_score, [[0, 0], [1, 1], [1, 1]])
            padded_bg_score = tf.expand_dims(padded_bg_score, -1)
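            # The added [0., 0., 1., 0., 0., 1.] bumps the translation entries
            # (indices 2 and 5 of the flat 2x3 affine), apparently to account
            # for the one-pixel zero padding applied above.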
            shifted_z_where = z_where[:, step] + [0., 0., 1., 0., 0., 1.]
            vis, _ = stn.transformer(padded_bg_score, shifted_z_where,
                                     [patch_h, patch_w])
            obj_visible.append(vis[..., 0])

            cur_bg_score *= 1 - object_scores[:, step]
            # cur_bg_score = tf.clip_by_value(cur_bg_score, 0.0, 1.0)
            bg_maps.append(cur_bg_score)

        tf.identity(cur_bg_score, name='bg_score')
        obj_visible = tf.stack(obj_visible, axis=1)
        overlap_ratio = 1 - tf.reduce_mean(obj_visible, axis=[2, 3])

        flattened_patches = tf.reshape(patches, [n * num_steps, patch_h * patch_w * chans])
        spn_input = flattened_patches

        pixels_visible = tf.reshape(obj_visible, [n, num_steps, patch_h * patch_w, 1])
        channels_visible = tf.tile(pixels_visible, [1, 1, 1, chans])
        channels_visible = tf.reshape(channels_visible, [n, num_steps, patch_h * patch_w * chans])
        channels_visible = tf.identity(channels_visible, name='obj_vis')
        marginalize = 1 - channels_visible
        marginalize = tf.reshape(marginalize, [n * num_steps, patch_h * patch_w * chans])

        spn_output = self.obj_spn.forward(spn_input, marginalize)
        p_ys = spn_output[:, 0]  # tf.reduce_logsumexp(spn_output + tf.log(0.1), axis=1)
        p_ys = tf.reshape(p_ys, [n, num_steps])
        # Scale by patch size to approximate a calibrated likelihood over x
        p_ys *= z_where[:, :, 0] * z_where[:, :, 4]

        return p_ys, bg_maps, overlap_ratio 
Author: stelzner | Project: supair | Lines: 50 | Source: model.py
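The multiplicative background-score recursion above is easy to sanity-check in isolation: each step multiplies in (1 - object_score), so any pixel claimed by some object ends up with a low background score. A toy numpy sketch with made-up per-pixel scores (two pixels, two steps):

import numpy as np

object_scores = np.array([[0.9, 0.2],   # step 0: per-pixel object scores
                          [0.1, 0.7]])  # step 1
bg = np.ones(2)
for step in range(2):
    bg *= 1.0 - object_scores[step]
print(bg)  # [0.09 0.24]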


Note: The spatial_transformer.transformer method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, so consult each project's License before distributing or using the code. Do not reproduce this article without permission.