Python spatial_transformer.transformer Method Code Examples

This article collects typical usage examples of the Python method spatial_transformer.transformer, gathered from open-source projects. If you are wondering what spatial_transformer.transformer does, how to call it, or what idiomatic usage looks like, the curated examples below should help; they also show the surrounding context in which the spatial_transformer module is used.


Six code examples of the spatial_transformer.transformer method are shown below, ordered by popularity.
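Before the examples, here is a minimal direct-call sketch. It assumes the widely used TF 1.x spatial_transformer.py, whose transformer(U, theta, out_size) bilinearly resamples a batch of images U on the sampling grid given by a flattened 2x3 affine matrix theta per image; the identity parameters below should return the input essentially unchanged. The shapes are illustrative assumptions, not taken from the examples:

import numpy as np
import tensorflow as tf
from spatial_transformer import transformer

# One 8x8 single-channel image.
images = tf.placeholder(tf.float32, [1, 8, 8, 1])
# Flattened 2x3 affine parameters per image; these are the identity transform.
theta = tf.constant([[1.0, 0.0, 0.0, 0.0, 1.0, 0.0]], tf.float32)
# Resample the input on the transformed grid, keeping the 8x8 output size.
warped = transformer(images, theta, (8, 8))

with tf.Session() as sess:
    out = sess.run(warped, {images: np.random.rand(1, 8, 8, 1).astype(np.float32)})
    print(out.shape)  # (1, 8, 8, 1)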

Example 1: stp_transformation

# Required import: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
# The snippet below additionally assumes the TF 1.x-era imports:
#   import numpy as np
#   import tensorflow as tf
#   import tensorflow.contrib.slim as slim
def stp_transformation(prev_image, stp_input, num_masks):
  """Apply spatial transformer predictor (STP) to previous image.

  Args:
    prev_image: previous image to be transformed.
    stp_input: hidden layer to be used for computing STN parameters.
    num_masks: number of masks and hence the number of STP transformations.
  Returns:
    List of images transformed by the predicted STP parameters.
  """
  # Only import spatial transformer if needed.
  from spatial_transformer import transformer

  identity_params = tf.convert_to_tensor(
      np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
  transformed = []
  for i in range(num_masks - 1):
    params = slim.layers.fully_connected(
        stp_input, 6, scope='stp_params' + str(i),
        activation_fn=None) + identity_params
    transformed.append(transformer(prev_image, params))

  return transformed 
Developer: ringringyi, Project: DOTA_models, Lines: 25, Source: prediction_model.py
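A hedged usage sketch, continuing Example 1's assumed imports and definition (the shapes and num_masks value are illustrative):

# Batch of 4 previous 64x64 RGB frames and a flattened hidden state.
prev_image = tf.placeholder(tf.float32, [4, 64, 64, 3])
stp_input = tf.placeholder(tf.float32, [4, 128])
warped = stp_transformation(prev_image, stp_input, num_masks=10)
# warped is a Python list of 9 tensors (num_masks - 1 predicted transforms),
# each the previous frame resampled under one predicted affine transform.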

Example 2: stp_transformation

# Required import: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
# (tf, np, and slim as in Example 1.)
def stp_transformation(prev_image, stp_input, num_masks):
    """Apply spatial transformer predictor (STP) to previous image.

    Args:
        prev_image: previous image to be transformed.
        stp_input: hidden layer to be used for computing STN parameters.
        num_masks: number of masks and hence the number of STP transformations.
    Returns:
        List of images transformed by the predicted STP parameters.
    """
    # Only import spatial transformer if needed.
    from spatial_transformer import transformer

    identity_params = tf.convert_to_tensor(
        np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
    transformed = []
    for i in range(num_masks - 1):
        params = slim.layers.fully_connected(
            stp_input, 6, scope='stp_params' + str(i),
            activation_fn=None) + identity_params
        transformed.append(transformer(prev_image, params))

    return transformed 
Developer: alexlee-gk, Project: video_prediction, Lines: 25, Source: sv2p_model.py

Example 3: stp_transformation

# Required import: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
# (tf, np, and slim as in Example 1.)
def stp_transformation(prev_image, stp_input, num_masks):
    """Apply spatial transformer predictor (STP) to previous image.

    Args:
        prev_image: previous image to be transformed.
        stp_input: hidden layer to be used for computing STN parameters.
        num_masks: number of masks and hence the number of STP transformations.
    Returns:
        List of images transformed by the predicted STP parameters.
     """
    # Only import spatial transformer if needed.
    from spatial_transformer import transformer

    identity_params = tf.convert_to_tensor(
        np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
    transformed = []
    for i in range(num_masks - 1):
        params = slim.layers.fully_connected(
            stp_input, 6, scope='stp_params' + str(i),
            activation_fn=None) + identity_params
        transformed.append(transformer(prev_image, params))

    return transformed 
Developer: alexlee-gk, Project: video_prediction, Lines: 25, Source: dna_model.py

Example 4: conv

# Required import: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
# (This method consumes a spatial transformer's output; it also assumes TF 1.x
# and that self.make_var, self.validate_padding, and DEFAULT_PADDING are
# defined elsewhere in the class.)
def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, relu=True, padding=DEFAULT_PADDING, group=1, trainable=True):
        """
        k_h:    kernel height
        k_w:    kernel width
        c_o:    output channels
        s_h:    stride height
        s_w:    stride width
        """
        if isinstance(input, tuple):
            input = input[0]  # spatial transformer output; only keep the data

        self.validate_padding(padding)
        c_i = int(input.get_shape()[-1])  # input channels
        assert c_i % group == 0
        assert c_o % group == 0
        convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
        with tf.variable_scope(name) as scope:

            init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
            init_biases = tf.constant_initializer(0.0)
            # Integer division keeps the kernel shape valid under Python 3.
            kernel = self.make_var('weights', [k_h, k_w, c_i // group, c_o], init_weights, trainable)
            biases = self.make_var('biases', [c_o], init_biases, trainable)

            if group == 1:
                conv = convolve(input, kernel)
            else:
                # Grouped convolution: split channels, convolve each slice, re-concatenate.
                # (tf.split/tf.concat use the TF >= 1.0 argument order here.)
                input_groups = tf.split(input, group, axis=3)
                kernel_groups = tf.split(kernel, group, axis=3)
                output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                conv = tf.concat(output_groups, axis=3)
            if relu:
                bias = tf.nn.bias_add(conv, biases)
                return tf.nn.relu(bias, name=scope.name)
            return tf.nn.bias_add(conv, biases, name=scope.name)
Developer: chenwuperth, Project: rgz_rcnn, Lines: 36, Source: network.py
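The group parameter implements AlexNet-style grouped convolution: the input channels and the kernel's output channels are split into group slices, each slice pair is convolved independently, and the results are concatenated back together. A standalone sketch of just that pattern (TF 1.x; shapes and names are illustrative):

import tensorflow as tf

group = 2
x = tf.placeholder(tf.float32, [None, 16, 16, 8])      # 8 input channels
kernel = tf.get_variable('k', [3, 3, 8 // group, 4])   # 4 output channels in total
x_groups = tf.split(x, group, axis=3)                  # two 4-channel input slices
k_groups = tf.split(kernel, group, axis=3)             # two 2-filter kernel slices
y = tf.concat([tf.nn.conv2d(xg, kg, [1, 1, 1, 1], padding='SAME')
               for xg, kg in zip(x_groups, k_groups)], axis=3)  # back to 4 channels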

Example 5: stp_transformation

# Required import: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
# (tf, np, and slim as in Example 1.)
def stp_transformation(self, prev_image, stp_input, num_masks, reuse=None, suffix=''):
        """Apply spatial transformer predictor (STP) to previous image.

        Args:
          prev_image: previous image to be transformed.
          stp_input: hidden layer to be used for computing STN parameters.
          num_masks: number of masks and hence the number of STP transformations.
        Returns:
          List of images transformed by the predicted STP parameters.
        """
        # Only import spatial transformer if needed.
        from spatial_transformer import transformer

        identity_params = tf.convert_to_tensor(
            np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
        transformed = []
        trafos = []
        for i in range(num_masks):
            # suffix defaults to '' above so this scope-name concatenation cannot fail on None.
            params = slim.layers.fully_connected(
                stp_input, 6, scope='stp_params' + str(i) + suffix,
                activation_fn=None,
                reuse=reuse) + identity_params
            outsize = (prev_image.get_shape()[1], prev_image.get_shape()[2])
            transformed.append(transformer(prev_image, params, outsize))
            trafos.append(params)

        return transformed, trafos 
Developer: alexlee-gk, Project: video_prediction, Lines: 29, Source: sna_model.py
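Compared with Examples 1-3, this variant applies a transform for every mask (range(num_masks) rather than num_masks - 1), passes an explicit output size to transformer, and returns the predicted affine parameters alongside the warped images, which is convenient when the transforms themselves need to be inspected or reused.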

Example 6: obj_ll

# Required import: import spatial_transformer [as alias]
# Or: from spatial_transformer import transformer [as alias]
# (The code below uses the module as stn, i.e. import spatial_transformer as stn,
# and relies on a project-specific variant whose transformer/batch_transformer
# return the warped output together with sampling scores.)
def obj_ll(self, images, z_where):
        num_steps = self.conf.num_steps
        patch_h, patch_w = self.conf.patch_height, self.conf.patch_width
        n, scene_h, scene_w, chans = map(int, images.shape)

        # Extract object patches (also referred to as y)
        patches, object_scores = stn.batch_transformer(images, z_where,
                                                       [patch_h, patch_w])
        patches = tf.identity(patches, name='y')

        # Compute background score iteratively by 'cutting out' each object
        cur_bg_score = tf.ones_like(object_scores[:, 0])
        bg_maps = [cur_bg_score]
        obj_visible = []
        for step in range(num_steps):
            # Everything outside the scene is unobserved -> pad bg_score with zeros
            padded_bg_score = tf.pad(cur_bg_score, [[0, 0], [1, 1], [1, 1]])
            padded_bg_score = tf.expand_dims(padded_bg_score, -1)
            shifted_z_where = z_where[:, step] + [0., 0., 1., 0., 0., 1.]
            vis, _ = stn.transformer(padded_bg_score, shifted_z_where,
                                     [patch_h, patch_w])
            obj_visible.append(vis[..., 0])

            cur_bg_score *= 1 - object_scores[:, step]
            # cur_bg_score = tf.clip_by_value(cur_bg_score, 0.0, 1.0)
            bg_maps.append(cur_bg_score)

        tf.identity(cur_bg_score, name='bg_score')
        obj_visible = tf.stack(obj_visible, axis=1)
        overlap_ratio = 1 - tf.reduce_mean(obj_visible, axis=[2, 3])

        flattened_patches = tf.reshape(patches, [n * num_steps, patch_h * patch_w * chans])
        spn_input = flattened_patches

        pixels_visible = tf.reshape(obj_visible, [n, num_steps, patch_h * patch_w, 1])
        channels_visible = tf.tile(pixels_visible, [1, 1, 1, chans])
        channels_visible = tf.reshape(channels_visible, [n, num_steps, patch_h * patch_w * chans])
        channels_visible = tf.identity(channels_visible, name='obj_vis')
        marginalize = 1 - channels_visible
        marginalize = tf.reshape(marginalize, [n * num_steps, patch_h * patch_w * chans])

        spn_output = self.obj_spn.forward(spn_input, marginalize)
        p_ys = spn_output[:, 0]  # tf.reduce_logsumexp(spn_output + tf.log(0.1), axis=1)
        p_ys = tf.reshape(p_ys, [n, num_steps])
        # Scale by patch size to approximate a calibrated likelihood over x
        p_ys *= z_where[:, :, 0] * z_where[:, :, 4]

        return p_ys, bg_maps, overlap_ratio 
Developer: stelzner, Project: supair, Lines: 50, Source: model.py
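This example appears to use the spatial transformer in both directions: batch_transformer reads object patches out of the scene at each z_where, while transformer projects the running background-score map into each object's patch coordinates (the map is zero-padded first, per the inline comment, so off-scene pixels count as unobserved). The resulting per-pixel visibility is then passed to the SPN so that occluded or off-scene pixels are marginalized out of the patch likelihood.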


Note: the spatial_transformer.transformer examples above were collected from open-source projects hosted on platforms such as GitHub. Copyright for each snippet remains with its original authors; consult the corresponding project's License before redistributing or reusing the code.