

Python functions.transpose Method Code Examples

This article collects typical usage examples of the chainer.functions.transpose method in Python. If you are wondering what functions.transpose does, how to call it, or what its usage looks like in practice, the curated examples below should help. You can also explore further usage examples of other methods in the containing module, chainer.functions.


The following shows 15 code examples of functions.transpose, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
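
Before the examples, a minimal sketch of what chainer.functions.transpose does may help: it permutes the axes of an array or Variable, much like numpy.transpose. The shape and values below are illustrative assumptions, not taken from any example.

import numpy as np
import chainer.functions as F

# A (batch, length, channels) array; the shape is assumed purely for illustration.
x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
# Move channels before length, giving a (batch, channels, length) layout.
y = F.transpose(x, (0, 2, 1))
print(y.shape)  # (2, 4, 3)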

Example 1: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def __call__(self, x):
        if self.dr:
            with chainer.using_config('train', True):
                x = F.dropout(x, self.dr)
        if self.gap:
            x = F.sum(x, axis=(2,3))
        N = x.shape[0]
        # The code below is copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
        feature = F.reshape(F.leaky_relu(x), (N, -1))
        m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
        m0 = F.broadcast_to(m, (N, self.B * self.C, N))
        m1 = F.transpose(m0, (2, 1, 0))
        d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
        d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
        h = F.concat([feature, d])

        h = self.l(h)
        return h 
Author: pstuvwx | Project: Deep_VoiceChanger | Lines: 20 | Source file: block.py
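
The transpose with axes (2, 1, 0) above (and again in Example 11 below) is the step that turns a per-sample feature tensor into all pairwise differences across the batch for minibatch discrimination. The standalone sketch below isolates that step; the shapes are small assumed values, not the ones used in the project.

import numpy as np
import chainer.functions as F

N, B, C = 4, 5, 3                                  # batch size and kernel dimensions (assumed)
m = np.random.randn(N, B * C, 1).astype(np.float32)
m0 = F.broadcast_to(m, (N, B * C, N))              # m0[i, :, j] equals m[i, :, 0]
m1 = F.transpose(m0, (2, 1, 0))                    # m1[i, :, j] equals m[j, :, 0]
d = F.absolute(F.reshape(m0 - m1, (N, B, C, N)))   # |feature difference| for every sample pair (i, j)
print(d.shape)  # (4, 5, 3, 4)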

Example 2: block_embed

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def block_embed(embed, x, dropout=0.):
    """Embedding function followed by convolution

    Args:
        embed (callable): A :func:`~chainer.functions.embed_id` function
            or :class:`~chainer.links.EmbedID` link.
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Input variable, which
            is a :math:`(B, L)`-shaped int array. Its first dimension
            :math:`(B)` is assumed to be the *minibatch dimension*.
            The second dimension :math:`(L)` is the length of padded
            sentences.
        dropout (float): Dropout ratio.

    Returns:
        ~chainer.Variable: Output variable. A float array with shape
        of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
        of word embedding.

    """
    e = embed(x)
    e = F.dropout(e, ratio=dropout)
    e = F.transpose(e, (0, 2, 1))
    e = e[:, :, :, None]
    return e 
Author: Pinafore | Project: qb | Lines: 27 | Source file: nets.py
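
As a quick usage sketch of block_embed defined above (assuming chainer.functions is imported as F in the same module): the vocabulary size, embedding dimension, and input shape below are assumptions chosen only to show that a (B, L) int array of token ids comes out in the (B, N, L, 1) layout described in the docstring.

import numpy as np
import chainer.links as L

embed = L.EmbedID(100, 16)            # vocabulary of 100 ids, embedding dimension N=16 (assumed)
x = np.zeros((4, 7), dtype=np.int32)  # (B=4, L=7) padded sentence ids (assumed)
e = block_embed(embed, x, dropout=0.1)
print(e.shape)  # (4, 16, 7, 1), i.e. (B, N, L, 1)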

Example 3: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def __call__(self, x, mask):
        self.m.W.data = self.xp.array(self.maskW)  # mask windows are set to 1
        h = self.c(x*mask)  # (B, C, H, W)
        B,C,H,W = h.shape
        b = F.transpose(F.broadcast_to(self.c.b,(B,H,W,C)),(0,3,1,2))
        h = h - b
        mask_sums = self.m(mask)
        mask_new = (self.xp.sign(mask_sums.data-0.5)+1.0)*0.5
        mask_new_b = mask_new.astype("bool")
        
        mask_sums = F.where(mask_new_b,mask_sums,0.01*Variable(self.xp.ones(mask_sums.shape).astype("f")))
        h = h/mask_sums + b
         
        mask_new = Variable(mask_new)
        h = F.where(mask_new_b, h, Variable(self.xp.zeros(h.shape).astype("f"))) 

        if self.bn:
            h = self.batchnorm(h)
        if self.noise:
            h = add_noise(h)
        if self.dropout:
            h = F.dropout(h)
        if self.activation is not None:
            h = self.activation(h)
        return h, mask_new 
Author: SeitaroShinagawa | Project: chainer-partial_convolution_image_inpainting | Lines: 27 | Source file: net.py

Example 4: embed_xs_with_prediction

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def embed_xs_with_prediction(self, xs, labels=None, batch='concat'):
        predicted_exs = self.bilm.predict_embed(
            xs, self.embed.W,
            labels=labels,
            dropout=self.config['dropout'],
            mode=self.config['mode'],
            temp=self.config['temp'],
            word_lower_bound=self.config['word_lower_bound'],
            gold_lower_bound=self.config['gold_lower_bound'],
            gumbel=self.config['gumbel'],
            residual=self.config['residual'],
            wordwise=self.config['wordwise'],
            add_original=self.config['add_original'],
            augment_ratio=self.config['augment_ratio'])
        if batch == 'concat':
            predicted_ex_block = F.pad_sequence(predicted_exs, padding=0.)
            predicted_ex_block = F.transpose(
                predicted_ex_block, (0, 2, 1))[:, :, :, None]
            return predicted_ex_block
        elif batch == 'list':
            return predicted_exs
        else:
            raise NotImplementedError 
Author: pfnet-research | Project: contextual_augmentation | Lines: 25 | Source file: nets.py

Example 5: inverse

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def inverse(self, y):
        scale_sqr = self.scale * self.scale
        batch, y_channels, y_height, y_width = y.shape
        assert (y_channels % scale_sqr == 0)
        x_channels = y_channels // scale_sqr
        x_height = y_height * self.scale
        x_width = y_width * self.scale

        x = F.transpose(y, axes=(0, 2, 3, 1))
        x = x.reshape(batch, y_height, y_width, scale_sqr, x_channels)
        d3_split_seq = F.split_axis(x, indices_or_sections=(x.shape[3] // self.scale), axis=3)
        d3_split_seq = [t.reshape(batch, y_height, x_width, x_channels) for t in d3_split_seq]
        x = F.stack(d3_split_seq, axis=0)
        x = F.transpose(F.swapaxes(x, axis1=0, axis2=1), axes=(0, 2, 1, 3, 4)).reshape(
            batch, x_height, x_width, x_channels)
        x = F.transpose(x, axes=(0, 3, 1, 2))
        return x 
Author: osmr | Project: imgclsmob | Lines: 19 | Source file: irevnet.py

Example 6: process_trajectory

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def process_trajectory(self, l):
        """This is the time-dependent convolution operation, applied to a trajectory (in order).
        """
        shp = l.shape[0]
        # First dim is batchsize=1, then either 1 channel for 2d conv or n_feat channels
        # for 1d conv.
        l = F.expand_dims(l, axis=0)
        l = F.transpose(l, (0, 2, 1))
        l = self.traj_c0(l)
        l = F.leaky_relu(l)
        l = self.traj_c1(l)
        l = F.leaky_relu(l)
        l = F.sum(l, axis=(0, 2)) / l.shape[0] / l.shape[2]
        l = F.expand_dims(l, axis=0)
        l = self.traj_d0(l)
        l = F.tile(l, (shp, 1))
        return l 
Author: openai | Project: EPG | Lines: 19 | Source file: losses.py

Example 7: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def __call__(self, images, label=None):
        if self.uses_original_data:
            # handle each individual view as increase in batch size
            batch_size, num_channels, height, width = images.shape
            images = F.reshape(images, (batch_size, num_channels, height, 4, -1))
            images = F.transpose(images, (0, 3, 1, 2, 4))
            images = F.reshape(images, (batch_size * 4, num_channels, height, width // 4))

        batch_size = images.shape[0]
        h = self.localization_net(images)
        new_batch_size = h.shape[0]
        batch_size_increase_factor = new_batch_size // batch_size
        images = F.concat([images for _ in range(batch_size_increase_factor)], axis=0)

        if label is None:
            return self.recognition_net(images, h)
        return self.recognition_net(images, h, label) 
Author: Bartzi | Project: see | Lines: 19 | Source file: fsns.py

Example 8: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def __call__(self, input_ids, input_mask, token_type_ids):
        final_hidden = self.bert.get_sequence_output(
            input_ids,
            input_mask,
            token_type_ids)
        batch_size = final_hidden.shape[0]
        seq_length = final_hidden.shape[1]
        hidden_size = final_hidden.shape[2]

        final_hidden_matrix = F.reshape(
            final_hidden, [batch_size * seq_length, hidden_size])

        logits = self.output(final_hidden_matrix)

        logits = F.reshape(logits, [batch_size, seq_length, 2])
        logits = logits - (1 - input_mask[:, :, None]) * 1000.  # ignore pads
        logits = F.transpose(logits, [2, 0, 1])

        unstacked_logits = F.separate(logits, axis=0)

        (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
        return (start_logits, end_logits) 
Author: chainer | Project: models | Lines: 24 | Source file: modeling.py

Example 9: _load_projection

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def _load_projection(self):
        cnn_options = self._options['char_cnn']
        filters = cnn_options['filters']
        n_filters = sum(f[1] for f in filters)

        with self.init_scope():
            self._projection = L.Linear(
                n_filters, self.output_dim, nobias=False)
        with h5py.File(cached_path(self._weight_file), 'r') as fin:
            weight = fin['CNN_proj']['W_proj'][...]
            bias = fin['CNN_proj']['b_proj'][...]
            self._projection.W.data[:] = numpy.transpose(weight)
            self._projection.b.data[:] = bias

            self._projection.W._requires_grad = self.requires_grad
            self._projection.b._requires_grad = self.requires_grad 
Author: chainer | Project: models | Lines: 18 | Source file: elmo.py

Example 10: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def __call__(self, query, key, value, mask=None):
        """
            Perform attention on the value array, using the query and key parameters for calculating the attention mask.
            :param query: matrix of shape (batch_size, num_timesteps, transformer_size) that is used for attention mask calculation
            :param key: matrix of shape (batch_size, num_timesteps, transformer_size) that is used for attention mask calculation
            :param value: matrix of shape (batch_size, num_timesteps, transformer_size) that is used for attention calculation
            :param mask: mask that can be used to mask out parts of the feature maps and avoid attending to those parts
            :return: the attended feature map `value`.
        """
        if mask is not None:
            mask = mask[:, self.xp.newaxis, ...]

        batch_size = len(query)

        query, key, value = [self.project(linear, x, batch_size) for linear, x in zip(self.linears, (query, key, value))]

        x, self.attention = self.attention_implementation(query, key, value, mask=mask, dropout_ratio=self.dropout_ratio)

        x = F.transpose(x, (0, 2, 1, 3))
        x = F.reshape(x, (batch_size, -1, self.num_heads * self.key_dimensionality))

        return self.linears[-1](x, n_batch_axes=2) 
Author: chainer | Project: models | Lines: 24 | Source file: attention.py

Example 11: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def __call__(self, x):
        N = x.data.shape[0]
        h = F.leaky_relu(self.c0_0(x))
        h = F.leaky_relu(self.bn0_1(self.c0_1(h)))
        h = F.leaky_relu(self.bn1_0(self.c1_0(h)))
        h = F.leaky_relu(self.bn1_1(self.c1_1(h)))
        h = F.leaky_relu(self.bn2_0(self.c2_0(h)))
        h = F.leaky_relu(self.bn2_1(self.c2_1(h)))
        feature = F.reshape(F.leaky_relu(self.c3_0(h)), (N, 8192))
        m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
        m0 = F.broadcast_to(m, (N, self.B * self.C, N))
        m1 = F.transpose(m0, (2, 1, 0))
        d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
        d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
        h = F.concat([feature, d])

        return self.l4(h) 
Author: pfnet-research | Project: chainer-gan-lib | Lines: 19 | Source file: net.py

Example 12: _generate_fake_video

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def _generate_fake_video(self, z_slow, z_fast):
        n_b, z_fast_dim, n_frames = z_fast.shape
        self.batchsize = n_b
        z_fast = F.reshape(F.transpose(
            z_fast, [0, 2, 1]), (n_b * n_frames, z_fast_dim))

        n_b, z_slow_dim = z_slow.shape
        z_slow = F.reshape(
            F.broadcast_to(F.reshape(z_slow, (n_b, 1, z_slow_dim)),
                           (n_b, n_frames, z_slow_dim)),
            (n_b * n_frames, z_slow_dim))

        with chainer.using_config('train', True):
            fake_video = self.vgen(z_slow, z_fast)
            _, n_ch, h, w = fake_video.shape

        fake_video = F.transpose(
            F.reshape(fake_video, (n_b, n_frames, n_ch, h, w)),
            [0, 2, 1, 3, 4])
        return fake_video 
Author: pfnet-research | Project: tgan | Lines: 22 | Source file: dcgan_updater_base.py

Example 13: get_transform_params

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def get_transform_params(self, features):
        batch_size, num_channels, feature_height, feature_weight = features.shape
        features = F.reshape(features, (batch_size, num_channels, -1))
        features = F.transpose(features, (0, 2, 1))

        target = chainer.Variable(self.xp.zeros((batch_size, 1, 6), dtype=chainer.get_dtype()))

        for _ in range(self.num_bboxes_to_localize):
            embedded_params = self.param_embedder(target.array, n_batch_axes=2)
            embedded_params = self.positional_encoding(embedded_params)
            decoded = self.decoder(embedded_params, features, None, self.mask)
            params = self.param_predictor(decoded, n_batch_axes=2)
            target = F.concat([target, params[:, -1:]])

        target = F.reshape(target[:, 1:], (-1,) + target.shape[2:])
        transform_params = rotation_dropout(F.reshape(target, (-1, 2, 3)), ratio=self.dropout_ratio)
        return transform_params 
Author: Bartzi | Project: kiss | Lines: 19 | Source file: transformer_text_localizer.py

Example 14: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def __call__(self, distances, points1, points2):
        """

        Args:
            distances (numpy.ndarray or cupy.ndarray):
                3-dim array (bs, num_point2, num_point1)
            points1 (Variable): 3-dim (batch_size, num_point1, ch1)
            points2 (Variable): 3-dim (batch_size, num_point2, ch2)
                points2 is deeper, rich feature. num_point1 > num_point2

        Returns (Variable): 3-dim (batch_size, num_point1, ch1+ch2)
        """
        # h: interpolated_points (batch_size, num_point1, ch1+ch2)
        h = self.interpolation(distances, points1, points2)
        # h: interpolated_points (batch_size, ch1+ch2, num_point1, 1)
        h = functions.transpose(h, (0, 2, 1))[:, :, :, None]
        for conv_block in self.feature_extractor_list:
            h = conv_block(h)
        h = functions.transpose(h[:, :, :, 0], (0, 2, 1))
        return h  # h (bs, num_point, ch') 
Author: corochann | Project: chainer-pointnet | Lines: 22 | Source file: feature_propagation_block.py

Example 15: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import transpose [as alias]
def __call__(self, coord_points, feature_points=None):
        # coord_points   (batch_size, num_point, coord_dim)
        # feature_points (batch_size, num_point, ch)
        # num_point, ch: coord_dim

        # grouped_points (batch_size, k, num_sample, channel)
        # center_points  (batch_size, k, coord_dim)
        grouped_points, center_points = self.sampling_grouping(
            coord_points, feature_points=feature_points)
        # set alias `h` -> (bs, channel, num_sample, k)
        # Note: this transpose may be removed by optimizing the shape sequence for sampling_grouping
        h = functions.transpose(grouped_points, (0, 3, 2, 1))
        # h (bs, ch, num_sample_in_region, k=num_group)
        for conv_block in self.feature_extractor_list:
            h = conv_block(h)
        # TODO: try other option of pooling function
        h = functions.max(h, axis=2, keepdims=True)
        # h (bs, ch, 1, k=num_group)
        for conv_block in self.head_list:
            h = conv_block(h)
        h = functions.transpose(h[:, :, 0, :], (0, 2, 1))
        return center_points, h  # (bs, k, coord), h (bs, k, ch') 
Author: corochann | Project: chainer-pointnet | Lines: 24 | Source file: set_abstraction_all_block.py


Note: The chainer.functions.transpose examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; please follow the corresponding project's license when redistributing or using them. Do not reproduce without permission.