

Python functions.pad_sequence Method Code Examples

This article collects typical usage examples of the Python method chainer.functions.pad_sequence. If you are unsure what functions.pad_sequence does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also browse further usage examples from the containing module, chainer.functions.


The following shows 15 code examples of functions.pad_sequence drawn from open-source projects, sorted by popularity by default.
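Before the project snippets, here is a minimal standalone sketch of what chainer.functions.pad_sequence does: it takes a list of variable-length arrays and returns a single batch padded to a common length (the toy values below are chosen purely for illustration).

import numpy as np
import chainer.functions as F

# Two sequences of different lengths.
xs = [np.array([1, 2, 3], dtype=np.float32),
      np.array([4, 5], dtype=np.float32)]

# Pad to the length of the longest sequence (length=None) with zeros.
y = F.pad_sequence(xs, length=None, padding=0)
print(y.array)
# [[1. 2. 3.]
#  [4. 5. 0.]]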

Example 1: embed_xs_with_prediction

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def embed_xs_with_prediction(self, xs, labels=None, batch='concat'):
        predicted_exs = self.bilm.predict_embed(
            xs, self.embed.W,
            labels=labels,
            dropout=self.config['dropout'],
            mode=self.config['mode'],
            temp=self.config['temp'],
            word_lower_bound=self.config['word_lower_bound'],
            gold_lower_bound=self.config['gold_lower_bound'],
            gumbel=self.config['gumbel'],
            residual=self.config['residual'],
            wordwise=self.config['wordwise'],
            add_original=self.config['add_original'],
            augment_ratio=self.config['augment_ratio'])
        if batch == 'concat':
            # Pad the variable-length embeddings into one dense block and let
            # the transpose below rearrange it to (batch, emb_dim, max_len, 1).
            predicted_ex_block = F.pad_sequence(predicted_exs, padding=0.)
            predicted_ex_block = F.transpose(
                predicted_ex_block, (0, 2, 1))[:, :, :, None]
            return predicted_ex_block
        elif batch == 'list':
            return predicted_exs
        else:
            raise NotImplementedError 
Developer: pfnet-research, Project: contextual_augmentation, Lines of code: 25, Source file: nets.py

Example 2: make_batch

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def make_batch(features, gpu):
    """Creates a concatenated batch from a list of data and to_gpu."""

    all_input_ids = []
    all_input_mask = []
    all_input_type_ids = []

    for feature in features:
        all_input_ids.append(feature.input_ids)
        all_input_mask.append(feature.input_mask)
        all_input_type_ids.append(feature.input_type_ids)

    def stack_and_to_gpu(data_list):
        sdata = F.pad_sequence(
            data_list, length=None, padding=0).array
        return chainer.dataset.to_device(gpu, sdata)

    batch_input_ids = stack_and_to_gpu(all_input_ids).astype('i')
    batch_input_mask = stack_and_to_gpu(all_input_mask).astype('f')
    batch_input_type_ids = stack_and_to_gpu(all_input_type_ids).astype('i')
    return {'input_ids': batch_input_ids,
            'input_mask': batch_input_mask,
            'input_type_ids': batch_input_type_ids, } 
Developer: chainer, Project: models, Lines of code: 25, Source file: extract_features.py
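As a quick usage illustration of make_batch above: it only needs objects exposing input_ids, input_mask, and input_type_ids attributes. The SimpleFeature container below is hypothetical, invented here solely to exercise the helper on the CPU (gpu=-1), and it assumes the snippet's own imports (chainer, and chainer.functions as F) are in scope.

import numpy as np
from dataclasses import dataclass

@dataclass
class SimpleFeature:  # hypothetical stand-in for the project's feature class
    input_ids: np.ndarray
    input_mask: np.ndarray
    input_type_ids: np.ndarray

features = [
    SimpleFeature(np.array([101, 7592, 102]), np.ones(3), np.zeros(3)),
    SimpleFeature(np.array([101, 102]), np.ones(2), np.zeros(2)),
]
batch = make_batch(features, gpu=-1)   # gpu=-1 keeps arrays on the CPU
print(batch['input_ids'].shape)        # (2, 3): padded to the longest sequence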

Example 3: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def __call__(self, batch, device):
        """Perform subsampling.

        Args:
            batch (list): Batch that will be subsampled.
            device (chainer.backend.Device): CPU or GPU device.

        Returns:
            chainer.Variable: xp.array padded and subsampled from the batch.
            xp.array: xp.array of the lengths of the sequences in the mini-batch.
            chainer.Variable: xp.array padded and subsampled from the batch.

        """
        # For the transformer, data is processed on the CPU.
        # The batch is expected to be a list containing a single element.
        assert len(batch) == 1
        xs, ys = batch[0]
        xs = F.pad_sequence(xs, padding=-1).data
        # get batch of lengths of input sequences
        ilens = np.array([x.shape[0] for x in xs], dtype=np.int32)
        return xs, ilens, ys 
Developer: espnet, Project: espnet, Lines of code: 23, Source file: training.py

Example 4: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def forward(self, xs, l):
        inputs = F.pad_sequence(xs)
        h = inputs[:, 0]
        for time in range(l):
            h = inputs[:, time]
        return h 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 8, Source file: For.py

Example 5: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def forward(self, xs, ilens):
        xs, ilens = self.blstm(xs, ilens)
        return F.pad_sequence(xs) 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 5, Source file: EspNet_BLSTM.py

Example 6: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def forward(self, xs, h, c, mask):
        batch_size = len(xs)
        lens = [x.shape[0] for x in xs]
        #max_len = max(lens)
        max_len = self.sequence_length
        #mask = (np.expand_dims(np.arange(max_len), 0) <
        #        np.expand_dims(lens, 1)).astype(np.float)
        #h = np.zeros((batch_size, self.num_hidden), dtype=np.float32)
        #c = np.zeros((batch_size, self.num_hidden), dtype=np.float32)
        #h = self.initial_h
        #c = self.initial_c
        inputs = F.pad_sequence(xs)
        for time in range(max_len):
            x = inputs[:, time]
            input = F.concat((x, h), axis=1)
            gate = self.l(input)
            i = gate[:, 0:self.num_hidden]
            o = gate[:, self.num_hidden:self.num_hidden*2]
            f = gate[:, self.num_hidden*2:self.num_hidden*3]
            nc = gate[:, self.num_hidden*3:self.num_hidden*4]
            #i, o, f, nc = F.split_axis(gate, 4, axis=1)
            i = F.sigmoid(i)
            o = F.sigmoid(o)
            f = F.sigmoid(f)
            nc = F.tanh(nc)
            nc = f * c + i * nc
            nh = o * F.tanh(nc)
            m = mask[:, time]
            pmask = F.reshape(m, (self.batch_size,))
            pmask = F.broadcast_to(F.expand_dims(pmask, axis=1),
                                   (self.batch_size, self.num_hidden))
            nmask = 1.0 - pmask
            h = nh * pmask + h * nmask
        return h


# from https://github.com/chainer/chainer/blob/master/examples/seq2seq/seq2seq.py 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 39, Source file: MyLSTM.py
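The commented-out lines in the example above hint at how the step mask can be built from per-sequence lengths; a minimal standalone NumPy sketch of that construction (toy lengths chosen for illustration):

import numpy as np

lens = np.array([3, 1, 2])   # lengths of three sequences in a batch
max_len = 4

# mask[b, t] is 1.0 while step t lies inside sequence b, else 0.0.
mask = (np.arange(max_len)[None, :] < lens[:, None]).astype(np.float32)
print(mask)
# [[1. 1. 1. 0.]
#  [1. 0. 0. 0.]
#  [1. 1. 0. 0.]]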

Example 7: original

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def original(self, enc_hs, dec_z, att_prev, scaling=2.0):
        '''AttDot forward

        :param enc_hs:
        :param dec_z:
        :param scaling:
        :return:
        '''
        batch = len(enc_hs)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None:
            self.enc_h = F.pad_sequence(enc_hs)  # utt x frame x hdim
            self.h_length = self.enc_h.shape[1]
            # utt x frame x att_dim
            self.pre_compute_enc_h = F.tanh(
                linear_tensor(self.mlp_enc, self.enc_h))

        if dec_z is None:
            dec_z = chainer.Variable(self.xp.zeros(
                (batch, self.dunits), dtype=np.float32))
        else:
            dec_z = F.reshape(dec_z, (batch, self.dunits))

        # <phi (h_t), psi (s)> for all t
        u = F.broadcast_to(F.expand_dims(F.tanh(self.mlp_dec(dec_z)), 1),
                           self.pre_compute_enc_h.shape)
        e = F.sum(self.pre_compute_enc_h * u, axis=2)  # utt x frame
        # Applying a minus-large-number filter to make a probability value zero for a padded area
        # simply degrades the performance, and I gave up this implementation
        # Apply a scaling to make an attention sharp
        w = F.softmax(scaling * e)
        # weighted sum over frames
        # utt x hdim
        c = F.sum(self.enc_h * F.broadcast_to(F.expand_dims(w, 2), self.enc_h.shape), axis=1)

        return c, w 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 38, Source file: EspNet_AttDot.py
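For reference, the scaled dot-product score and the weighted sum in the example above follow a standard attention pattern; a minimal NumPy sketch with the same broadcast shapes (random toy tensors, variable names chosen here for illustration):

import numpy as np

batch, frames, att_dim, hdim = 2, 4, 3, 5
enc_h = np.random.rand(batch, frames, hdim).astype(np.float32)      # padded encoder outputs
phi_h = np.random.rand(batch, frames, att_dim).astype(np.float32)   # stands in for tanh(mlp_enc(enc_h))
psi_s = np.random.rand(batch, att_dim).astype(np.float32)           # stands in for tanh(mlp_dec(dec_z))

scaling = 2.0
e = (phi_h * psi_s[:, None, :]).sum(axis=2)      # utt x frame scores <phi(h_t), psi(s)>
w = np.exp(scaling * e)
w /= w.sum(axis=1, keepdims=True)                # softmax over frames
c = (enc_h * w[:, :, None]).sum(axis=1)          # utt x hdim context vector
print(w.shape, c.shape)                          # (2, 4) (2, 5)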

Example 8: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def forward(self, xs, ilens):
        '''VGG2L forward

        :param xs:
        :param ilens:
        :return:
        '''
        logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))

        # x: utt x frame x dim
        xs = F.pad_sequence(xs)

        # x: utt x 1 (input channel num) x frame x dim
        xs = F.swapaxes(F.reshape(
            xs, (xs.shape[0], xs.shape[1], self.in_channel, xs.shape[2] // self.in_channel)), 1, 2)

        xs = F.relu(self.conv1_1(xs))
        xs = F.relu(self.conv1_2(xs))
        xs = F.max_pooling_2d(xs, 2, stride=2)

        xs = F.relu(self.conv2_1(xs))
        xs = F.relu(self.conv2_2(xs))
        xs = F.max_pooling_2d(xs, 2, stride=2)

        # change ilens accordingly
        # EDIT(hamaji): ChxVM puts int32 on GPU and it hurts the performance.
        # TODO(hamaji): Fix device assignment to get rid of this change.
        ilens = (ilens + 1) // 2
        ilens = (ilens + 1) // 2
        # ilens = self.xp.array(self.xp.ceil(self.xp.array(
        #     ilens, dtype=np.float32) / 2), dtype=np.int32)
        # ilens = self.xp.array(self.xp.ceil(self.xp.array(
        #     ilens, dtype=np.float32) / 2), dtype=np.int32)

        # x: utt_list of frame (remove zero-padded frames) x (input channel num x dim)
        xs = F.swapaxes(xs, 1, 2)
        xs = F.reshape(
            xs, (xs.shape[0], xs.shape[1], xs.shape[2] * xs.shape[3]))
        xs = [xs[i, :ilens[i], :] for i in range(len(ilens))]

        return xs, ilens 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 43, Source file: EspNet_VGG2L.py

Example 9: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def forward(self, xs):
        y1 = F.pad_sequence(xs)
        return y1 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 5, Source file: PadSequence.py

Example 10: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def forward(self, xs, ilens):
        '''VGG2L forward

        :param xs:
        :param ilens:
        :return:
        '''
        logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))

        # x: utt x frame x dim
        xs = F.pad_sequence(xs)

        # x: utt x 1 (input channel num) x frame x dim
        xs = F.swapaxes(F.reshape(
            xs, (xs.shape[0], xs.shape[1], self.in_channel, xs.shape[2] // self.in_channel)), 1, 2)

        xs = F.relu(self.conv1_1(xs))
        xs = F.relu(self.conv1_2(xs))
        xs = F.max_pooling_2d(xs, 2, stride=2)

        xs = F.relu(self.conv2_1(xs))
        xs = F.relu(self.conv2_2(xs))
        xs = F.max_pooling_2d(xs, 2, stride=2)

        # change ilens accordingly
        # EDIT(hamaji): XCVM puts int32 on GPU and it hurts the performance.
        # TODO(hamaji): Fix device assignment to get rid of this change.
        ilens = (ilens + 1) // 2
        ilens = (ilens + 1) // 2
        # ilens = self.xp.array(self.xp.ceil(self.xp.array(
        #     ilens, dtype=np.float32) / 2), dtype=np.int32)
        # ilens = self.xp.array(self.xp.ceil(self.xp.array(
        #     ilens, dtype=np.float32) / 2), dtype=np.int32)

        # x: utt_list of frame (remove zero-padded frames) x (input channel num x dim)
        xs = F.swapaxes(xs, 1, 2)
        xs = F.reshape(
            xs, (xs.shape[0], xs.shape[1], xs.shape[2] * xs.shape[3]))
        xs = [xs[i, :ilens[i], :] for i in range(len(ilens))]

        return xs, ilens 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 43, Source file: EspNet_VGG2L.py

Example 11: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def forward(self, xs, h):
        inputs = F.pad_sequence(xs)
        gate = self.l(F.concat((inputs[:, 0], h), axis=1))
        return gate 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 6, Source file: Linear.py

Example 12: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def forward(self, xs, activation=None):
        ilens = [x.shape[0] for x in xs]
        # xs: (B, T, F)
        xs = F.pad_sequence(xs, padding=-1)
        pad_shape = xs.shape
        # emb: (B*T, E)
        emb = self.enc(xs)
        # ys: (B*T, C)
        ys = self.linear(emb)
        if activation:
            ys = activation(ys)
        # ys: [(T, C), ...]
        ys = F.separate(ys.reshape(pad_shape[0], pad_shape[1], -1), axis=0)
        ys = [F.get_item(y, slice(0, ilen)) for y, ilen in zip(ys, ilens)]
        return ys 
Developer: hitachi-speech, Project: EEND, Lines of code: 17, Source file: models.py

Example 13: check_forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def check_forward(self, xs):
        # Non-finite values do not work for integer dtypes.
        if not numpy.isfinite(self.pad) and \
           numpy.dtype(self.dtype).kind != 'f':
            return

        with disable_debug_mode_if(self.can_include_nan):
            y = functions.pad_sequence(
                xs, length=self.length, padding=self.pad)

        self.assertEqual(y.shape, self.y_shape)
        for i, (length, x) in enumerate(six.moves.zip(self.lengths, self.xs)):
            testing.assert_allclose(y.data[i, 0:length], x)
            testing.assert_allclose(
                y.data[i, length:], self.dtype(self.pad)) 
Developer: chainer, Project: chainer, Lines of code: 17, Source file: test_pad_sequence.py

Example 14: check_backward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def check_backward(self, xs, gy):
        # Numerical gradient does not work with non-finite values.
        # Gradients for integer values are not defined.
        if not numpy.isfinite(self.pad) or numpy.dtype(self.dtype).kind != 'f':
            return

        def f(*xs):
            return functions.pad_sequence(
                xs, length=self.length, padding=self.pad)

        gradient_check.check_backward(f, xs, gy, dtype=numpy.float64) 
Developer: chainer, Project: chainer, Lines of code: 13, Source file: test_pad_sequence.py

Example 15: check_double_backward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import pad_sequence [as alias]
def check_double_backward(self, xs, gy, ggxs):
        if not numpy.isfinite(self.pad) or numpy.dtype(self.dtype).kind != 'f':
            return

        def f(*xs):
            return functions.pad_sequence(
                xs, length=self.length, padding=self.pad)

        gradient_check.check_double_backward(
            f, xs, gy, ggxs, dtype=numpy.float64,
            **self.check_double_backward_options) 
Developer: chainer, Project: chainer, Lines of code: 13, Source file: test_pad_sequence.py


Note: The chainer.functions.pad_sequence examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.