

Python functions.batch_matmul Method Code Examples

This page collects typical usage examples of the Python method chainer.functions.batch_matmul, gathered from open-source projects. If you are unsure what functions.batch_matmul does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse the other methods of chainer.functions for related examples.


The 15 code examples of functions.batch_matmul below are sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better Python code examples.
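
Before the examples, here is a minimal sketch of what F.batch_matmul computes (shapes are illustrative only; note that recent Chainer releases deprecate it in favor of F.matmul):

import numpy as np
import chainer.functions as F

a = np.random.randn(8, 3, 4).astype(np.float32)  # (batch, M, K)
b = np.random.randn(8, 4, 5).astype(np.float32)  # (batch, K, N)

y = F.batch_matmul(a, b)  # one matmul per batch element -> (8, 3, 5)
assert y.shape == (8, 3, 5)

# A 2-D argument of shape (batch, M) is promoted to (batch, M, 1);
# transa/transb transpose the last two axes of each element.
v = np.random.randn(8, 4).astype(np.float32)
assert F.batch_matmul(a, v).shape == (8, 3, 1)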

Example 1: channelwise_inhibited

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def channelwise_inhibited(self, h):
    # Pick one of the three channels at random (requires `import random`).
    self.c = random.randint(0, 2)
    xp = cuda.get_array_module(h.data)
    num = h.data.shape[0]

    h = F.split_axis(h, 3, 1)
    c = F.reshape(h[self.c], (num, 16, 16))
    # Chainer v1 API: `volatile` was the second positional argument.
    z = Variable(xp.zeros_like(c.data), volatile='AUTO')
    # Multiplying by an all-zero matrix zeroes out the selected channel.
    c = F.batch_matmul(c, z)
    c = F.reshape(c, (num, 1, 16, 16))
    hs = []
    for i, s in enumerate(h):
        if i == self.c:
            hs.append(c)
        else:
            hs.append(s)
    return F.concat(hs, 1)
Author: mitmul, Project: ssai-cnn, Lines: 19, Source file: MnihCNN_rcis.py

Example 2: channelwise_inhibited

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def channelwise_inhibited(self, h):
    xp = cuda.get_array_module(h.data)
    num = h.data.shape[0]

    h = F.split_axis(h, 3, 1)
    c = F.reshape(h[self.c], (num, 16, 16))
    # Chainer v1 API: `volatile` was the second positional argument.
    z = Variable(xp.zeros_like(c.data), volatile='AUTO')
    c = F.batch_matmul(c, z)  # zero out the inhibited channel
    c = F.reshape(c, (num, 1, 16, 16))
    hs = []
    for i, s in enumerate(h):
        if i == self.c:
            hs.append(c)
        else:
            hs.append(s)
    return F.concat(hs, 1)
Author: mitmul, Project: ssai-cnn, Lines: 18, Source file: MnihCNN_cis.py

Example 3: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def __call__(self, inpt, mask):
    mb_size = inpt.data.shape[0]
    max_length = inpt.data.shape[1]

    precomp = F.reshape(F.tanh(self.lin(F.reshape(inpt, (-1, self.Hi)))), (mb_size, -1, self.Ho))

    mask_offset = max_length - len(mask)

    precomp_mask_penalties = self.xp.concatenate(
        [
            self.xp.zeros((mb_size, mask_offset), dtype=self.xp.float32),
            -10000 * (1 - self.xp.concatenate([
                self.xp.reshape(mask_elem, (mb_size, 1)).astype(self.xp.float32) for mask_elem in mask], 1))
        ], 1
    )

    def compute_copy_coefficients(state):
        # `batch_matmul` here is the module-local shim shown in Example 10.
        betas = F.reshape(batch_matmul(precomp, state), (mb_size, -1))
        masked_betas = betas + precomp_mask_penalties
        return masked_betas

    return compute_copy_coefficients
Author: fabiencro, Project: knmt, Lines: 24, Source file: attention.py

Example 4: calc_loss_style

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def calc_loss_style(hout_dict, hcomp_dict, hgt_dict):
    layers = hgt_dict.keys()
    for i, layer_name in enumerate(layers):
        B, C, H, W = hout_dict[layer_name].shape
        hout = F.reshape(hout_dict[layer_name], (B, C, H * W))
        hcomp = F.reshape(hcomp_dict[layer_name], (B, C, H * W))
        hgt = F.reshape(hgt_dict[layer_name], (B, C, H * W))

        # Gram matrices: (B, C, HW) x (B, HW, C) -> (B, C, C)
        hout_gram = F.batch_matmul(hout, hout, transb=True)
        hcomp_gram = F.batch_matmul(hcomp, hcomp, transb=True)
        hgt_gram = F.batch_matmul(hgt, hgt, transb=True)

        if i == 0:
            L_style_out = F.mean_absolute_error(hout_gram, hgt_gram) / (C * H * W)
            L_style_comp = F.mean_absolute_error(hcomp_gram, hgt_gram) / (C * H * W)
        else:
            L_style_out += F.mean_absolute_error(hout_gram, hgt_gram) / (C * H * W)
            L_style_comp += F.mean_absolute_error(hcomp_gram, hgt_gram) / (C * H * W)

    return L_style_out + L_style_comp
Author: SeitaroShinagawa, Project: chainer-partial_convolution_image_inpainting, Lines: 22, Source file: updater.py
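
To make the Gram-matrix step in Example 4 concrete, here is a self-contained sketch (shapes are illustrative, not from the original project) of what F.batch_matmul(h, h, transb=True) produces:

import numpy as np
import chainer.functions as F

B, C, H, W = 2, 4, 8, 8
feat = np.random.randn(B, C, H, W).astype(np.float32)

h = F.reshape(feat, (B, C, H * W))        # flatten the spatial dimensions
gram = F.batch_matmul(h, h, transb=True)  # (B, C, HW) x (B, HW, C) -> (B, C, C)
assert gram.shape == (B, C, C)

# Per-sample reference computation:
assert np.allclose(gram.data[0], h.data[0] @ h.data[0].T, atol=1e-4)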

Example 5: attend

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def attend(self, query, key, value, mask, minfs=None):
    """
    Input shapes:
        q=(b, units, dec_l), k=(b, units, enc_l),
        v=(b, units, dec_l, enc_l), m=(b, dec_l, enc_l)
    """

    # Calculate Attention Scores with Mask for Zero-padded Areas
    pre_a = F.batch_matmul(query, key, transa=True)  # (b, dec_l, enc_l)
    minfs = self.xp.full(pre_a.shape, -np.inf, pre_a.dtype) \
        if minfs is None else minfs
    pre_a = F.where(mask, pre_a, minfs)
    a = F.softmax(pre_a, axis=2)
    # if values in axis=2 are all -inf, they become nan. thus do re-mask.
    a = F.where(self.xp.isnan(a.data),
                self.xp.zeros(a.shape, dtype=a.dtype), a)
    reshaped_a = a[:, None]  # (b, 1, dec_xl, enc_l)

    # Calculate Weighted Sum
    pre_c = F.broadcast_to(reshaped_a, value.shape) * value
    c = F.sum(pre_c, axis=3, keepdims=True)  # (b, units, dec_xl, 1)
    return c
Author: soskek, Project: convolutional_seq2seq, Lines: 24, Source file: net.py
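
Example 5 relies on transa=True to turn two (batch, units, length) tensors into pairwise scores. A minimal shape check (illustrative values, not the project's code):

import numpy as np
import chainer.functions as F

b, units, dec_l, enc_l = 2, 16, 5, 7
query = np.random.randn(b, units, dec_l).astype(np.float32)
key = np.random.randn(b, units, enc_l).astype(np.float32)

# transa=True transposes the last two axes of `query` per batch element:
# (b, dec_l, units) x (b, units, enc_l) -> (b, dec_l, enc_l)
scores = F.batch_matmul(query, key, transa=True)
assert scores.shape == (b, dec_l, enc_l)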

Example 6: attend

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def attend(self, encoded_features):
    self.out_lstm.reset_state()
    transformed_encoded_features = F.concat([F.expand_dims(self.transform_encoded_features(feature), axis=1) for feature in encoded_features], axis=1)
    concat_encoded_features = F.concat([F.expand_dims(e, axis=1) for e in encoded_features], axis=1)

    lstm_output = self.xp.zeros_like(encoded_features[0])
    outputs = []
    for _ in range(self.num_labels):
        transformed_lstm_output = self.transform_out_lstm_feature(lstm_output)
        attended_feats = []
        for transformed_encoded_feature in F.separate(transformed_encoded_features, axis=1):
            attended_feat = transformed_encoded_feature + transformed_lstm_output
            attended_feat = F.tanh(attended_feat)
            attended_feats.append(self.generate_attended_feat(attended_feat))

        attended_feats = F.concat(attended_feats, axis=1)
        alphas = F.softmax(attended_feats, axis=1)

        lstm_input_feature = F.batch_matmul(alphas, concat_encoded_features, transa=True)
        lstm_input_feature = F.squeeze(lstm_input_feature, axis=1)
        lstm_output = self.out_lstm(lstm_input_feature)
        outputs.append(lstm_output)
    return outputs
Author: Bartzi, Project: see, Lines: 25, Source file: fsns.py

Example 7: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def __call__(self, a_list, state, batch_size, xp):
    e_list = []
    sum_e = xp.zeros((batch_size, 1), dtype=xp.float32)
    for a in a_list:
        w = reshape(batch_matmul(state['h2'], a, transa=True), (batch_size, 1))
        w.data = xp.clip(w.data, -40, 40)  # clip logits to keep exp() from overflowing
        e = exp(w)
        e_list.append(e)
        sum_e = sum_e + e

    context = xp.zeros((batch_size, self.hidden_size), dtype=xp.float32)

    for a, e in zip(a_list, e_list):
        e /= sum_e
        context = context + reshape(batch_matmul(a, e), (batch_size, self.hidden_size))
    return context, e_list, sum_e
Author: aistairc, Project: seq2seq_temporal_attention, Lines: 18, Source file: S2S_att.py
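
Example 7 builds the softmax by hand so it can clip the logits. For reference, here is a hedged sketch of the same context computation with the annotations stacked into one tensor (the a_list entries are assumed to share the shape (batch, hidden); this is an illustrative rewrite, not the project's code):

import numpy as np
import chainer.functions as F

batch_size, hidden_size, src_len = 2, 8, 5
a_list = [np.random.randn(batch_size, hidden_size).astype(np.float32)
          for _ in range(src_len)]
h2 = np.random.randn(batch_size, hidden_size).astype(np.float32)

A = F.stack(a_list, axis=1)  # (batch, src_len, hidden)
scores = F.reshape(F.batch_matmul(A, h2), (batch_size, src_len))
attn = F.softmax(scores, axis=1)

# Weighted sum: (batch, hidden, src_len) x (batch, src_len, 1) -> (batch, hidden, 1)
context = F.reshape(F.batch_matmul(A, attn, transa=True), (batch_size, hidden_size))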

Example 8: extract_style_feature

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def extract_style_feature(self, images, masks=None):
    xp = self.xp
    mean = xp.array([103.939, 116.779, 123.68], 'float32')  # BGR
    images = images[:, ::-1] * 255 - mean[None, :, None, None]
    features = self.vgg16(images, layers=['conv1_2', 'conv2_2', 'conv3_3', 'conv4_3']).values()
    if masks is None:
        masks = xp.ones((images.shape[0], images.shape[2], images.shape[3]))

    style_features = []
    for feature in features:
        # Integer ratio (// keeps the pooling kernel size an int on Python 3).
        scale = masks.shape[-1] // feature.shape[-1]
        m = cf.average_pooling_2d(masks[:, None, :, :], scale, scale).data
        dim = feature.shape[1]

        m = m.reshape((m.shape[0], -1))
        f2 = feature.transpose((0, 2, 3, 1))
        f2 = f2.reshape((f2.shape[0], -1, f2.shape[-1]))
        f2 *= xp.sqrt(m)[:, :, None]
        # Masked Gram matrix: (B, C, HW) x (B, HW, C) -> (B, C, C)
        f2 = cf.batch_matmul(f2.transpose((0, 2, 1)), f2)
        f2 /= dim * m.sum(axis=1)[:, None, None]
        style_features.append(f2)

    return style_features
Author: hiroharu-kato, Project: style_transfer_3d, Lines: 25, Source file: main.py

Example 9: extract_features

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def extract_features(vgg16, images, masks=None):
    mean = cp.array([103.939, 116.779, 123.68], 'float32')  # BGR
    images = images[:, ::-1] * 255 - mean[None, :, None, None]
    features = vgg16(images, layers=['conv1_2', 'conv2_2', 'conv3_3', 'conv4_3']).values()

    if masks is None:
        masks = cp.ones((images.shape[0], images.shape[2], images.shape[3]), 'float32')
    else:
        masks = masks.data

    style_features = []
    for f in features:
        # Integer ratio (// keeps the pooling kernel size an int on Python 3).
        scale = masks.shape[-1] // f.shape[-1]
        m = cf.average_pooling_2d(masks[:, None, :, :], scale, scale).data
        dim = f.shape[1]

        m = m.reshape((m.shape[0], -1))
        f2 = f.transpose((0, 2, 3, 1))
        f2 = f2.reshape((f2.shape[0], -1, f2.shape[-1]))
        f2 *= cp.sqrt(m)[:, :, None]
        f2 = cf.batch_matmul(f2.transpose((0, 2, 1)), f2)
        f2 /= dim * m.sum(axis=1)[:, None, None]
        style_features.append(f2)

    return style_features
Author: hiroharu-kato, Project: style_transfer_3d, Lines: 27, Source file: train.py

Example 10: batch_matmul

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def batch_matmul(a, b, transa=False, transb=False):
    # Shim for the deprecated F.batch_matmul: a[:, :, None] promotes a 2-D
    # `a` of shape (B, M) to (B, M, 1), as F.batch_matmul used to do.
    return F.matmul(a[:, :, None], b, transa=transa, transb=transb)
Author: fabiencro, Project: knmt, Lines: 4, Source file: attention.py
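
A quick, hedged check (illustrative shapes; assumes Chainer v3+ where F.matmul broadcasts over the leading batch axis) that the one-line shim matches F.batch_matmul in the 2-D-first-argument case it is written for:

import numpy as np
import chainer.functions as F

state = np.random.randn(4, 6).astype(np.float32)       # (batch, units)
precomp = np.random.randn(4, 6, 3).astype(np.float32)  # (batch, units, len)

# a[:, :, None] promotes (batch, units) to (batch, units, 1), mirroring
# F.batch_matmul's promotion rule for 2-D inputs.
y_shim = F.matmul(state[:, :, None], precomp, transa=True)
y_ref = F.batch_matmul(state, precomp, transa=True)
assert np.allclose(y_shim.data, y_ref.data, atol=1e-5)  # both (batch, 1, len)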

Example 11: compute_ctxt_demux

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def compute_ctxt_demux(self, fb_concat, mask):
    mb_size, nb_elems, Hi = fb_concat.data.shape
    assert Hi == self.Hi
    assert mb_size == 1
    assert len(mask) == 0

    precomputed_al_factor = F.reshape(self.al_lin_h(
        F.reshape(fb_concat, (mb_size * nb_elems, self.Hi))), (mb_size, nb_elems, self.Ha))

#     concatenated_mask = F.concat([F.reshape(mask_elem, (mb_size, 1)) for mask_elem in mask], 1)

    def compute_ctxt(previous_state, prev_word_embedding=None):
        current_mb_size = previous_state.data.shape[0]

        al_factor = F.broadcast_to(precomputed_al_factor, (current_mb_size, nb_elems, self.Ha))
#         used_fb_concat = F.broadcast_to(fb_concat, (current_mb_size, nb_elems, Hi))
#         used_concatenated_mask = F.broadcast_to(concatenated_mask, (current_mb_size, nb_elems))

        state_al_factor = self.al_lin_s(previous_state)

        # As suggested by Isao Goto
        if prev_word_embedding is not None:
            state_al_factor = state_al_factor + self.al_lin_y(prev_word_embedding)

        state_al_factor_bc = F.broadcast_to(F.reshape(state_al_factor, (current_mb_size, 1, self.Ha)), (current_mb_size, nb_elems, self.Ha))
        a_coeffs = F.reshape(self.al_lin_o(F.reshape(F.tanh(state_al_factor_bc + al_factor),
                                                     (current_mb_size * nb_elems, self.Ha))), (current_mb_size, nb_elems))

#         with cuda.get_device_from_array(used_concatenated_mask.data):
#             a_coeffs = a_coeffs - 10000 * (1 - used_concatenated_mask.data)

        attn = F.softmax(a_coeffs)

#         ci = F.reshape(F.batch_matmul(attn, used_fb_concat, transa=True), (current_mb_size, self.Hi))

        ci = F.reshape(F.matmul(attn, F.reshape(fb_concat, (nb_elems, Hi))), (current_mb_size, self.Hi))

        return ci, attn

    return compute_ctxt
Author: fabiencro, Project: knmt, Lines: 43, Source file: attention.py
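
The demux trick in Example 11 exploits mb_size == 1: instead of broadcasting the encoder states to the beam and calling batch_matmul, a plain F.matmul against the shared 2-D matrix suffices. A hedged equivalence sketch (illustrative shapes, not the project's code):

import numpy as np
import chainer.functions as F

beam, nb_elems, Hi = 5, 7, 4
attn = np.random.randn(beam, nb_elems).astype(np.float32)
fb = np.random.randn(1, nb_elems, Hi).astype(np.float32)  # one source sentence

# Demux path: a single plain matmul against the shared (nb_elems, Hi) matrix.
ci = F.matmul(attn, fb[0])  # (beam, Hi)

# Reference path: broadcast to the beam, then batch_matmul with transa=True.
fb_bc = np.repeat(fb, beam, axis=0)
ci_ref = F.reshape(F.batch_matmul(attn, fb_bc, transa=True), (beam, Hi))
assert np.allclose(ci.data, ci_ref.data, atol=1e-5)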

Example 12: compute_logits

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def compute_logits(self, new_states, concatenated, attn):
    new_output_state = new_states[-1]

    all_concatenated = F.concat((concatenated, new_output_state))
    logits = self.decoder_chain.lin_o(self.decoder_chain.maxo(all_concatenated))

    if self.lexicon_probability_matrix is not None:
        current_mb_size = new_output_state.data.shape[0]
        assert self.mb_size is None or current_mb_size <= self.mb_size
        lexicon_probability_matrix = self.lexicon_probability_matrix[:current_mb_size]

        # Just making sure data shape is as expected
        attn_mb_size, max_source_length_attn = attn.data.shape
        assert attn_mb_size == current_mb_size
        lex_mb_size, max_source_length_lexicon, v_size_lexicon = lexicon_probability_matrix.shape
        assert max_source_length_lexicon == max_source_length_attn
        assert logits.data.shape == (current_mb_size, v_size_lexicon)

        if self.demux:
            assert lex_mb_size == 1
            weighted_lex_probs = F.reshape(
                matmul_constant(attn, lexicon_probability_matrix.reshape(lexicon_probability_matrix.shape[1],
                                                                         lexicon_probability_matrix.shape[2])),
                logits.data.shape)
        else:
            assert lex_mb_size == current_mb_size

#             weighted_lex_probs = F.reshape(
#                 F.batch_matmul(attn, ConstantFunction(lexicon_probability_matrix)(), transa=True),
#                 logits.data.shape)

            weighted_lex_probs = F.reshape(
                batch_matmul_constant(attn, lexicon_probability_matrix, transa=True),
                logits.data.shape)

        logits += F.log(weighted_lex_probs + self.lex_epsilon)
    return logits
Author: fabiencro, Project: knmt, Lines: 39, Source file: decoder_cells.py

Example 13: batch_matmul_last_dims

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def batch_matmul_last_dims(A, B, transa=False, transb=False):
    assert A.data.shape[:-2] == B.data.shape[:-2]
    reshaped_A = F.reshape(A, (-1,) + A.data.shape[-2:])
    reshaped_B = F.reshape(B, (-1,) + B.data.shape[-2:])
    reshaped_result = F.batch_matmul(reshaped_A, reshaped_B, transa=transa, transb=transb)
    result = F.reshape(reshaped_result, A.data.shape[:-2] + reshaped_result.data.shape[-2:])
    return result

########################################################################
# Multihead Attention
# 
Author: fabiencro, Project: knmt, Lines: 13, Source file: multi_attention.py
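
Usage sketch for batch_matmul_last_dims with multi-head-attention-style shapes (values are illustrative; assumes the helper defined above is in scope):

import numpy as np
import chainer
import chainer.functions as F

b, heads, L, dk = 2, 4, 5, 8
Q = chainer.Variable(np.random.randn(b, heads, L, dk).astype(np.float32))
K = chainer.Variable(np.random.randn(b, heads, L, dk).astype(np.float32))

# The leading axes (b, heads) are folded into one batch axis, multiplied,
# then unfolded again: the result is a (b, heads, L, L) score tensor.
scores = batch_matmul_last_dims(Q, K, transb=True)
assert scores.shape == (b, heads, L, L)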

Example 14: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def forward(self, inputs, device):
    x1, x2 = inputs
    with testing.assert_warns(DeprecationWarning):
        y = F.batch_matmul(
            x1, x2, transa=self.transa, transb=self.transb)
    return y,
Author: chainer, Project: chainer, Lines: 8, Source file: test_matmul.py
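
Example 14 is Chainer's own regression test asserting the DeprecationWarning. On Chainer v3 and later, F.matmul broadcasts over leading batch axes, so for 3-D inputs it is a drop-in replacement (a hedged sketch, not part of the test suite):

import numpy as np
import chainer.functions as F

a = np.random.randn(3, 4, 5).astype(np.float32)
b = np.random.randn(3, 5, 6).astype(np.float32)

y_old = F.batch_matmul(a, b)  # emits DeprecationWarning on recent versions
y_new = F.matmul(a, b)        # the recommended replacement
assert np.allclose(y_old.data, y_new.data, atol=1e-5)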

Example 15: query

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import batch_matmul [as alias]
def query(self, u):
    xp = backend.get_array_module(u)
    size = self.m.shape[1]
    inds = xp.arange(size - 1, -1, -1, dtype=numpy.int32)
    tm = self.TA(inds)
    tc = self.TC(inds)
    tm = F.broadcast_to(tm, self.m.shape)
    tc = F.broadcast_to(tc, self.c.shape)
    p = F.softmax(F.batch_matmul(self.m + tm, u))
    o = F.batch_matmul(F.swapaxes(self.c + tc, 2, 1), p)
    o = F.squeeze(o, -1)
    u = o + u
    return u
Author: pfnet, Project: pfio, Lines: 15, Source file: memnn.py
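
The shapes in Example 15's memory-network hop, traced with illustrative values (the temporal-encoding terms TA/TC are omitted; this walkthrough is not part of the original project):

import numpy as np
import chainer.functions as F

b, size, units = 2, 6, 8
m = np.random.randn(b, size, units).astype(np.float32)  # input-side memory
c = np.random.randn(b, size, units).astype(np.float32)  # output-side memory
u = np.random.randn(b, units).astype(np.float32)        # query vector

p = F.softmax(F.batch_matmul(m, u))         # (b, size, 1): match scores over slots
o = F.batch_matmul(F.swapaxes(c, 2, 1), p)  # (b, units, 1): weighted memory read
o = F.squeeze(o, -1)                        # (b, units)
u_next = o + u                              # query for the next hop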


Note: The chainer.functions.batch_matmul examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets come from open-source projects contributed by their respective authors, and copyright in the source code remains with them. Consult each project's License before using or redistributing the code, and do not republish without permission.