

Python torch.renorm Method Code Examples

This article collects typical usage examples of the torch.renorm method in Python. If you are wondering what torch.renorm does, how to call it, or where it is used in practice, the curated examples below should help. You can also explore further usage examples from the torch module in which the method lives.


The following presents 3 code examples of the torch.renorm method, ordered by popularity.
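Before the examples, a quick refresher on the call itself: torch.renorm(input, p, dim, maxnorm) returns a copy of input in which each sub-tensor along dimension dim whose p-norm exceeds maxnorm is rescaled to have norm exactly maxnorm; sub-tensors already within the limit are left unchanged. A minimal self-contained demo:

import torch

x = torch.ones(3, 4)
x[1].fill_(2)
x[2].fill_(3)
print(x.norm(p=2, dim=1))   # tensor([2., 4., 6.])
# Clamp the L2 norm of every row (sub-tensor along dim=0) to at most 5.
y = torch.renorm(x, p=2, dim=0, maxnorm=5)
print(y.norm(p=2, dim=1))   # tensor([2., 4., 5.]) -- only the last row was rescaled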

Example 1: set_dim0

# Required module: import torch [as alias]
# Or: from torch import renorm [as alias]
def set_dim0(x):
    x = torch.renorm(x, p=2, dim=0, maxnorm=1e2)  # clamp row norms, otherwise leaves will explode
    # NOTE: the paper omits the squares inside the sum, but deriving the
    # hyperboloid constraint -x0^2 + ||x_{1:}||^2 = -1 yields the squared terms
    dim0 = torch.sqrt(1 + (x[:, 1:] ** 2).sum(dim=1))
    x[:, 0] = dim0
    return x


# ========================= models 
Developer ID: theSage21, Project: lorentz-embeddings, Lines: 12, Source: lorentz.py
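
A minimal usage sketch for set_dim0 (the input tensor here is hypothetical): after the call, every row satisfies the Lorentz hyperboloid constraint -x0^2 + ||x_{1:}||^2 = -1.

import torch

x = torch.randn(8, 5)   # 8 hypothetical points: 1 slot for dim0 + 4 space-like coords
x = set_dim0(x)
# Each row should now satisfy -x0^2 + sum_i x_i^2 == -1 (up to float error).
constraint = -(x[:, 0] ** 2) + (x[:, 1:] ** 2).sum(dim=1)
print(torch.allclose(constraint, torch.full_like(constraint, -1.0), atol=1e-4))  # True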

Example 2: forward

# Required module: import torch [as alias]
# Or: from torch import renorm [as alias]
def forward(self, input, offsets, ref=None):
    '''
    :param input:   a 1-dim tensor of indices
    :param offsets: a 1-dim tensor of offsets
    :param ref:     a 2-dim tensor of reference features, typically the features of ads
    :return: a 2-dim tensor of pooled, max-norm-constrained embeddings
    '''
    # attention requires a reference tensor, and vice versa
    assert (ref is None and not self.atten) or (ref is not None and self.atten)
    # add a batch dim for Embedding
    input = input.view(1, -1)
    # (1, n_word, n_dim)
    embedding = self.embedder(input)
    size = embedding.size()
    # (n_word, n_dim)
    embedding = embedding.view(size[1], size[2])
    if self.atten:
        size = embedding.size()
        # replicate ref to (n_word, n_dim)
        ref = replicate(ref, offsets, size[0])
        # compute attention weights from [embedding, ref - embedding, ref]
        diff = ref - embedding
        feat_for_atten = torch.cat([embedding, diff, ref], dim=1)
        atten = self.linear1(feat_for_atten)
        atten = self.activation(atten)
        atten = self.linear2(atten)
        # (n_word, 1)
        atten = self.sigmoid(atten)
        embedding = embedding * atten
    # pool word embeddings into (n_sample, n_dim)
    res = reduce(embedding, offsets, self.mode)
    # The following lines constrain the max norm of each field's embedding.
    size = res.size()
    # view as (n_sample * n_field, n_dim // n_field) so that renorm along
    # dim=0 clamps each field slice independently
    res = res.view(size[0] * self.n_field, size[1] // self.n_field)
    renorm_res = torch.renorm(res, p=self.norm_type, dim=0, maxnorm=self.max_norm)
    renorm_res = renorm_res.contiguous()
    res = renorm_res.view(size[0], size[1])
    return res
Developer ID: DiligentPanda, Project: Tencent_Ads_Algo_2018, Lines: 47, Source: embedding_atten_v2.py
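
The tail of forward is the interesting renorm idiom: viewing a (n_sample, n_dim) tensor as (n_sample * n_field, n_dim // n_field) lets a single torch.renorm call constrain each field slice of each sample independently. A standalone sketch with hypothetical sizes (n_field and max_norm here are illustrative, not the module's actual config):

import torch

n_sample, n_field, max_norm = 4, 2, 1.0
res = torch.randn(n_sample, 6) * 3   # n_dim = 6, divisible by n_field
size = res.size()
# One row per (sample, field) pair, so renorm along dim=0 clamps each
# field slice separately.
chunks = res.view(size[0] * n_field, size[1] // n_field)
chunks = torch.renorm(chunks, p=2, dim=0, maxnorm=max_norm).contiguous()
res = chunks.view(size[0], size[1])
print(res.view(size[0] * n_field, -1).norm(p=2, dim=1))  # every entry <= max_norm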

Example 3: load_word2vec_format

# Required module: import torch [as alias]
# Or: from torch import renorm [as alias]
# This example also relies on: import numpy as np; a to_unicode helper
# (e.g. gensim.utils.to_unicode); and REAL = np.float32 (gensim's float dtype).
def load_word2vec_format(filename, word_idx, binary=False, normalize=False,
                         encoding='utf8', unicode_errors='ignore'):
    """
    Load pre-trained word embeddings (adapted from gensim).
    If you trained the C model using a non-utf8 encoding for words, specify
    that encoding in `encoding`.
    :param filename: path to the word2vec-format file
    :param word_idx: dict mapping words to row indices in the output matrix
    :param binary  : whether the data is in binary word2vec format
    :param normalize: clamp each row's L2 norm to at most 1
    :param encoding :
    :param unicode_errors: 'strict', 'replace' or 'ignore'; defaults to 'ignore'
    """
    vocab = set()
    print("loading word embedding from %s" % filename)
    with open(filename, 'rb') as fin:
        # Header parsing is disabled; vocab_size and vector_size are
        # hardcoded for the specific pre-trained file used here.
#        header = to_unicode(fin.readline(), encoding=encoding)
#        vocab_size, vector_size = map(int, header.split())  # throws for invalid file format
        vocab_size = 1917494
        vector_size = 300
        word_matrix = torch.zeros(len(word_idx), vector_size)

        def add_word(_word, _weights):
            if _word not in word_idx:
                return
            vocab.add(_word)
            word_matrix[word_idx[_word]] = _weights

        if binary:
            binary_len = np.dtype(np.float32).itemsize * vector_size
            for _ in range(vocab_size):
                # mixed text and binary: read the word as text, then the vector as binary
                word = []
                while True:
                    ch = fin.read(1)
                    if ch == b' ':
                        break
                    if ch != b'\n':  # ignore newlines in front of words (some binary files have them)
                        word.append(ch)
                word = to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
                # np.frombuffer replaces the deprecated np.fromstring; copy() makes
                # the read-only buffer writable before handing it to torch
                weights = torch.from_numpy(np.frombuffer(fin.read(binary_len), dtype=REAL).copy())
                add_word(word, weights)
        else:
            for line_no, line in enumerate(fin):
                parts = to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
                if len(parts) != vector_size + 1:
                    raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
                word, weights = parts[0], list(map(float, parts[1:]))
                weights = torch.Tensor(weights)
                add_word(word, weights)
    if word_idx is not None:
        assert (len(word_idx), vector_size) == word_matrix.size()
    if normalize:
        # clamp each row's L2 norm to at most 1 (renorm does not force it to exactly 1)
        word_matrix = torch.renorm(word_matrix, 2, 0, 1)
    print("loaded %d pre-trained words from %s (dim %d)" % (len(vocab), filename, vector_size))
    return word_matrix, vector_size, vocab
Developer ID: quyingqi, Project: kbqa-ar-smcnn, Lines: 60, Source: utils.py
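
A usage sketch for load_word2vec_format with a hypothetical two-word text-format file (the path and word_idx mapping are illustrative only). Because header parsing is commented out above and vector_size is hardcoded to 300, the toy file is header-less and uses 300-dim vectors:

import torch

word_idx = {'hello': 0, 'world': 1}
with open('toy_vectors.txt', 'w', encoding='utf8') as f:   # hypothetical path
    for w in word_idx:
        f.write(w + ' ' + ' '.join(['0.1'] * 300) + '\n')

matrix, dim, vocab = load_word2vec_format('toy_vectors.txt', word_idx,
                                          binary=False, normalize=True)
print(matrix.size(), dim, sorted(vocab))  # torch.Size([2, 300]) 300 ['hello', 'world']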

