

Python function.sum Method Code Examples

This article collects and summarizes typical usage examples of the dgl.function.sum method in Python. If you are wondering what the function.sum method does, how to call it, or what working examples look like, the curated code examples below should help. You can also explore further usage examples from dgl.function, the module the method belongs to.


The examples below show 15 uses of the function.sum method, sorted by popularity by default.
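Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of the pattern most of the snippets share: dgl.function.sum is passed to update_all as a builtin reduce function, so that every node sums the messages arriving on its incoming edges. It assumes the older DGL API (dgl.DGLGraph, fn.copy_src) that the examples below also use; newer DGL releases would use dgl.graph and fn.copy_u instead.

import dgl
import dgl.function as fn
import torch

# a tiny directed graph with a 4-dimensional feature 'h' on every node
g = dgl.DGLGraph()
g.add_nodes(3)
g.add_edges([0, 1, 2], [1, 2, 0])
g.ndata['h'] = torch.ones(3, 4)

# copy each source node's 'h' onto its out-edges as message 'm',
# then sum the incoming messages of every node into a new field 'h_sum'
g.update_all(fn.copy_src(src='h', out='m'),
             fn.sum(msg='m', out='h_sum'))
print(g.ndata['h_sum'])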

Example 1: forward

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def forward(self, h):
        if self.dropout:
            h = self.dropout(h)
        h = torch.mm(h, self.weight)
        # normalization by square root of src degree
        h = h * self.g.ndata['norm']
        self.g.ndata['h'] = h
        self.g.update_all(fn.copy_src(src='h', out='m'),
                          fn.sum(msg='m', out='h'))
        h = self.g.ndata.pop('h')
        # normalization by square root of dst degree
        h = h * self.g.ndata['norm']
        # bias
        if self.bias is not None:
            h = h + self.bias
        if self.activation:
            h = self.activation(h)
        return h 
Author: dmlc, Project: dgl, Lines of code: 20, Source file: gcn_spmv.py

Example 2: forward

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def forward(self, ndata):
        projections = []
        for feature, data in ndata.items():
            if feature == dgl.NID or feature.endswith('__len'):
                # This is an additional feature indicating the length of the ``feature``
                # column; we shouldn't process this.
                continue

            module = self.inputs[feature]
            if isinstance(module, (BagOfWords, BagOfWordsPretrained)):
                # Textual feature; find the length and pass it to the textual module.
                length = ndata[feature + '__len']
                result = module(data, length)
            else:
                result = module(data)
            projections.append(result)

        return torch.stack(projections, 1).sum(1) 
Author: dmlc, Project: dgl, Lines of code: 20, Source file: layers.py

Example 3: evaluate

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def evaluate(epoch, args, model, feats, labels, train, val, test):
    with torch.no_grad():
        batch_size = args.eval_batch_size
        if batch_size <= 0:
            pred = model(feats)
        else:
            pred = []
            num_nodes = labels.shape[0]
            n_batch = (num_nodes + batch_size - 1) // batch_size
            for i in range(n_batch):
                batch_start = i * batch_size
                batch_end = min((i + 1) * batch_size, num_nodes)
                batch_feats = [feat[batch_start: batch_end] for feat in feats]
                pred.append(model(batch_feats))
            pred = torch.cat(pred)

        pred = torch.argmax(pred, dim=1)
        correct = (pred == labels).float()
        train_acc = correct[train].sum() / len(train)
        val_acc = correct[val].sum() / len(val)
        test_acc = correct[test].sum() / len(test)
        return train_acc, val_acc, test_acc 
Author: dmlc, Project: dgl, Lines of code: 24, Source file: sign.py

Example 4: forward

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def forward(self, graph, feat):
        graph = graph.local_var()
        feat_c = feat.clone().detach().requires_grad_(False)
        q, k, v = self.q_proj(feat), self.k_proj(feat_c), self.v_proj(feat_c)
        q = q.view(-1, self._num_heads, self._out_feats)
        k = k.view(-1, self._num_heads, self._out_feats)
        v = v.view(-1, self._num_heads, self._out_feats)
        graph.ndata.update({'ft': v, 'el': k, 'er': q})  # k, q instead of q, k: edge_softmax is applied over incoming edges
        # compute edge attention
        graph.apply_edges(fn.u_dot_v('el', 'er', 'e'))
        e = graph.edata.pop('e') / math.sqrt(self._out_feats * self._num_heads)
        graph.edata['a'] = edge_softmax(graph, e).unsqueeze(-1)
        # message passing
        graph.update_all(fn.u_mul_e('ft', 'a', 'm'),
                         fn.sum('m', 'ft2'))
        rst = graph.ndata['ft2']
        # residual
        rst = rst.view(feat.shape) + feat
        if self._trans:
            rst = self.ln1(rst)
            rst = self.ln1(rst + self.FFN(rst))
            # use the same layer norm, see the author's code
        return rst 
Author: dmlc, Project: dgl, Lines of code: 25, Source file: modules.py

Example 5: forward

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def forward(self, nf):
        h = nf.layers[0].data['preprocess']
        h = self.linear(h)

        skip_start = (0 == self.n_layers-1)
        if skip_start:
            h = torch.cat((h, self.activation(h)), dim=1)
        else:
            h = self.activation(h)

        for i, layer in enumerate(self.layers):
            nf.layers[i].data['h'] = h
            nf.block_compute(i,
                             fn.copy_src(src='h', out='m'),
                             fn.sum(msg='m', out='h'),
                             layer)
            h = nf.layers[i+1].data.pop('activation')

        return h 
Author: dmlc, Project: dgl, Lines of code: 21, Source file: gcn_cv_sc.py

Example 6: forward

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def forward(self, g, h):
        g = g.local_var()
        if not self.use_pp or not self.training:
            norm = self.get_norm(g)
            g.ndata['h'] = h
            g.update_all(fn.copy_src(src='h', out='m'),
                         fn.sum(msg='m', out='h'))
            ah = g.ndata.pop('h')
            h = self.concat(h, ah, norm)

        if self.dropout:
            h = self.dropout(h)

        h = self.linear(h)
        h = self.lynorm(h)
        if self.activation:
            h = self.activation(h)
        return h 
Author: dmlc, Project: dgl, Lines of code: 20, Source file: modules.py

Example 7: forward

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def forward(self, g, lg, x, y, deg_g, deg_lg, pm_pd):
        pmpd_x = F.embedding(pm_pd, x)

        sum_x = sum(theta(z) for theta, z in zip(self.theta_list, self.aggregate(g, x)))

        g.set_e_repr({'y' : y})
        g.update_all(fn.copy_edge(edge='y', out='m'), fn.sum('m', 'pmpd_y'))
        pmpd_y = g.pop_n_repr('pmpd_y')

        x = self.theta_x(x) + self.theta_deg(deg_g * x) + sum_x + self.theta_y(pmpd_y)
        n = self.out_feats // 2
        x = th.cat([x[:, :n], F.relu(x[:, n:])], 1)
        x = self.bn_x(x)

        sum_y = sum(gamma(z) for gamma, z in zip(self.gamma_list, self.aggregate(lg, y)))

        y = self.gamma_y(y) + self.gamma_deg(deg_lg * y) + sum_y + self.gamma_x(pmpd_x)
        y = th.cat([y[:, :n], F.relu(y[:, n:])], 1)
        y = self.bn_y(y)

        return x, y 
Author: dmlc, Project: dgl, Lines of code: 23, Source file: gnn.py

Example 8: forward

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def forward(self, h):
        if self.dropout:
            h = mx.nd.Dropout(h, p=self.dropout)
        h = mx.nd.dot(h, self.weight.data(h.context))
        # normalization by square root of src degree
        h = h * self.g.ndata['norm']
        self.g.ndata['h'] = h
        self.g.update_all(fn.copy_src(src='h', out='m'),
                          fn.sum(msg='m', out='h'))
        h = self.g.ndata.pop('h')
        # normalization by square root of dst degree
        h = h * self.g.ndata['norm']
        # bias
        if self.bias is not None:
            h = h + self.bias.data(h.context)
        if self.activation:
            h = self.activation(h)
        return h 
Author: dmlc, Project: dgl, Lines of code: 20, Source file: gcn_spmv.py

Example 9: forward

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def forward(self, nf):
        h = nf.layers[0].data['preprocess']
        features = nf.layers[0].data['features']
        h = mx.nd.concat(h, features)
        h = self.input_layer(h)

        for i, layer in enumerate(self.layers):
            nf.layers[i].data['h'] = h
            parent_nid = dgl.utils.toindex(nf.layer_parent_nid(i+1))
            layer_nid = nf.map_from_parent_nid(i, parent_nid,
                                               remap_local=True).as_in_context(h.context)
            # activation from previous layer of the nodes in (i+1)-th layer, used in graphSAGE
            self_h = h[layer_nid]
            nf.layers[i+1].data['self_h'] = self_h
            nf.block_compute(i,
                             fn.copy_src(src='h', out='m'),
                             fn.sum(msg='m', out='h'),
                             layer)
            h = nf.layers[i+1].data.pop('activation')

        return h 
Author: dmlc, Project: dgl, Lines of code: 23, Source file: graphsage_cv.py

Example 10: forward

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def forward(self, nf):
        nf.layers[0].data['activation'] = nf.layers[0].data['features']

        for i, layer in enumerate(self.layers):
            h = nf.layers[i].data.pop('activation')
            if self.dropout:
                h = mx.nd.Dropout(h, p=self.dropout)
            nf.layers[i].data['h'] = h
            degs = nf.layer_in_degree(i + 1).astype('float32').as_in_context(h.context)
            nf.layers[i + 1].data['norm'] = mx.nd.expand_dims(1./degs, 1)
            nf.block_compute(i,
                             fn.copy_src(src='h', out='m'),
                             fn.sum(msg='m', out='h'),
                             layer)

        h = nf.layers[-1].data.pop('activation')
        return h 
Author: dmlc, Project: dgl, Lines of code: 19, Source file: gcn_ns_sc.py

Example 11: forward

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def forward(self, subg, vertices):
        assert vertices is not None
        if self.use_spmv:
            feat = subg.layers[0].data['in']
            subg.layers[0].data['cat'] = mx.nd.concat(feat, subg.layers[0].data['h'],
                                                      dim=1)

            msg_func = fn.copy_src(src='cat', out='m')
            reduce_func = fn.sum(msg='m', out='accum')
        else:
            msg_func = gcn_msg
            reduce_func = gcn_reduce
        deg = mx.nd.expand_dims(subg.layer_in_degree(1), 1).astype(np.float32)
        # We don't need dropout for inference.
        if self.dropout:
            subg.layers[0].data['h'] = mx.nd.Dropout(subg.layers[0].data['h'], p=self.dropout)
        subg.block_compute(0, msg_func, reduce_func, None)
        ctx = subg.layers[1].data['accum'].context
        if self.use_spmv:
            subg.layers[0].data.pop('cat')
            deg = deg.as_in_context(ctx)
            subg.layers[1].data['accum'] = subg.layers[1].data['accum'] / deg
        subg.apply_layer(1, self.update, inplace=self.inference)
        subg.layers[1].data.pop('accum')
        return subg.layers[1].data['h1'] 
Author: dmlc, Project: dgl, Lines of code: 27, Source file: sse_batch.py

Example 12: pagerank_reduce_func

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def pagerank_reduce_func(nodes):
    msgs = torch.sum(nodes.mailbox['pv'], dim=1)
    pv = (1 - DAMP) / N + DAMP * msgs
    return {'pv' : pv}


###############################################################################
# The reduce functions are **Node UDFs**.  Node UDFs have a single argument
# ``nodes``, which has two members ``data`` and ``mailbox``.  ``data``
# contains the node features and ``mailbox`` contains all incoming message
# features, stacked along the second dimension (hence the ``dim=1`` argument).
#
# The message UDF works on a batch of edges, whereas the reduce UDF works on
# a batch of nodes, consuming the messages stacked in their mailboxes. Their
# relationship is as follows:
#
# .. image:: https://i.imgur.com/kIMiuFb.png
#
# Register the message function and reduce function, which will be called
# later by DGL. 
Author: dmlc, Project: dgl, Lines of code: 22, Source file: 3_pagerank.py
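To make the commentary above concrete, here is a small sketch (not part of the original tutorial snippet) of an edge UDF that pairs with pagerank_reduce_func, together with the registration step the commentary mentions. It assumes the graph g, the constants N and DAMP, and the node fields 'pv' and 'deg' set up elsewhere in the same tutorial, as well as the older DGLGraph.register_* API.

def pagerank_message_func(edges):
    # Edge UDF: divide the source node's 'pv' by its out-degree and
    # send the result along every edge in the batch as message 'pv'
    return {'pv': edges.src['pv'] / edges.src['deg']}

# register both UDFs once; later calls such as g.update_all() can then
# be made without arguments and will use the registered functions
g.register_message_func(pagerank_message_func)
g.register_reduce_func(pagerank_reduce_func)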

Example 13: pagerank_builtin

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def pagerank_builtin(g):
    g.ndata['pv'] = g.ndata['pv'] / g.ndata['deg']
    g.update_all(message_func=fn.copy_src(src='pv', out='m'),
                 reduce_func=fn.sum(msg='m', out='m_sum'))
    g.ndata['pv'] = (1 - DAMP) / N + DAMP * g.ndata['m_sum']


###############################################################################
# In the previous example code, you directly provide the UDFs to the :func:`update_all <DGLGraph.update_all>`
# as its arguments.
# This will override the previously registered UDFs.
#
# In addition to cleaner code, using ``builtin`` functions also gives DGL the
# opportunity to fuse operations together. This results in faster execution.  For
# example, DGL will fuse the ``copy_src`` message function and ``sum`` reduce
# function into one sparse matrix-vector (spMV) multiplication.
#
# `The following section <spmv_>`_ describes why spMV can speed up the scatter-gather
# phase in PageRank.  For more details about the ``builtin`` functions in DGL,
# see :doc:`API reference <../../api/python/function>`.
#
# You can also download and run the different code examples to see the differences. 
Author: dmlc, Project: dgl, Lines of code: 24, Source file: 3_pagerank.py
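As a quick illustration of the difference discussed above (a sketch reusing the functions defined in the two surrounding examples and the same tutorial graph g), both calls below compute one PageRank iteration; only the builtin form lets DGL fuse copy_src and sum into a single spMV:

# UDF version: message and reduce functions run as ordinary Python code
g.update_all(pagerank_message_func, pagerank_reduce_func)

# builtin version: the same update, but copy_src + sum can be fused by
# DGL into one sparse matrix-vector multiplication
pagerank_builtin(g)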

Example 14: forward

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def forward(self, u_hat, routing_num=1):
        self.g.edata['u_hat'] = u_hat

        for r in range(routing_num):
            # step 1 (line 4): normalize over out edges
            edges_b = self.g.edata['b'].view(self.in_nodes, self.out_nodes)
            self.g.edata['c'] = F.softmax(edges_b, dim=1).view(-1, 1)
            self.g.edata['c u_hat'] = self.g.edata['c'] * self.g.edata['u_hat']

            # Execute step 1 & 2
            self.g.update_all(fn.copy_e('c u_hat', 'm'), fn.sum('m', 's'))

            # step 3 (line 6)
            self.g.nodes[self.out_indx].data['v'] = self.squash(self.g.nodes[self.out_indx].data['s'], dim=1)

            # step 4 (line 7)
            v = th.cat([self.g.nodes[self.out_indx].data['v']] * self.in_nodes, dim=0)
            self.g.edata['b'] = self.g.edata['b'] + (self.g.edata['u_hat'] * v).sum(dim=1, keepdim=True) 
Author: dmlc, Project: dgl, Lines of code: 20, Source file: 2_capsule.py

Example 15: test_reverse_shared_frames

# Required import: from dgl import function [as an alias]
# Or: from dgl.function import sum [as an alias]
def test_reverse_shared_frames():
    g = dgl.DGLGraph()
    g.add_nodes(3)
    g.add_edges([0, 1, 2], [1, 2, 1])
    g.ndata['h'] = F.tensor([[0.], [1.], [2.]])
    g.edata['h'] = F.tensor([[3.], [4.], [5.]])

    rg = g.reverse(share_ndata=True, share_edata=True)
    assert F.allclose(g.ndata['h'], rg.ndata['h'])
    assert F.allclose(g.edata['h'], rg.edata['h'])
    assert F.allclose(g.edges[[0, 2], [1, 1]].data['h'],
                      rg.edges[[1, 1], [0, 2]].data['h'])

    rg.ndata['h'] = rg.ndata['h'] + 1
    assert F.allclose(rg.ndata['h'], g.ndata['h'])

    g.edata['h'] = g.edata['h'] - 1
    assert F.allclose(rg.edata['h'], g.edata['h'])

    src_msg = fn.copy_src(src='h', out='m')
    sum_reduce = fn.sum(msg='m', out='h')

    rg.update_all(src_msg, sum_reduce)
    assert F.allclose(g.ndata['h'], rg.ndata['h']) 
Author: dmlc, Project: dgl, Lines of code: 26, Source file: test_transform.py


Note: the dgl.function.sum examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by their respective developers, and the copyright of the source code belongs to the original authors. Please follow the corresponding project's License when distributing or using the code; do not reproduce this article without permission.