This article collects typical usage examples of the mxnet.nd.dot method in Python. If you are wondering how nd.dot works, how to call it, or what nd.dot looks like in practice, the curated code samples below may help. You can also explore further usage examples from the containing module, mxnet.nd.
Ten code examples of the nd.dot method are shown below, sorted by popularity by default.
Example 1: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import dot [as alias]
def forward(self, x):
    """
    Return shape: (batch_size, 2000)
    """
    batch_size = x.shape[0]  # was a free variable in the original snippet
    # Encode layer
    question = x[:, 0:30]
    question = self.Embed(question)
    question = self.gru(question)
    # Interaction layer: (batch, 30, hidden) dot (hidden, 2000) -> (batch, 30, 2000)
    interaction = nd.dot(question, self.topic_embedding.data())
    interaction = nd.transpose(interaction, axes=(0, 2, 1))
    interaction = interaction.reshape((batch_size * 2000, -1))
    # interaction = interaction.expand_dims(axis=1)
    # Aggregation layer (convolutional variant kept commented out):
    # interaction = self.pooling(self.conv_2(self.conv_1(interaction)))
    res = self.mlp_2(self.mlp_1(interaction))
    res = res.reshape((batch_size, 2000))
    return res
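nd.dot on a 3-D and a 2-D array contracts the last axis of the first operand with the first axis of the second, which is what lets the interaction layer score every token against every topic in one call. A minimal shape walk-through, with all dimensions assumed for illustration (batch 4, 30 tokens, hidden size 64, 2000 topics):

from mxnet import nd

question = nd.random.normal(shape=(4, 30, 64))        # stand-in for the GRU output
topic_embedding = nd.random.normal(shape=(64, 2000))  # stand-in for the topic matrix

interaction = nd.dot(question, topic_embedding)          # (4, 30, 2000)
interaction = nd.transpose(interaction, axes=(0, 2, 1))  # (4, 2000, 30)
interaction = interaction.reshape((4 * 2000, -1))        # (8000, 30)
print(interaction.shape)  # (8000, 30)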
Example 2: basis_message_func
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import dot [as alias]
def basis_message_func(self, edges):
    """Message function for the basis regularizer."""
    ctx = edges.src['h'].context
    if self.num_bases < self.num_rels:
        # generate all relation weights from the shared bases
        weight = self.weight.data(ctx).reshape(
            self.num_bases, self.in_feat * self.out_feat)
        weight = nd.dot(self.w_comp.data(ctx), weight).reshape(
            self.num_rels, self.in_feat, self.out_feat)
    else:
        weight = self.weight.data(ctx)
    msg = utils.bmm_maybe_select(edges.src['h'], weight, edges.data['type'])
    if 'norm' in edges.data:
        msg = msg * edges.data['norm']
    return {'msg': msg}
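Example 2 builds each relation's weight matrix as a linear combination of num_bases shared basis matrices (the basis-decomposition regularizer from R-GCN). A standalone sketch of just that composition step, with all sizes assumed for illustration:

from mxnet import nd

num_rels, num_bases, in_feat, out_feat = 4, 2, 3, 3
bases = nd.random.normal(shape=(num_bases, in_feat * out_feat))  # shared bases
w_comp = nd.random.normal(shape=(num_rels, num_bases))           # per-relation coefficients

# Each relation's weight matrix is a linear combination of the bases.
weight = nd.dot(w_comp, bases).reshape((num_rels, in_feat, out_feat))
print(weight.shape)  # (4, 3, 3)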
Example 3: global_norm
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import dot [as alias]
from typing import Generator, List, Tuple, Union
from mxnet import nd
from mxnet.ndarray import NDArray

def global_norm(arrays: Union[Generator[NDArray, NDArray, NDArray], List[NDArray], Tuple[NDArray]]) -> NDArray:
    """
    Calculate the global norm of a list or tuple of NDArrays using this formula:
        global_norm = sqrt(sum([l2norm(p)**2 for p in parameters]))

    :param arrays: list or tuple of parameters to calculate the global norm on
    :return: single-value NDArray
    """
    def _norm(array):
        if array.stype == 'default':
            x = array.reshape((-1,))
            return nd.dot(x, x)
        return array.norm().square()

    total_norm = nd.add_n(*[_norm(arr) for arr in arrays])
    total_norm = nd.sqrt(total_norm)
    return total_norm
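A quick sanity check, using two arrays whose l2 norms are 5 and 13, so the global norm should be sqrt(5**2 + 13**2) ≈ 13.93:

from mxnet import nd

a = nd.array([3.0, 4.0])     # l2 norm 5
b = nd.array([[5.0, 12.0]])  # l2 norm 13
print(global_norm([a, b]))   # ~13.928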
Example 4: linreg
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import dot [as alias]
def linreg(X, w, b):
    """Linear regression."""
    return nd.dot(X, w) + b
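A minimal usage sketch with made-up parameters; the bias is broadcast across all rows of nd.dot(X, w):

from mxnet import nd

X = nd.random.normal(shape=(5, 2))  # 5 samples, 2 features
w = nd.array([[2.0], [-3.4]])       # weights, shape (2, 1)
b = nd.array([4.2])                 # bias, broadcast over rows
y_hat = linreg(X, w, b)             # shape (5, 1)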
Example 5: get_distance_matrix
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import dot [as alias]
def get_distance_matrix(x):
    """Get the pairwise distance matrix of a matrix. Used in testing."""
    square = nd.sum(x ** 2.0, axis=1, keepdims=True)
    distance_square = square + square.transpose() - (2.0 * nd.dot(x, x.transpose()))
    return nd.sqrt(distance_square)
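The function expands the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b row-wise via nd.dot. A minimal check on two points (note that, in general, floating-point error can leave tiny negative entries before the sqrt, so clipping distance_square at zero is a sensible guard outside of tests):

from mxnet import nd

x = nd.array([[0.0, 0.0],
              [3.0, 4.0]])
print(get_distance_matrix(x))
# [[0. 5.]
#  [5. 0.]]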
Example 6: matmul_maybe_select
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import dot [as alias]
def matmul_maybe_select(A, B):
    """Perform the matrix multiplication C = A * B, where A may be an integer id vector.

    If A is an integer vector, we treat it as a one-hot encoded tensor. In this
    case, the expensive dense matrix multiply can be replaced by a much cheaper
    index lookup. For example,
    ::

        A = [2, 0, 1],
        B = [[0.1, 0.2],
             [0.3, 0.4],
             [0.5, 0.6]]

    then matmul_maybe_select(A, B) is equivalent to
    ::

        [[0, 0, 1],   [[0.1, 0.2],
         [1, 0, 0], *  [0.3, 0.4],
         [0, 1, 0]]    [0.5, 0.6]]

    In all other cases, perform a normal matmul.

    Parameters
    ----------
    A : mxnet.NDArray
        lhs tensor
    B : mxnet.NDArray
        rhs tensor

    Returns
    -------
    C : mxnet.NDArray
        result tensor
    """
    if A.dtype in (np.int32, np.int64) and len(A.shape) == 1:
        return nd.take(B, A, axis=0)
    else:
        return nd.dot(A, B)
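Running the docstring's numbers through the function exercises the cheap index-lookup branch; the result matches the equivalent one-hot matmul:

import numpy as np
from mxnet import nd

A = nd.array([2, 0, 1], dtype=np.int32)
B = nd.array([[0.1, 0.2],
              [0.3, 0.4],
              [0.5, 0.6]])
print(matmul_maybe_select(A, B))
# [[0.5 0.6]
#  [0.1 0.2]
#  [0.3 0.4]]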
Example 7: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import dot [as alias]
def forward(self, x, cheb_polys):
    '''
    Parameters
    ----------
    x: nd.array, shape is (batch_size * time_step, num_of_vertices, c_in)
    cheb_polys: nd.array,
        shape is (num_of_vertices, order_of_cheb * num_of_vertices)

    Returns
    -------
    nd.array, shape is (batch_size * time_step, num_of_vertices, c_out)
    '''
    _, num_of_vertices, c_in = x.shape

    # (batch_size * time_step * c_in, num_of_vertices)
    x_tmp = x.transpose((0, 2, 1)).reshape((-1, num_of_vertices))

    # (batch_size * time_step, c_in, order_of_cheb, num_of_vertices)
    x_mul = nd.dot(x_tmp, cheb_polys).reshape((-1,
                                               c_in,
                                               self.order_of_cheb,
                                               num_of_vertices))

    # (batch_size * time_step, num_of_vertices, c_in * order_of_cheb)
    x_ker = x_mul.transpose((0, 3, 1, 2)) \
                 .reshape((-1, num_of_vertices, c_in * self.order_of_cheb))
    return self.theta(x_ker)
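The chain of reshapes is easiest to follow with concrete numbers. A sketch with assumed sizes (two batch-time slices, 4 vertices, 3 input channels, Chebyshev order 2):

from mxnet import nd

bt, V, c_in, K = 2, 4, 3, 2  # assumed sizes for illustration
x = nd.random.normal(shape=(bt, V, c_in))
cheb_polys = nd.random.normal(shape=(V, K * V))

x_tmp = x.transpose((0, 2, 1)).reshape((-1, V))                    # (6, 4)
x_mul = nd.dot(x_tmp, cheb_polys).reshape((-1, c_in, K, V))        # (2, 3, 2, 4)
x_ker = x_mul.transpose((0, 3, 1, 2)).reshape((-1, V, c_in * K))   # (2, 4, 6)
print(x_ker.shape)  # (2, 4, 6)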
Example 8: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import dot [as alias]
def forward(self, adj, feat):
    r"""Compute (Dense) Graph SAGE layer.

    Parameters
    ----------
    adj : mxnet.NDArray
        The adjacency matrix of the graph to apply SAGE convolution on. When
        applied to a unidirectional bipartite graph, ``adj`` should be of shape
        :math:`(N_{out}, N_{in})`; when applied to a homogeneous graph, ``adj``
        should be of shape :math:`(N, N)`. In both cases, a row represents a
        destination node while a column represents a source node.
    feat : mxnet.NDArray or a pair of mxnet.NDArray
        If an mxnet.NDArray is given, it is the input feature of shape
        :math:`(N, D_{in})`, where :math:`D_{in}` is the size of the input
        feature and :math:`N` is the number of nodes.
        If a pair of mxnet.NDArray is given, the pair must contain two arrays
        of shape :math:`(N_{in}, D_{in})` and :math:`(N_{out}, D_{in})`.

    Returns
    -------
    mxnet.NDArray
        The output feature of shape :math:`(N, D_{out})`, where :math:`D_{out}`
        is the size of the output feature.
    """
    check_eq_shape(feat)
    if isinstance(feat, tuple):
        feat_src = self.feat_drop(feat[0])
        feat_dst = self.feat_drop(feat[1])
    else:
        feat_src = feat_dst = self.feat_drop(feat)
    adj = adj.astype(feat_src.dtype).as_in_context(feat_src.context)
    in_degrees = adj.sum(axis=1, keepdims=True)
    # mean aggregation: average each destination's feature with its in-neighbors'
    h_neigh = (nd.dot(adj, feat_src) + feat_dst) / (in_degrees + 1)
    rst = self.fc(h_neigh)
    # activation
    if self.activation is not None:
        rst = self.activation(rst)
    # normalization
    if self._norm is not None:
        rst = self._norm(rst)
    return rst
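The core of the layer is the mean aggregator on the h_neigh line: each destination node averages its own feature with those of its in-neighbors. A tiny sketch on an assumed 3-node adjacency matrix:

from mxnet import nd

adj = nd.array([[0, 1, 1],   # node 0 receives from nodes 1 and 2
                [1, 0, 0],
                [0, 0, 0]])  # node 2 has no in-neighbors
feat = nd.array([[1.0], [2.0], [4.0]])

in_degrees = adj.sum(axis=1, keepdims=True)
h_neigh = (nd.dot(adj, feat) + feat) / (in_degrees + 1)
print(h_neigh)  # [[2.333], [1.5], [4.0]] -- self feature averaged with neighbors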
Example 9: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import dot [as alias]
def forward(self, adj, feat, lambda_max=None):
    r"""Compute (Dense) Chebyshev Spectral Graph Convolution layer.

    Parameters
    ----------
    adj : mxnet.NDArray
        The adjacency matrix of the graph to apply graph convolution on,
        should be of shape :math:`(N, N)`, where a row represents the
        destination and a column represents the source.
    feat : mxnet.NDArray
        The input feature of shape :math:`(N, D_{in})`, where :math:`D_{in}`
        is the size of the input feature and :math:`N` is the number of nodes.
    lambda_max : float or None, optional
        The largest eigenvalue of the given graph. Default: None.

    Returns
    -------
    mxnet.NDArray
        The output feature of shape :math:`(N, D_{out})`, where :math:`D_{out}`
        is the size of the output feature.
    """
    A = adj.astype(feat.dtype).as_in_context(feat.context)
    num_nodes = A.shape[0]
    # symmetrically normalized Laplacian: L = I - D^{-1/2} A D^{-1/2}
    in_degree = 1. / nd.clip(A.sum(axis=1), 1, float('inf')).sqrt()
    D_invsqrt = nd.diag(in_degree)
    I = nd.eye(num_nodes, ctx=A.context)
    L = I - nd.dot(D_invsqrt, nd.dot(A, D_invsqrt))
    if lambda_max is None:
        # NOTE(zihao): nd.linalg.syevd assumes a symmetric matrix, so this
        # only works for undirected graphs.
        lambda_max = (nd.linalg.syevd(L)[1]).max()
    # rescale the Laplacian, then apply the Chebyshev recurrence
    # Z_k = 2 * L_hat * Z_{k-1} - Z_{k-2}
    L_hat = 2 * L / lambda_max - I
    Z = [nd.eye(num_nodes, ctx=A.context)]
    Zh = self.fc[0](feat)
    for i in range(1, self._k):
        if i == 1:
            Z.append(L_hat)
        else:
            Z.append(2 * nd.dot(L_hat, Z[-1]) - Z[-2])
        Zh = Zh + nd.dot(Z[i], self.fc[i](feat))
    if self.bias is not None:
        Zh = Zh + self.bias.data(feat.context)
    return Zh
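Because L is the symmetrically normalized Laplacian, its spectrum lies in [0, 2], and rescaling by lambda_max maps it into [-1, 1], the interval on which the Chebyshev recurrence Z_k = 2 * L_hat * Z_{k-1} - Z_{k-2} is stable. A sketch of the Laplacian construction on an assumed 3-node undirected path graph:

from mxnet import nd

A = nd.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]])  # undirected path graph, symmetric
deg_invsqrt = 1.0 / nd.clip(A.sum(axis=1), 1, float('inf')).sqrt()
D_invsqrt = nd.diag(deg_invsqrt)
I = nd.eye(3)
L = I - nd.dot(D_invsqrt, nd.dot(A, D_invsqrt))
eigvals = nd.linalg.syevd(L)[1]
print(eigvals)  # approximately [0, 1, 2], all within [0, 2]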
Example 10: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import dot [as alias]
def forward(self, adj, feat):
    r"""Compute (Dense) Graph Convolution layer.

    Parameters
    ----------
    adj : mxnet.NDArray
        The adjacency matrix of the graph to apply graph convolution on. When
        applied to a unidirectional bipartite graph, ``adj`` should be of shape
        :math:`(N_{out}, N_{in})`; when applied to a homogeneous graph, ``adj``
        should be of shape :math:`(N, N)`. In both cases, a row represents a
        destination node while a column represents a source node.
    feat : mxnet.NDArray
        The input feature.

    Returns
    -------
    mxnet.NDArray
        The output feature of shape :math:`(N, D_{out})`, where :math:`D_{out}`
        is the size of the output feature.
    """
    adj = adj.astype(feat.dtype).as_in_context(feat.context)
    src_degrees = nd.clip(adj.sum(axis=0), a_min=1, a_max=float('inf'))
    dst_degrees = nd.clip(adj.sum(axis=1), a_min=1, a_max=float('inf'))
    feat_src = feat
    if self._norm == 'both':
        norm_src = nd.power(src_degrees, -0.5)
        shp_src = norm_src.shape + (1,) * (feat.ndim - 1)
        norm_src = norm_src.reshape(shp_src).as_in_context(feat.context)
        feat_src = feat_src * norm_src
    if self._in_feats > self._out_feats:
        # multiply by W first to reduce the feature size for aggregation
        feat_src = nd.dot(feat_src, self.weight.data(feat_src.context))
        rst = nd.dot(adj, feat_src)
    else:
        # aggregate first, then multiply by W
        rst = nd.dot(adj, feat_src)
        rst = nd.dot(rst, self.weight.data(feat_src.context))
    if self._norm != 'none':
        if self._norm == 'both':
            norm_dst = nd.power(dst_degrees, -0.5)
        else:  # right
            norm_dst = 1.0 / dst_degrees
        shp_dst = norm_dst.shape + (1,) * (feat.ndim - 1)
        norm_dst = norm_dst.reshape(shp_dst).as_in_context(feat.context)
        rst = rst * norm_dst
    if self.bias is not None:
        rst = rst + self.bias.data(feat.context)
    if self._activation is not None:
        rst = self._activation(rst)
    return rst
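The self._in_feats > self._out_feats branch exploits the associativity of matrix products: nd.dot(adj, nd.dot(feat, W)) equals nd.dot(nd.dot(adj, feat), W), so applying the weight on whichever side is smaller makes the dense aggregation cheaper. A quick numerical check, with all sizes assumed for illustration:

from mxnet import nd

N, d_in, d_out = 5, 8, 2  # assumed sizes
adj = nd.random.uniform(shape=(N, N))
feat = nd.random.normal(shape=(N, d_in))
W = nd.random.normal(shape=(d_in, d_out))

left = nd.dot(nd.dot(adj, feat), W)   # aggregate first
right = nd.dot(adj, nd.dot(feat, W))  # transform first (cheaper when d_out < d_in)
print(nd.abs(left - right).max())     # ~0 up to floating-point error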