This article collects typical usage examples of the Python method mxnet.ndarray.batch_dot. If you have been wondering how ndarray.batch_dot works, or how to use it in your own code, the curated examples below may help. You can also explore further usage examples of the containing module, mxnet.ndarray.
The following shows 7 code examples of ndarray.batch_dot, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python examples.
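Before the examples, here is a minimal illustrative sketch (not taken from the examples below) of the shape contract of mx.nd.batch_dot: given inputs of shape (B, M, K) and (B, K, N), it performs B independent matrix multiplications and returns shape (B, M, N); transpose_a/transpose_b transpose the last two axes of the corresponding input first.

import mxnet as mx

a = mx.nd.ones((4, 2, 3))           # (B, M, K)
b = mx.nd.ones((4, 3, 5))           # (B, K, N)
print(mx.nd.batch_dot(a, b).shape)  # (4, 2, 5)

# transpose_b=True treats the second input as (B, N, K) and
# transposes its last two axes before multiplying
c = mx.nd.ones((4, 5, 3))           # (B, N, K)
print(mx.nd.batch_dot(a, c, transpose_b=True).shape)  # (4, 2, 5)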
Example 1: create_neg
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import batch_dot [as alias]
def create_neg(self, neg_head):
    if neg_head:
        def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
            hidden_dim = heads.shape[1]
            # arrange the negative heads as (num_chunks, hidden_dim, neg_sample_size)
            heads = heads.reshape(num_chunks, neg_sample_size, hidden_dim)
            heads = mx.nd.transpose(heads, axes=(0, 2, 1))
            # project each tail by its relation matrix:
            # (relation_dim, entity_dim) @ (entity_dim, 1)
            tails = tails.expand_dims(2)
            relations = relations.reshape(-1, self.relation_dim, self.entity_dim)
            tmp = mx.nd.batch_dot(relations, tails).squeeze()
            tmp = tmp.reshape(num_chunks, chunk_size, hidden_dim)
            # score every positive pair against all negative heads in its chunk
            return nd.linalg_gemm2(tmp, heads)
        return fn
    else:
        def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
            hidden_dim = heads.shape[1]
            # arrange the negative tails as (num_chunks, hidden_dim, neg_sample_size)
            tails = tails.reshape(num_chunks, neg_sample_size, hidden_dim)
            tails = mx.nd.transpose(tails, axes=(0, 2, 1))
            # project each head by its relation matrix
            heads = heads.expand_dims(2)
            relations = relations.reshape(-1, self.relation_dim, self.entity_dim)
            tmp = mx.nd.batch_dot(relations, heads).squeeze()
            tmp = tmp.reshape(num_chunks, chunk_size, hidden_dim)
            # score every positive pair against all negative tails in its chunk
            return nd.linalg_gemm2(tmp, tails)
        return fn
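As a quick standalone illustration (dimensions are made up), nd.linalg_gemm2 at the end performs a batched matrix multiply over the leading axis, so the returned score tensor pairs every positive triple in a chunk with every negative candidate:

import mxnet.ndarray as nd

tmp = nd.ones((2, 8, 16))     # (num_chunks, chunk_size, hidden_dim)
heads = nd.ones((2, 16, 10))  # (num_chunks, hidden_dim, neg_sample_size)
print(nd.linalg_gemm2(tmp, heads).shape)  # (2, 8, 10)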
Example 2: gram_matrix
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import batch_dot [as alias]
def gram_matrix(y):
    (b, ch, h, w) = y.shape
    # flatten the spatial dimensions: (b, ch, h*w)
    features = y.reshape((b, ch, w * h))
    # batched outer product of the feature rows, normalized by layer size
    gram = F.batch_dot(features, features, transpose_b=True) / (ch * h * w)
    return gram
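A quick shape check (illustrative only): batch_dot with transpose_b=True contracts over the flattened spatial axis, yielding one (ch, ch) Gram matrix per image in the batch.

import mxnet.ndarray as F

y = F.random.uniform(shape=(2, 64, 32, 32))  # (b, ch, h, w)
print(gram_matrix(y).shape)                  # (2, 64, 64)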
Example 3: forward
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import batch_dot [as alias]
def forward(self, X):
    # input X is a (batch, C, H, W) feature map
    # broadcast the learned weight to the Gram matrix's shape and combine them
    self.P = F.batch_dot(F.broadcast_to(self.weight.data(), shape=self.gram.shape), self.gram)
    # apply the transposed (C, C) transform to the flattened features,
    # then restore the original spatial shape
    return F.batch_dot(
        F.SwapAxis(self.P, 1, 2).broadcast_to((X.shape[0], self.C, self.C)),
        X.reshape((0, 0, X.shape[2] * X.shape[3]))
    ).reshape(X.shape)
Example 4: prepare
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import batch_dot [as alias]
def prepare(self, g, gpu_id, trace=False):
    head_ids, tail_ids = g.all_edges(order='eid')
    # one (entity_dim, relation_dim) projection matrix per edge
    projection = self.projection_emb(g.edata['id'], gpu_id, trace)
    projection = projection.reshape(-1, self.entity_dim, self.relation_dim)
    # gather endpoint embeddings as 1 x entity_dim row vectors
    head_emb = g.ndata['emb'][head_ids.as_in_context(g.ndata['emb'].context)].expand_dims(axis=-2)
    tail_emb = g.ndata['emb'][tail_ids.as_in_context(g.ndata['emb'].context)].expand_dims(axis=-2)
    # project every head/tail into its relation-specific space
    g.edata['head_emb'] = nd.batch_dot(head_emb, projection).squeeze()
    g.edata['tail_emb'] = nd.batch_dot(tail_emb, projection).squeeze()
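The expand_dims/batch_dot/squeeze pattern above is a per-edge row-vector-times-matrix product. A standalone sketch with made-up sizes:

import mxnet.ndarray as nd

emb = nd.ones((7, 10))      # (num_edges, entity_dim)
proj = nd.ones((7, 10, 6))  # (num_edges, entity_dim, relation_dim)
out = nd.batch_dot(emb.expand_dims(axis=-2), proj).squeeze()
print(out.shape)  # (7, 6) -- one projected vector per edge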
Example 5: edge_func
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import batch_dot [as alias]
def edge_func(self, edges):
    head = edges.src['emb']
    tail = edges.dst['emb'].expand_dims(2)
    rel = edges.data['emb']
    # view each relation embedding as a (relation_dim, entity_dim) matrix
    rel = rel.reshape(-1, self.relation_dim, self.entity_dim)
    # bilinear score: elementwise head * (M_r @ tail), summed below
    score = head * mx.nd.batch_dot(rel, tail).squeeze()
    # TODO: check if use self.gamma
    return {'score': mx.nd.sum(score, -1)}
    # return {'score': self.gamma - th.norm(score, p=1, dim=-1)}
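The sum of head * (M_r @ tail) over the last axis is the bilinear form h^T M_r t per edge. An illustrative standalone computation, assuming (for simplicity) relation_dim == entity_dim:

import mxnet as mx

h = mx.nd.random.uniform(shape=(5, 4))     # (num_edges, entity_dim)
M = mx.nd.random.uniform(shape=(5, 4, 4))  # one relation matrix per edge
t = mx.nd.random.uniform(shape=(5, 4))
score = mx.nd.sum(h * mx.nd.batch_dot(M, t.expand_dims(2)).squeeze(), -1)
print(score.shape)  # (5,) -- one scalar score per edge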
Example 6: forward
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import batch_dot [as alias]
def forward(self, X):
    # input X is a (batch, C, H, W) feature map
    # broadcast the learned weight to the Gram matrix's shape and combine them
    self.P = F.batch_dot(F.broadcast_to(self.weight.data(), shape=self.gram.shape), self.gram.data())
    # apply the transposed (C, C) transform to the flattened features,
    # then restore the original spatial shape
    return F.batch_dot(
        F.SwapAxis(self.P, 1, 2).broadcast_to((X.shape[0], self.C, self.C)),
        X.reshape((0, 0, X.shape[2] * X.shape[3]))
    ).reshape(X.shape)
Example 7: create_neg_prepare
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import batch_dot [as alias]
def create_neg_prepare(self, neg_head):
    if neg_head:
        def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
            # positive nodes: project each tail by its own relation
            projection = self.projection_emb(rel_id, gpu_id, trace)
            projection = projection.reshape(-1, self.entity_dim, self.relation_dim)
            tail = tail.reshape(-1, 1, self.entity_dim)
            tail = nd.batch_dot(tail, projection)
            tail = tail.reshape(num_chunks, -1, self.relation_dim)
            # negative nodes: project each candidate head by every relation in its chunk
            projection = projection.reshape(num_chunks, -1, self.entity_dim, self.relation_dim)
            head = head.reshape(num_chunks, -1, 1, self.entity_dim)
            num_rels = projection.shape[1]
            num_nnodes = head.shape[1]
            heads = []
            for i in range(num_chunks):
                head_negs = []
                for j in range(num_nnodes):
                    head_neg = head[i][j]
                    head_neg = head_neg.reshape(1, 1, self.entity_dim)
                    # replicate the candidate across all relations, then project
                    head_neg = nd.broadcast_axis(head_neg, axis=0, size=num_rels)
                    head_neg = nd.batch_dot(head_neg, projection[i])
                    head_neg = head_neg.squeeze(axis=1)
                    head_negs.append(head_neg)
                head_negs = nd.stack(*head_negs, axis=1)
                heads.append(head_negs)
            head = nd.stack(*heads)
            return head, tail
        return fn
    else:
        def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
            # positive nodes: project each head by its own relation
            projection = self.projection_emb(rel_id, gpu_id, trace)
            projection = projection.reshape(-1, self.entity_dim, self.relation_dim)
            head = head.reshape(-1, 1, self.entity_dim)
            head = nd.batch_dot(head, projection).squeeze()
            head = head.reshape(num_chunks, -1, self.relation_dim)
            # negative nodes: project each candidate tail by every relation in its chunk
            projection = projection.reshape(num_chunks, -1, self.entity_dim, self.relation_dim)
            tail = tail.reshape(num_chunks, -1, 1, self.entity_dim)
            num_rels = projection.shape[1]
            num_nnodes = tail.shape[1]
            tails = []
            for i in range(num_chunks):
                tail_negs = []
                for j in range(num_nnodes):
                    tail_neg = tail[i][j]
                    tail_neg = tail_neg.reshape(1, 1, self.entity_dim)
                    # replicate the candidate across all relations, then project
                    tail_neg = nd.broadcast_axis(tail_neg, axis=0, size=num_rels)
                    tail_neg = nd.batch_dot(tail_neg, projection[i])
                    tail_neg = tail_neg.squeeze(axis=1)
                    tail_negs.append(tail_neg)
                tail_negs = nd.stack(*tail_negs, axis=1)
                tails.append(tail_negs)
            tail = nd.stack(*tails)
            return head, tail
        return fn