This article collects typical usage examples of the Python method quagga.connector.Connector.assign_sequential_weighted_sum. If you are wondering how Connector.assign_sequential_weighted_sum works and what calling it looks like in practice, the hand-picked example below should help. You can also browse further usage examples of the containing class, quagga.connector.Connector.
The text below shows 1 code example of the Connector.assign_sequential_weighted_sum method; examples are sorted by popularity by default. You can upvote the examples you like or find useful, and your votes help the system recommend better Python code examples.
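In the example below the method is called as output.assign_sequential_weighted_sum(context, a, matrices), where a holds one attention weight per row and per time step and matrices is a sequence of equally shaped matrices. As a rough illustration of the computation (out = sum_i a_i * h_i, as stated in the example's docstring), here is a small NumPy sketch; the function name and shapes are illustrative only and are not part of the quagga API:

import numpy as np

def sequential_weighted_sum(a, matrices):
    # a: (nrows, T) weights; matrices: list of T float arrays, each (nrows, ncols).
    # Computes out[n, :] = sum_i a[n, i] * matrices[i][n, :].
    out = np.zeros_like(matrices[0])
    for i, h in enumerate(matrices):
        out += a[:, i:i + 1] * h
    return out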
Example 1: AttentionBlock
# Required module import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import assign_sequential_weighted_sum [as alias]
# Imports assumed from the standard quagga module layout (the code is Python 2,
# hence izip/xrange).
from itertools import izip

from quagga.context import Context
from quagga.matrix import Matrix
from quagga.connector import Connector


class AttentionBlock(object):
    """
    Location-based attention block:

        out = sum_{i=1}^{T} a_i * h_i
        a_i = softmax(h_i * u)
    """
    def __init__(self, matrices, u, mask=None, device_id=None):
        self.context = Context(device_id)
        device_id = self.context.device_id
        self.output = Matrix.empty_like(matrices[0], device_id)
        learning = matrices[0].bpropagable or u.bpropagable
        self.output = Connector(self.output, device_id if learning else None)
        if matrices[0].bpropagable:
            self.matrices, self.dL_dmatrices = \
                izip(*matrices.register_usage(device_id, device_id))
        else:
            self.matrices = matrices.register_usage(device_id)
        self.length = matrices.length
        if u.bpropagable:
            self.u, self.dL_du = u.register_usage(device_id, device_id)
        else:
            self.u = u.register_usage(device_id)
        if mask:
            self.mask = mask.register_usage(device_id)
        self.a = Matrix.empty(matrices[0].nrows, matrices.length,
                              'float', device_id)
        self.dL_dpre_a = Matrix.empty_like(self.a)
        self.a_cols = [self.a[:, i] for i in xrange(len(self.matrices))]

    def fprop(self):
        # Attention scores: a[:, i] = h_i . u for each time step.
        for i in xrange(self.length):
            self.a_cols[i].assign_dot(self.context, self.matrices[i], self.u)
        if hasattr(self, 'mask'):
            # Masked positions are filled with -FLT_MAX so the softmax
            # assigns them (near-)zero weight.
            self.a.fill(self.context, -3.402823466e+38, self.mask, 0.0)
        self.a.softmax(self.context, self.a)
        # out = sum_i a_i * h_i
        self.output.assign_sequential_weighted_sum(self.context, self.a,
                                                   self.matrices[:self.length])
        self.output.fprop()

    def bprop(self):
        dL_doutput = self.output.backward_matrix
        self.dL_dpre_a.assign_dL_dpre_a(self.context, dL_doutput, self.a,
                                        self.matrices[:self.length])
        if hasattr(self, 'dL_dmatrices'):
            Matrix.add_attention_tile(self.context, dL_doutput, self.a,
                                      self.dL_dpre_a, self.u,
                                      self.dL_dmatrices[:self.length])
        if hasattr(self, 'dL_du'):
            self.dL_du.add_attention_derivative(self.context, self.dL_dpre_a,
                                                self.matrices[:self.length])
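To make the forward pass concrete, the following is a hedged NumPy sketch of what fprop computes for one batch, ignoring the optional mask: attention scores h_i . u, a row-wise softmax over time steps, and the weighted sum of the inputs. The function name and shapes are illustrative assumptions, not part of the quagga API:

import numpy as np

def attention_forward(matrices, u):
    # matrices: list of T float arrays h_i, each (nrows, ncols); u: (ncols, 1).
    pre_a = np.hstack([h.dot(u) for h in matrices])        # (nrows, T) scores
    e = np.exp(pre_a - pre_a.max(axis=1, keepdims=True))   # numerically stable softmax
    a = e / e.sum(axis=1, keepdims=True)
    out = np.zeros_like(matrices[0])
    for i, h in enumerate(matrices):
        out += a[:, i:i + 1] * h                            # out = sum_i a_i * h_i
    return out, a

For example, with T = 3 matrices of shape (2, 4) and u of shape (4, 1), attention_forward returns a (2, 4) output together with a (2, 3) weight matrix whose rows sum to one.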