This article collects typical usage examples of the Python method layers.Attention. If you are wondering what layers.Attention does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the layers module where this method lives.
Below are 4 code examples of layers.Attention, sorted by popularity by default.
Example 1: _setup_layers
# Required module: import layers [as alias]
# Or: from layers import Attention [as alias]
def _setup_layers(self):
    """
    Creating the layers of the model:
    1. GCN layers.
    2. Primary capsules.
    3. Attention layer.
    4. Graph capsules.
    5. Class capsules.
    6. Reconstruction layers.
    """
    self._setup_base_layers()
    self._setup_primary_capsules()
    self._setup_attention()
    self._setup_graph_capsules()
    self._setup_class_capsule()
    self._setup_reconstruction_layers()
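Example 2 below shows one of these helpers in isolation. To see how the helpers fit together, here is a hypothetical PyTorch-style skeleton; the class name, constructor arguments, and the torch.nn.Linear stand-in for layers.Attention are illustrative assumptions, since the surrounding class is not shown in the snippet:

import torch
from types import SimpleNamespace

class CapsuleModelSketch(torch.nn.Module):
    """Hypothetical skeleton: __init__ delegates to _setup_layers,
    and each _setup_* helper attaches its submodules to self."""
    def __init__(self, args):
        super().__init__()
        self.args = args
        self._setup_layers()

    def _setup_layers(self):
        self._setup_attention()  # the real model also calls the five other helpers here

    def _setup_attention(self):
        # torch.nn.Linear as an illustrative stand-in for layers.Attention
        self.attention = torch.nn.Linear(
            self.args.gcn_layers * self.args.capsule_dimensions,
            self.args.inner_attention_dimension)

args = SimpleNamespace(gcn_layers=4, capsule_dimensions=8, inner_attention_dimension=20)
model = CapsuleModelSketch(args)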
Example 2: _setup_attention
# Required module: import layers [as alias]
# Or: from layers import Attention [as alias]
def _setup_attention(self):
    """
    Creating the attention layer.
    """
    self.attention = Attention(self.args.gcn_layers * self.args.capsule_dimensions,
                               self.args.inner_attention_dimension)
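The constructor takes the flattened GCN output width (gcn_layers * capsule_dimensions) and an inner attention dimension, which suggests a small two-layer scorer with a softmax over nodes. The following PyTorch sketch is an assumption about what such a module could look like, not the actual layers.Attention source:

import torch

class AttentionSketch(torch.nn.Module):
    """Assumed internals: score each node, softmax over nodes, reweight features."""
    def __init__(self, in_dim, attention_dim):
        super().__init__()
        self.score_in = torch.nn.Linear(in_dim, attention_dim)
        self.score_out = torch.nn.Linear(attention_dim, in_dim)

    def forward(self, x):
        # x: (number_of_nodes, in_dim)
        scores = self.score_out(torch.tanh(self.score_in(x)))  # (nodes, in_dim)
        weights = torch.softmax(scores, dim=0)                 # normalize over nodes
        return weights * x                                     # attention-weighted features

x = torch.rand(7, 4 * 8)                    # 7 nodes, in_dim = gcn_layers * capsule_dimensions
condensed = AttentionSketch(4 * 8, 20)(x)   # same shape as x, attention-reweighted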
Example 3: __init__
# Required module: import layers [as alias]
# Or: from layers import Attention [as alias]
def __init__(self, name='ra', nimg=2048, na=512, nh=512, nw=512, nout=8843, npatch=30, model_file=None):
    self.name = name
    if model_file is not None:
        with h5py.File(model_file, 'r') as f:
            nimg = f.attrs['nimg']
            na = f.attrs['na']
            nh = f.attrs['nh']
            nw = f.attrs['nw']
            nout = f.attrs['nout']
            # npatch = f.attrs['npatch']
    self.config = {'nimg': nimg, 'na': na, 'nh': nh, 'nw': nw, 'nout': nout, 'npatch': npatch}
    # word embedding layer
    self.embedding = Embedding(n_emb=nout, dim_emb=nw, name=self.name+'@embedding')
    # initialization mlp layer
    self.init_mlp = MLP(layer_sizes=[na, 2*nh], output_type='tanh', name=self.name+'@init_mlp')
    self.proj_mlp = MLP(layer_sizes=[nimg, na], output_type='tanh', name=self.name+'@proj_mlp')
    # lstm
    self.lstm = BasicLSTM(dim_x=na+nw, dim_h=nh, name=self.name+'@lstm')
    # prediction mlp
    self.pred_mlp = MLP(layer_sizes=[na+nh+nw, nout], output_type='softmax', name=self.name+'@pred_mlp')
    # attention layer
    self.attention = Attention(dim_item=na, dim_context=na+nw+nh, hsize=nh, name=self.name+'@attention')
    # inputs
    cap = T.imatrix('cap')
    img = T.tensor3('img')
    self.inputs = [cap, img]
    # go through sequence
    feat = self.proj_mlp.compute(img)
    init_e = feat.mean(axis=1)
    init_state = T.concatenate([init_e, self.init_mlp.compute(init_e)], axis=-1)
    (state, self.p, loss, self.alpha), _ = theano.scan(fn=self.scan_func,
                                                       sequences=[cap[0:-1, :], cap[1:, :]],
                                                       outputs_info=[init_state, None, None, None],
                                                       non_sequences=[feat])
    # loss function
    loss = T.mean(loss)
    self.costs = [loss]
    # layers and parameters
    self.layers = [self.embedding, self.init_mlp, self.proj_mlp, self.attention, self.lstm, self.pred_mlp]
    self.params = sum([l.params for l in self.layers], [])
    # load weights from file, if model_file is not None
    if model_file is not None:
        self.load_weights(model_file)
    # these functions and variables are used in test stage
    self._init_func = None
    self._step_func = None
    self._proj_func = None
    self._feat_shared = theano.shared(np.zeros((1, npatch, na)).astype(theano.config.floatX))
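scan_func itself is not shown in the snippet. Given dim_item=na and dim_context=na+nw+nh, a plausible reading of the attention call inside the scan loop is classic soft attention over the npatch image features: each projected patch is scored against a context vector built from the previous attended feature, the current word embedding, and the LSTM hidden state. A minimal numpy sketch under those assumptions (all weight names are hypothetical):

import numpy as np

def soft_attention_step(items, context, w_item, w_ctx, v):
    """Hypothetical per-step attention: items (npatch, na), context (na+nw+nh,)."""
    hidden = np.tanh(items @ w_item + context @ w_ctx)  # (npatch, hsize)
    scores = hidden @ v                                 # (npatch,)
    alpha = np.exp(scores - scores.max())
    alpha = alpha / alpha.sum()                         # attention weights over patches
    return alpha @ items, alpha                         # attended feature (na,), weights

na, nw, nh, npatch = 512, 512, 512, 30
rng = np.random.default_rng(0)
attended, alpha = soft_attention_step(
    rng.random((npatch, na)), rng.random(na + nw + nh),
    rng.normal(scale=0.01, size=(na, nh)),
    rng.normal(scale=0.01, size=(na + nw + nh, nh)),
    rng.normal(scale=0.01, size=nh))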
Example 4: __init__
# Required module: import layers [as alias]
# Or: from layers import Attention [as alias]
def __init__(self, name='rass', nimg=2048, nh=512, nw=512, na=512, nout=8843, ns=80, npatch=30, model_file=None):
    self.name = name
    if model_file is not None:
        with h5py.File(model_file, 'r') as f:
            nimg = f.attrs['nimg']
            nh = f.attrs['nh']
            nw = f.attrs['nw']
            na = f.attrs['na']
            ns = f.attrs['ns']
            nout = f.attrs['nout']
    self.config = {'nimg': nimg, 'nh': nh, 'nw': nw, 'na': na, 'nout': nout, 'ns': ns, 'npatch': npatch}
    # word embedding layer
    self.embedding = Embedding(n_emb=nout, dim_emb=nw, name=self.name+'@embedding')
    # initialization mlp layer
    self.init_mlp = MLP(layer_sizes=[na, 2*nh], output_type='tanh', name=self.name+'@init_mlp')
    self.proj_mlp = MLP(layer_sizes=[nimg, na], output_type='tanh', name=self.name+'@proj_mlp')
    # attention layer
    self.attention = Attention(dim_item=na, dim_context=na+nw+nh, hsize=nh, name=self.name+'@attention')
    # lstm
    self.lstm = BasicLSTM(dim_x=na+nw+ns, dim_h=nh, name=self.name+'@lstm')
    # prediction mlp
    self.pred_mlp = MLP(layer_sizes=[na+nh+nw+ns, nout], output_type='softmax', name=self.name+'@pred_mlp')
    # inputs
    cap = T.imatrix('cap')
    img = T.tensor3('img')
    scene = T.matrix('scene')
    self.inputs = [cap, img, scene]
    # go through sequence
    feat = self.proj_mlp.compute(img)
    init_e = feat.mean(axis=1)
    init_state = T.concatenate([init_e, self.init_mlp.compute(init_e)], axis=-1)
    (state, self.p, loss, self.alpha), _ = theano.scan(fn=self.scan_func,
                                                       sequences=[cap[0:-1, :], cap[1:, :]],
                                                       outputs_info=[init_state, None, None, None],
                                                       non_sequences=[feat, scene])
    # loss function
    loss = T.mean(loss)
    self.costs = [loss]
    # layers and parameters
    self.layers = [self.embedding, self.init_mlp, self.proj_mlp, self.attention, self.lstm, self.pred_mlp]
    self.params = sum([l.params for l in self.layers], [])
    # load weights from file, if model_file is not None
    if model_file is not None:
        self.load_weights(model_file)
    # initialization for test stage
    self._init_func = None
    self._step_func = None
    self._proj_func = None
    self._feat_shared = theano.shared(np.zeros((1, npatch, na)).astype(theano.config.floatX))
    self._scene_shared = theano.shared(np.zeros((1, ns)).astype(theano.config.floatX))
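Both constructors rehydrate their hyperparameters from HDF5 attributes when model_file is given. For reference, this is the standard h5py attribute round trip; the file name and attribute set here are illustrative:

import h5py
import numpy as np

config = {'nimg': 2048, 'nh': 512, 'nw': 512, 'na': 512, 'ns': 80, 'nout': 8843}

with h5py.File('rass_model.h5', 'w') as f:
    for key, value in config.items():
        f.attrs[key] = value                      # store hyperparameters as file attributes
    f.create_dataset('weights/example', data=np.zeros(4))

with h5py.File('rass_model.h5', 'r') as f:
    restored = {key: int(f.attrs[key]) for key in config}

assert restored == config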