

Python attention.Attention Code Examples

This article collects typical usage examples of attention.Attention in Python. If you are wondering what attention.Attention does, how to use it, or where to find working examples, the curated snippets below should help. You can also explore the attention module it belongs to for further usage examples.


Seven code examples of attention.Attention are shown below, ordered by popularity.

Example 1: __init__

# Required import: import attention [as alias]
# Or: from attention import Attention [as alias]
def __init__(self, model, params, vocabulary, attention_key_size):
        self.vocabulary = vocabulary
        self.attention_module = Attention(model,
                                          params.decoder_state_size,
                                          attention_key_size,
                                          attention_key_size)
        self.state_transform_weights = du.add_params(
            model,
            (params.decoder_state_size +
             attention_key_size,
             params.decoder_state_size),
            "weights-state-transform")
        self.vocabulary_weights = du.add_params(
            model, (params.decoder_state_size, len(vocabulary)), "weights-vocabulary")
        self.vocabulary_biases = du.add_params(model,
                                               tuple([len(vocabulary)]),
                                               "biases-vocabulary") 
Developer ID: lil-lab, Project: atis, Lines: 19, Source file: token_predictor.py
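
For readers new to the pattern: the Attention object constructed here scores a decoder state (the query) against a set of keys and returns a weighted combination of values. The snippet below is a generic dot-product attention written in plain NumPy as an illustration of that computation; it is a sketch of the standard mechanism, not the atis repository's implementation, and all names in it are made up.

import numpy as np

def dot_product_attention(query, keys, values):
    # query: (d,); keys and values: (seq_len, d)
    scores = keys @ query                    # one score per key
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()                 # softmax over the sequence
    context = weights @ values               # weighted sum of the values
    return context, weights

query = np.random.randn(8)
keys = values = np.random.randn(5, 8)
context, weights = dot_product_attention(query, keys, values)
print(context.shape, weights.shape)  # (8,) (5,)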

Example 2: __init__

# Required import: import attention [as alias]
# Or: from attention import Attention [as alias]
def __init__(self, attention_model, hidden_size, output_size, n_layers=1, dropout_p=.1):
        super(AttentionDecoderRNN, self).__init__()
        self.attention_model = attention_model
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout_p = dropout_p

        # Define layers
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size * 2, hidden_size, n_layers, dropout=dropout_p)
        self.out = nn.Linear(hidden_size * 2, output_size)

        # Choose attention model
        if attention_model is not None:
            self.attention = Attention(attention_model, hidden_size) 
Developer ID: lingyongyan, Project: Neural-Machine-Translation, Lines: 18, Source file: attention_decoder.py
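
The doubled widths (hidden_size * 2 for both the GRU input and the output projection) suggest a Luong-style decoder in which the attention context vector is concatenated with the embedded input token before the GRU, and with the GRU output before projecting to the vocabulary; the forward pass is not shown here, so treat that reading as an inference from the layer shapes. A hypothetical instantiation might look like the following, where 'general' is assumed to be one of the supported attention_model names and all sizes are made up:

# Hypothetical usage of the decoder defined above.
decoder = AttentionDecoderRNN('general', hidden_size=256, output_size=10000,
                              n_layers=2, dropout_p=0.1)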

Example 3: run_model

# Required import: import attention [as alias]
# Or: from attention import Attention [as alias]
def run_model(i, X_test):
    # Score X_test with the i-th of the five saved binding models; the custom
    # Attention layer must be registered so Keras can deserialize it.
    score = np.zeros((5, len(X_test)))
    with CustomObjectScope({'Attention': Attention}):
        model = load_model(curDir + 'model/binding_model' + str(i + 1) + '.hdf5')
        score[i, :] = np.squeeze(model.predict_proba(X_test))
    return score[i, :]
Developer ID: jiujiezz, Project: deephlapan, Lines: 8, Source file: deephlapan_main.py

Example 4: run_model1

# Required import: import attention [as alias]
# Or: from attention import Attention [as alias]
def run_model1(i, X_test):
    # Same pattern as run_model, but for the five immunogenicity models.
    score1 = np.zeros((5, len(X_test)))
    with CustomObjectScope({'Attention': Attention}):
        model1 = load_model(curDir + 'model/immunogenicity_model' + str(i + 1) + '.hdf5')
        score1[i, :] = np.squeeze(model1.predict_proba(X_test))
    return score1[i, :]
Developer ID: jiujiezz, Project: deephlapan, Lines: 8, Source file: deephlapan_main.py
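
Examples 3 and 4 are near-duplicates that each score X_test with one of five saved models. A typical driver would average the per-model scores into an ensemble prediction, as in the sketch below; this is an illustration assuming run_model, run_model1 and the model files exist as in the snippets above, not deephlapan's actual driver code.

import numpy as np

def ensemble_scores(X_test, n_models=5):
    # Average the five binding and five immunogenicity scores per sample.
    binding = np.mean([run_model(i, X_test) for i in range(n_models)], axis=0)
    immunogenicity = np.mean([run_model1(i, X_test) for i in range(n_models)], axis=0)
    return binding, immunogenicity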

Example 5: __init__

# Required import: import attention [as alias]
# Or: from attention import Attention [as alias]
def __init__(self, config, no_words, no_answers, resnet_model, lstm_size, emb_size, use_pretrained=True):
		super(Net, self).__init__()

		self.use_pretrained = use_pretrained # whether to use pretrained ResNet
		self.word_cnt = no_words # total count of words
		self.ans_cnt = no_answers # total count of valid answers
		self.lstm_size = lstm_size # lstm emb size to be passed to CBN layer
		self.emb_size = emb_size # hidden layer size of MLP used to predict delta beta and gamma parameters
		self.config = config # config file containing the values of parameters
		
		self.embedding = nn.Embedding(self.word_cnt, self.emb_size)
		self.lstm = VariableLengthLSTM(self.config['model']).cuda()
		self.net = create_resnet(resnet_model, self.lstm_size, self.emb_size, self.use_pretrained)
		self.attention = Attention(self.config).cuda()
		
		self.que_mlp = nn.Sequential(
						nn.Linear(config['model']['no_hidden_LSTM'], config['model']['no_question_mlp']),
						nn.Tanh(),
						)

		self.img_mlp = nn.Sequential(
						nn.Linear(2048, config['model']['no_image_mlp']),
						nn.Tanh(),
						)

		self.dropout = nn.Dropout(config['model']['dropout_keep_prob'])

		self.final_mlp = nn.Linear(config['model']['no_hidden_final_mlp'], self.ans_cnt)

		self.softmax = nn.Softmax()

		self.loss = nn.CrossEntropyLoss() 
Developer ID: ap229997, Project: Conditional-Batch-Norm, Lines: 34, Source file: net.py

Example 6: forward

# Required import: import attention [as alias]
# Or: from attention import Attention [as alias]
def forward(self, image, tokens, glove_emb, labels=None):

		####### Question Embedding #######
		# get the lstm representation of the final state at time t
		que_emb = self.embedding(tokens)
		emb = torch.cat([que_emb, glove_emb], dim=2)
		lstm_emb, internal_state = self.lstm(emb)
		lstm_emb = lstm_emb[:,-1,:]

		####### Image features using CBN ResNet with Attention ########
		feature = self.net(image, lstm_emb)
		# l2 normalisation
		sq_sum = torch.sqrt(torch.sum(feature**2, dim=1)+EPS)
		sq_sum = torch.stack([sq_sum]*feature.data.shape[1], dim=1)
		feature = feature / sq_sum
		attn_feature = self.attention(feature, lstm_emb)

		####### MLP for question and image embedding ########
		lstm_emb = lstm_emb.view(feature.data.shape[0], -1)
		que_embedding = self.que_mlp(lstm_emb)
		image_embedding = self.img_mlp(attn_feature) 

		####### MLP for fused question and image embedding ########
		full_embedding = que_embedding * image_embedding
		full_embedding = self.dropout(full_embedding)
		out = self.final_mlp(full_embedding)
		
		prob = self.softmax(out)
		val, ind = torch.max(prob, dim=1)
		# hard cross entropy loss
		if labels is not None:
			loss = self.loss(prob, labels)
			return loss, ind
		else:
			return ind 
Developer ID: ap229997, Project: Conditional-Batch-Norm, Lines: 37, Source file: net.py
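
The manual L2 normalisation in this forward pass (square, sum, sqrt with a small EPS, then a broadcast divide via torch.stack) is equivalent, up to the epsilon handling, to torch.nn.functional.normalize. The snippet below checks that on random data; the EPS value is an assumption, since the original module defines it elsewhere.

import torch
import torch.nn.functional as F

EPS = 1e-10  # assumed value; the original defines EPS outside this snippet
feature = torch.randn(4, 2048)

manual = feature / torch.sqrt(torch.sum(feature ** 2, dim=1, keepdim=True) + EPS)
builtin = F.normalize(feature, p=2, dim=1)
print(torch.allclose(manual, builtin, atol=1e-5))  # True for non-degenerate rows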

Example 7: build_baseline0

# Required import: import attention [as alias]
# Or: from attention import Attention [as alias]
def build_baseline0(dataset, num_hid):
    w_emb = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0)
    q_emb = QuestionEmbedding(300, num_hid, 1, False, 0.0)
    v_att = Attention(dataset.v_dim, q_emb.num_hid, num_hid)
    q_net = FCNet([num_hid, num_hid])
    v_net = FCNet([dataset.v_dim, num_hid])
    classifier = SimpleClassifier(
        num_hid, 2 * num_hid, dataset.num_ans_candidates, 0.5)
    return BaseModel(w_emb, q_emb, v_att, q_net, v_net, classifier) 
Developer ID: hengyuan-hu, Project: bottom-up-attention-vqa, Lines: 11, Source file: base_model.py
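
A hypothetical construction call, assuming a dataset object that exposes dictionary.ntoken, v_dim and num_ans_candidates as build_baseline0 expects; num_hid=1024 is simply a plausible hidden size, not a value taken from this article.

# Hypothetical usage sketch of the builder above.
model = build_baseline0(train_dset, num_hid=1024)
model = model.cuda()  # optional, when training on a GPU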


Note: The attention.Attention examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets come from open-source projects contributed by their authors, and the copyright of the source code remains with the original authors; please consult each project's license before distributing or using the code. Do not reproduce without permission.