本文整理匯總了Python中config.model_type方法的典型用法代碼示例。如果您正苦於以下問題:Python config.model_type方法的具體用法?Python config.model_type怎麽用?Python config.model_type使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類config
的用法示例。
在下文中一共展示了config.model_type方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: import config [as 別名]
# 或者: from config import model_type [as 別名]
def __init__(self, mem_slots, num_heads, head_size, embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx,
             gpu=False):
    """Build the RelGAN generator.

    Depending on ``cfg.model_type`` the recurrent core is either a plain
    LSTM or a Relational Memory Core (RMC); either way a linear layer
    projects the hidden state back onto the vocabulary.

    Args:
        mem_slots, num_heads, head_size: RMC geometry (unused for LSTM).
        embedding_dim: token embedding size.
        hidden_dim: LSTM hidden size (the RMC derives its own hidden size).
        vocab_size: output vocabulary size.
        max_seq_len: maximum generated sequence length.
        padding_idx: embedding index treated as padding.
        gpu: move tensors to CUDA when True.
    """
    super(RelGAN_G, self).__init__(embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu)
    self.name = 'relgan'
    self.temperature = 1.0  # initial value; presumably annealed during training — TODO confirm
    self.embeddings = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
    if cfg.model_type == 'LSTM':
        # LSTM core: hidden size comes straight from the argument.
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTM(embedding_dim, self.hidden_dim, batch_first=True)
        self.lstm2out = nn.Linear(self.hidden_dim, vocab_size)
    else:
        # RMC core: the flattened relational memory acts as the hidden state.
        self.hidden_dim = mem_slots * num_heads * head_size
        self.lstm = RelationalMemory(mem_slots=mem_slots, head_size=head_size, input_size=embedding_dim,
                                     num_heads=num_heads, return_all_outputs=True)
        self.lstm2out = nn.Linear(self.hidden_dim, vocab_size)
    self.init_params()
示例2: _build_network
# 需要導入模塊: import config [as 別名]
# 或者: from config import model_type [as 別名]
def _build_network(self):
    """Build the VGG16 backbone under a shared slim arg_scope.

    Reads ``config.model_type`` to pick the dilated or non-dilated VGG16
    variant; any other value raises ``ValueError``. Stores the resulting
    ``arg_scope``, ``net`` and ``end_points`` on ``self``.

    Raises:
        ValueError: if ``config.model_type`` is not a supported variant.
    """
    import config
    # The two supported variants share an identical scope setup and only
    # differ in the `dilation` kwarg passed to vgg.basenet, so resolve the
    # extra kwargs once instead of duplicating the whole arg_scope block.
    if config.model_type == MODEL_TYPE_vgg16:
        basenet_kwargs = {}  # rely on vgg.basenet's default, as the original did
    elif config.model_type == MODEL_TYPE_vgg16_no_dilation:
        basenet_kwargs = {'dilation': False}
    else:
        raise ValueError('model_type not supported:%s' % (config.model_type))
    from nets import vgg
    with slim.arg_scope([slim.conv2d],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(config.weight_decay),
                        weights_initializer=tf.contrib.layers.xavier_initializer(),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                            padding='SAME') as sc:
            self.arg_scope = sc
            self.net, self.end_points = vgg.basenet(
                inputs=self.inputs, **basenet_kwargs)
示例3: init_hidden
# 需要導入模塊: import config [as 別名]
# 或者: from config import model_type [as 別名]
def init_hidden(self, batch_size=cfg.batch_size):
    """Create a fresh recurrent state for a batch.

    Returns a zeroed ``(h, c)`` pair for the LSTM core, or a detached
    initial memory for the RMC core. Tensors are moved to CUDA when
    ``self.gpu`` is set.
    """
    if cfg.model_type != 'LSTM':
        # RMC: ask the relational memory for its initial state and detach
        # it so no stale computation graph leaks into the new sequence.
        memory = self.lstm.repackage_hidden(self.lstm.initial_state(batch_size))
        return memory.cuda() if self.gpu else memory
    state_shape = (1, batch_size, self.hidden_dim)
    h = torch.zeros(*state_shape)
    c = torch.zeros(*state_shape)
    return (h.cuda(), c.cuda()) if self.gpu else (h, c)
示例4: infer_classic
# 需要導入模塊: import config [as 別名]
# 或者: from config import model_type [as 別名]
def infer_classic(model_type='xgboost_lr',
                  model_save_path='',
                  label_vocab_path='',
                  test_data_path='',
                  pred_save_path='',
                  feature_vec_path='',
                  col_sep='\t',
                  feature_type='tfidf_word'):
    """Run inference with a classic (non-deep) classifier and save results.

    Loads the test set, extracts features, predicts label probabilities,
    writes "<label><col_sep><max_prob>" lines to ``pred_save_path`` and,
    when gold labels are present, prints evaluation reports.
    """
    # load data content
    data_set, true_labels = data_reader(test_data_path, col_sep)
    # init feature extractor (inference mode re-uses the stored vectorizer)
    feature = Feature(data=data_set, feature_type=feature_type,
                      feature_vec_path=feature_vec_path, is_infer=True)
    # get data feature
    data_feature = feature.get_feature()
    # load model
    if model_type == 'xgboost_lr':
        model = XGBLR(model_save_path)
    else:
        model = load_pkl(model_save_path)
    # predict
    pred_label_probs = model.predict_proba(data_feature)
    # label id map
    label_id = load_vocab(label_vocab_path)
    id_label = {v: k for k, v in label_id.items()}
    # Single pass over the predictions: compute argmax / label lookup once
    # per row instead of once per output list.
    pred_labels = []
    pred_output = []
    for prob in pred_label_probs:
        label = id_label[prob.argmax()]
        pred_labels.append(label)
        pred_output.append(label + col_sep + str(prob.max()))
    logger.info("save infer label and prob result to:%s" % pred_save_path)
    # NOTE(review): 'ture_labels' matches the callee's (misspelled) parameter name.
    save_predict_result(pred_output, ture_labels=None, pred_save_path=pred_save_path, data_set=data_set)
    # evaluate
    if true_labels:
        try:
            print(classification_report(true_labels, pred_labels))
            print(confusion_matrix(true_labels, pred_labels))
        except UnicodeEncodeError:
            # fall back to numeric ids when the console cannot encode labels
            true_labels_id = [label_id[i] for i in true_labels]
            pred_labels_id = [label_id[i] for i in pred_labels]
            print(classification_report(true_labels_id, pred_labels_id))
            print(confusion_matrix(true_labels_id, pred_labels_id))
        except Exception:
            print("error. no true labels")
    # analysis lr model
    if model_type == "logistic_regression":
        feature_weight_dict = load_dict(config.lr_feature_weight_path)
        pred_labels = cal_multiclass_lr_predict(data_set, feature_weight_dict, id_label)
        print(pred_labels[:5])
示例5: infer_deep_model
# 需要導入模塊: import config [as 別名]
# 或者: from config import model_type [as 別名]
def infer_deep_model(model_type='cnn',
                     data_path='',
                     model_save_path='',
                     label_vocab_path='',
                     max_len=300,
                     batch_size=128,
                     col_sep='\t',
                     pred_save_path=None):
    """Run inference with a saved Keras model and save results.

    Loads the data, vectorizes it ('doc_vectorize' for HAN, plain
    'vectorize' otherwise), predicts label probabilities, writes
    "<label><col_sep><max_prob>" lines to ``pred_save_path`` and, when
    gold labels are present, prints per-sample debug lines plus
    evaluation reports.
    """
    from keras.models import load_model
    # load data content
    data_set, true_labels = data_reader(data_path, col_sep)
    # han model needs a [doc, sentence, dim] feature (rank 3); others use a
    # [sentence, dim] feature (rank 2)
    if model_type == 'han':
        feature_type = 'doc_vectorize'
    else:
        feature_type = 'vectorize'
    feature = Feature(data_set, feature_type=feature_type, is_infer=True, max_len=max_len)
    # get data feature
    data_feature = feature.get_feature()
    # load model
    model = load_model(model_save_path)
    # predict; in keras, predict_proba is the same as predict
    pred_label_probs = model.predict(data_feature, batch_size=batch_size)
    # label id map
    label_id = load_vocab(label_vocab_path)
    id_label = {v: k for k, v in label_id.items()}
    # Single pass over the predictions: compute argmax / label lookup once
    # per row instead of three separate passes.
    pred_labels = []
    pred_output = []
    for prob in pred_label_probs:
        label = id_label[prob.argmax()]
        pred_labels.append(label)
        pred_output.append(label + col_sep + str(prob.max()))
    logger.info("save infer label and prob result to: %s" % pred_save_path)
    # NOTE(review): 'ture_labels' matches the callee's (misspelled) parameter name.
    save_predict_result(pred_output, ture_labels=None, pred_save_path=pred_save_path, data_set=data_set)
    if true_labels:
        # evaluate
        assert len(pred_labels) == len(true_labels)
        for label, prob in zip(true_labels, pred_label_probs):
            logger.debug('label_true:%s\tprob_label:%s\tprob:%s' % (label, id_label[prob.argmax()], prob.max()))
        print('total eval:')
        try:
            print(classification_report(true_labels, pred_labels))
            print(confusion_matrix(true_labels, pred_labels))
        except UnicodeEncodeError:
            # fall back to numeric ids when the console cannot encode labels
            true_labels_id = [label_id[i] for i in true_labels]
            pred_labels_id = [label_id[i] for i in pred_labels]
            print(classification_report(true_labels_id, pred_labels_id))
            print(confusion_matrix(true_labels_id, pred_labels_id))