本文整理匯總了Python中model.config.Config方法的典型用法代碼示例。如果您正苦於以下問題:Python config.Config方法的具體用法?Python config.Config怎麽用?Python config.Config使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類model.config
的用法示例。
在下文中一共展示了config.Config方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: main
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def main():
    """Preprocess all datasets required by the model.

    This step is mandatory before training: it converts the pre-trained
    word vectors, the relation-to-id mapping, and the raw train/test
    corpora into the processed files expected downstream. All input and
    output paths come from ``Config``.
    """
    cfg = Config(load=False)

    # Pre-trained word vectors -> vocabulary file + embedding matrix.
    process_wordvectors(cfg.filename_wordvectors,
                        cfg.filename_words,
                        cfg.filename_embeddings)

    # Map each relation name to an integer id.
    process_relation2id(cfg.filename_relation_origin, cfg.filename_relation)

    # Keep only sentences whose entities are actually present; the rest
    # are written to the *_wrong files for inspection.
    check_entity_in_sentence(cfg.filename_train_origin,
                             cfg.filename_train,
                             cfg.filename_train_wrong)
    check_entity_in_sentence(cfg.filename_test_origin,
                             cfg.filename_test,
                             cfg.filename_test_wrong)
示例2: main
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def main():
    """Restore a trained PCNN model, evaluate it, and start a shell."""
    cfg = Config()

    # Rebuild the computation graph and load the saved weights.
    model = PCNNModel(cfg)
    model.build()
    model.restore_session(cfg.restore_model)

    # Load the held-out test split.
    test = getDataset(cfg.filename_test, cfg.processing_word,
                      cfg.processing_tag, cfg.max_iter)

    # Report metrics, then drop into an interactive prompt.
    model.evaluate(test)
    interactive_shell(model)
示例3: main
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def main():
    """Build a fresh PCNN model and train it on the train/dev splits."""
    cfg = Config()

    # Construct the graph; optionally restore weights before training:
    #   model.restore_session("results/crf/model.weights/")
    #   model.reinitialize_weights("proj")
    model = PCNNModel(cfg)
    model.build()

    # Load the dev and train splits with identical preprocessing.
    dev = getDataset(cfg.filename_dev, cfg.processing_word,
                     cfg.processing_relation, cfg.max_iter)
    train = getDataset(cfg.filename_train, cfg.processing_word,
                       cfg.processing_relation, cfg.max_iter)

    model.train(train, dev)
示例4: main
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def main(argv=None):
    """Configure and train the model on the training split.

    Args:
        argv: unused; kept so the function can serve as an app entry point.
    """
    cfg = Config()

    # Per-run overrides of the default configuration.
    cfg.DATA_DIR = ['/data/']
    cfg.LOG_DIR = './log/model'
    cfg.MODE = 'training'
    cfg.STEPS_PER_EPOCH_VAL = 180
    cfg.display()

    # Input pipeline for the training split only.
    dataset_train = Dataset(cfg, 'train')

    # Build, compile, and fit (no validation dataset supplied).
    model = Model(cfg)
    model.compile()
    model.train(dataset_train, None)
示例5: main
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def main():
    """Train an NER model on the CoNLL-formatted train/dev splits."""
    cfg = Config()
    # ELMo supplies its own embedding pipeline, so skip word processing.
    if cfg.use_elmo:
        cfg.processing_word = None

    model = NERModel(cfg)

    # Load both splits with identical preprocessing settings.
    dev = CoNLLDataset(cfg.filename_dev, cfg.processing_word,
                       cfg.processing_tag, cfg.max_iter, cfg.use_crf)
    train = CoNLLDataset(cfg.filename_train, cfg.processing_word,
                         cfg.processing_tag, cfg.max_iter, cfg.use_crf)

    learn = NERLearner(cfg, model)
    learn.fit(train, dev)
示例6: predict
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def predict():
    """Restore the latest BertModel checkpoint and run it on toy data.

    Each example is a random 0/1 vector; ``generate_label`` labels it by
    comparing the number of ones against ``threshold``. The model's
    predictions are printed next to the labels for manual inspection.
    """
    config = Config()
    threshold = (config.sequence_length / 2) + 1
    config.batch_size = 1  # predict one example at a time

    model = BertModel(config)

    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    saver = tf.train.Saver()
    ckpt_dir = config.ckpt_dir
    print("ckpt_dir:", ckpt_dir)

    with tf.Session(config=gpu_config) as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, tf.train.latest_checkpoint(ckpt_dir))
        for _ in range(100):
            # Random binary input of shape [batch_size, sequence_length].
            input_x = np.random.randn(config.batch_size, config.sequence_length)
            input_x[input_x >= 0] = 1
            input_x[input_x < 0] = 0
            target_label = generate_label(input_x, threshold)
            input_sum = np.sum(input_x)
            # Forward pass only; compare prediction with the toy label.
            logit, prediction = sess.run(
                [model.logits, model.predictions],
                feed_dict={model.input_x: input_x,
                           model.dropout_keep_prob: config.dropout_keep_prob})
            print("target_label:", target_label, ";input_sum:", input_sum,
                  "threshold:", threshold, ";prediction:", prediction)
            print("input_x:", input_x, ";logit:", logit)
示例7: predict
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def predict():
    """Restore the latest BertCNNModel checkpoint and run it on toy data.

    Each example is a random 0/1 vector; ``generate_label`` labels it by
    comparing the number of ones against ``threshold``. The model's
    predictions are printed next to the labels for manual inspection.
    """
    config = Config()
    threshold = (config.sequence_length / 2) + 1
    config.batch_size = 1  # predict one example at a time

    model = BertCNNModel(config)

    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    saver = tf.train.Saver()
    ckpt_dir = config.ckpt_dir
    print("ckpt_dir:", ckpt_dir)

    with tf.Session(config=gpu_config) as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, tf.train.latest_checkpoint(ckpt_dir))
        for _ in range(100):
            # Random binary input of shape [batch_size, sequence_length].
            input_x = np.random.randn(config.batch_size, config.sequence_length)
            input_x[input_x >= 0] = 1
            input_x[input_x < 0] = 0
            target_label = generate_label(input_x, threshold)
            input_sum = np.sum(input_x)
            # Forward pass only; compare prediction with the toy label.
            logit, prediction = sess.run(
                [model.logits, model.predictions],
                feed_dict={model.input_x: input_x,
                           model.dropout_keep_prob: config.dropout_keep_prob})
            print("target_label:", target_label, ";input_sum:", input_sum,
                  "threshold:", threshold, ";prediction:", prediction)
            print("input_x:", input_x, ";logit:", logit)
示例8: main
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def main():
    """Restore a trained ASPECT model, evaluate it, and start a shell."""
    cfg = Config()

    # Rebuild the graph and load the saved session.
    model = ASPECTModel(cfg)
    model.build()
    model.restore_session(cfg.dir_model)

    # Held-out test split in CoNLL format.
    test = CoNLLDataset(cfg.filename_test, cfg.processing_word,
                        cfg.processing_tag, cfg.max_iter)

    # Report metrics, then drop into an interactive prompt.
    model.evaluate(test)
    interactive_shell(model)
示例9: main
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def main():
    """Build a fresh ASPECT model and train it on the train/dev splits."""
    cfg = Config()

    # Construct the graph; optionally restore weights before training:
    #   model.restore_session("results/crf/model.weights/")
    #   model.reinitialize_weights("proj")
    model = ASPECTModel(cfg)
    model.build()

    # Load the dev and train splits with identical preprocessing.
    dev = CoNLLDataset(cfg.filename_dev, cfg.processing_word,
                       cfg.processing_tag, cfg.max_iter)
    train = CoNLLDataset(cfg.filename_train, cfg.processing_word,
                         cfg.processing_tag, cfg.max_iter)

    model.train(train, dev)
示例10: main
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def main():
    """Evaluate a saved BLSTM-CRF model and demo a single prediction."""
    cfg = Config()

    # Build and compile the Keras-style model, then load saved weights.
    model = BLSTMCRF(cfg)
    model.build()
    model.compile(optimizer=model.get_optimizer(), loss=model.get_loss())
    model.load_weights('./saves/test20.h5')

    test = CoNLLDataset(cfg.filename_test, cfg.processing_word,
                        cfg.processing_tag, cfg.max_iter)

    # Evaluate in batches; sequence lengths are needed to undo padding.
    nbatches_test, test_generator = model.batch_iter(
        test, cfg.batch_size, return_lengths=True)
    model.run_evaluate(test_generator, nbatches_test)

    # Sanity-check predictions on a hand-written sentence.
    words = "Fa Mulan is from Dynasty Trading Limited".split(" ")
    pred = model.predict_words(words)
    print(words)
    print(pred)
示例11: main
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def main():
    """Restore a trained NER model, evaluate it, and start a shell."""
    cfg = Config()

    # Rebuild the graph and load the saved session.
    model = NERModel(cfg)
    model.build()
    model.restore_session(cfg.dir_model)

    # Held-out test split in CoNLL format.
    test = CoNLLDataset(cfg.filename_test, cfg.processing_word,
                        cfg.processing_tag, cfg.max_iter)

    # Report metrics, then drop into an interactive prompt.
    model.evaluate(test)
    interactive_shell(model)
示例12: main
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def main():
    """Build a fresh NER model and train it on the train/dev splits."""
    cfg = Config()

    # Construct the graph; optionally restore weights before training:
    #   model.restore_session("results/crf/model.weights/")
    #   model.reinitialize_weights("proj")
    model = NERModel(cfg)
    model.build()

    # Load the dev and train splits with identical preprocessing.
    dev = CoNLLDataset(cfg.filename_dev, cfg.processing_word,
                       cfg.processing_tag, cfg.max_iter)
    train = CoNLLDataset(cfg.filename_train, cfg.processing_word,
                         cfg.processing_tag, cfg.max_iter)

    model.train(train, dev)
示例13: main
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def main():
    """Evaluate a trained NER model and/or predict a single sentence.

    Command-line usage:
        prog                -> run both evaluation and prediction
        prog eval           -> evaluate on the test split
        prog pred [words]   -> predict *words* (default sentence if omitted)
        prog eval pred [w]  -> both
    """
    config = Config()
    # ELMo supplies its own embedding pipeline, so skip word processing.
    if config.use_elmo:
        config.processing_word = None

    model = NERModel(config)
    learn = NERLearner(config, model)
    learn.load()

    if len(sys.argv) == 1:
        print("No arguments given. Running full test")
        sys.argv.append("eval")
        sys.argv.append("pred")

    if sys.argv[1] == "eval":
        # Held-out test split in CoNLL format.
        test = CoNLLDataset(config.filename_test, config.processing_word,
                            config.processing_tag, config.max_iter)
        learn.evaluate(test)

    # BUGFIX: the original evaluated `sys.argv[1] == "pred" or
    # sys.argv[2] == "pred"`, which raised IndexError for "prog eval"
    # (only one argument). Membership in the first two args is equivalent
    # and safe for any argv length.
    if "pred" in sys.argv[1:3]:
        try:
            sent = (sys.argv[2] if sys.argv[1] == "pred" else sys.argv[3])
        except IndexError:
            # No sentence supplied on the command line: use a demo one.
            sent = ["Peter", "Johnson", "lives", "in", "Los", "Angeles"]
        print("Predicting sentence: ", sent)
        pred = learn.predict(sent)
        print(pred)
示例14: train
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def train():
    """Train BertModel on a synthetic counting toy task.

    Each example is a random 0/1 vector; the label (via ``generate_label``)
    reflects whether the count of ones exceeds ``threshold``. A checkpoint
    is written every 500 steps; training resumes from the latest checkpoint
    when one exists.
    """
    # 1. init config and model
    config = Config()
    threshold = (config.sequence_length / 2) + 1
    model = BertModel(config)

    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    saver = tf.train.Saver()
    save_path = config.ckpt_dir + "model.ckpt"
    # BUGFIX: re-enabled directory creation (was commented out) so that
    # saver.save() cannot fail on a missing checkpoint directory.
    if not os.path.exists(config.ckpt_dir):
        os.makedirs(config.ckpt_dir)

    with tf.Session(config=gpu_config) as sess:
        sess.run(tf.global_variables_initializer())
        # BUGFIX: tf.train.latest_checkpoint expects the checkpoint
        # *directory*, not the "model.ckpt" file prefix, and returns None
        # when no checkpoint has been written yet.
        latest = tf.train.latest_checkpoint(config.ckpt_dir)
        if latest is not None:
            saver.restore(sess, latest)

        for i in range(10000):
            # 2. feed data: random binary input [batch_size, sequence_length]
            input_x = np.random.randn(config.batch_size, config.sequence_length)
            input_x[input_x >= 0] = 1
            input_x[input_x < 0] = 0
            input_y = generate_label(input_x, threshold)
            # BUGFIX: was `range(batch_size)` with `batch_size` undefined
            # in this scope; the batch is built from config.batch_size.
            p_mask_lm = list(range(config.batch_size))
            # 3. run one training step and log the loss.
            loss, _ = sess.run(
                [model.loss_val, model.train_op],
                feed_dict={model.x_mask_lm: input_x,
                           model.y_mask_lm: input_y,
                           model.p_mask_lm: p_mask_lm,
                           model.dropout_keep_prob: config.dropout_keep_prob})
            print(i, "loss:", loss, "-------------------------------------------------------")
            if i == 300:
                print("label[0]:", input_y[0])
                print("input_x:", input_x)
            if i % 500 == 0:
                saver.save(sess, save_path, global_step=i)
示例15: train
# 需要導入模塊: from model import config [as 別名]
# 或者: from model.config import Config [as 別名]
def train():
    """Train BertCNNModel on a synthetic counting toy task.

    Each example is a random 0/1 vector; the label (via ``generate_label``)
    reflects whether the count of ones exceeds ``threshold``. A checkpoint
    is written every 500 steps; training resumes from the latest checkpoint
    when one exists.
    """
    # 1. init config and model
    config = Config()
    threshold = (config.sequence_length / 2) + 1
    model = BertCNNModel(config)

    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    saver = tf.train.Saver()
    save_path = config.ckpt_dir + "model.ckpt"
    # BUGFIX: re-enabled directory creation (was commented out) so that
    # saver.save() cannot fail on a missing checkpoint directory.
    if not os.path.exists(config.ckpt_dir):
        os.makedirs(config.ckpt_dir)

    with tf.Session(config=gpu_config) as sess:
        sess.run(tf.global_variables_initializer())
        # BUGFIX: tf.train.latest_checkpoint expects the checkpoint
        # *directory*, not the "model.ckpt" file prefix, and returns None
        # when no checkpoint has been written yet.
        latest = tf.train.latest_checkpoint(config.ckpt_dir)
        if latest is not None:
            saver.restore(sess, latest)

        for i in range(10000):
            # 2. feed data: random binary input [batch_size, sequence_length]
            input_x = np.random.randn(config.batch_size, config.sequence_length)
            input_x[input_x >= 0] = 1
            input_x[input_x < 0] = 0
            input_y = generate_label(input_x, threshold)
            # BUGFIX: was `range(batch_size)` with `batch_size` undefined
            # in this scope; the batch is built from config.batch_size.
            p_mask_lm = list(range(config.batch_size))
            # 3. run one training step and log the loss.
            loss, _ = sess.run(
                [model.loss_val, model.train_op],
                feed_dict={model.x_mask_lm: input_x,
                           model.y_mask_lm: input_y,
                           model.p_mask_lm: p_mask_lm,
                           model.dropout_keep_prob: config.dropout_keep_prob})
            print(i, "loss:", loss, "-------------------------------------------------------")
            if i == 300:
                print("label[0]:", input_y[0])
                print("input_x:", input_x)
            if i % 500 == 0:
                saver.save(sess, save_path, global_step=i)