This page collects typical usage examples of the Python method model.Model.train. If you have been wondering how to use Model.train, or what it is good for, the curated examples below may help; you can also read up on the containing class, model.Model, for more context.
The following 15 code examples of Model.train are shown, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Python samples.
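Before diving in, here is the pattern nearly every example below shares: construct a Model, call train on some prepared data, then persist or evaluate the result. The sketch below is purely illustrative — each project defines its own Model constructor and train signature, so the arguments here (is_crf, the notes data, the hypothetical load_training_data helper) are placeholders borrowed loosely from Example 1:

import pickle
from model import Model   # each project ships its own model module

notes = load_training_data()   # hypothetical loader; see the examples for real ones
model = Model(is_crf=True)     # constructor arguments are project-specific
model.train(notes)             # train signatures vary: data, sessions, file paths...
with open('awesome.model', 'wb') as f:
    pickle.dump(model, f)      # a common way to persist the trained model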
Example 1: train
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
def train(training_list, model_path, format, is_crf=True, grid=False):
    # Read each (text, concept) file pair into a Note object
    notes = []
    for txt, con in training_list:
        note_tmp = Note(format)   # create the Note
        note_tmp.read(txt, con)   # read the file pair into it
        notes.append(note_tmp)    # add the Note to the list
    if not notes:
        print('Error: cannot train on 0 files. Terminating train.')
        return 1
    # Create a machine learning model
    model = Model(is_crf=is_crf)
    # Train the model on the Notes' data
    model.train(notes, grid)
    # Serialize the trained model
    print('pickle dump')
    with open(model_path, "wb") as m_file:
        pickle.dump(model, m_file)
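A model serialized this way can be restored later with the matching pickle.load call; a minimal sketch (model_path is whatever path the training run wrote to, and the restored object exposes whichever methods this project's Model class defines):

import pickle

with open(model_path, "rb") as m_file:
    model = pickle.load(m_file)   # the same trained Model instance, ready to use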
Example 2: main
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
def main():
    fun = 'fc_cifar'
    if len(sys.argv) > 1:
        fun = sys.argv[1]
    model = Model(name=fun, lr=0.1, n_epochs=500, on_gpu=True)
    model = eval(fun + '(model)')  # dispatch to the builder function named in `fun`
    model.train()
Example 3: main
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
def main(data_filename, stat_filename, max_iter, sample_rate, learn_rate, max_depth, split_points):
    dataset = DataSet(data_filename)
    print("Model parameters: [data_file=%s, stat_file=%s, max_iter=%d, sample_rate=%f, "
          "learn_rate=%f, max_depth=%d, split_points=%d]"
          % (data_filename, stat_filename, max_iter, sample_rate, learn_rate, max_depth, split_points))
    dataset.describe()
    stat_file = open(stat_filename, "w")
    stat_file.write("iteration\taverage loss in train data\tprediction accuracy on test data\taverage loss in test data\n")
    model = Model(max_iter, sample_rate, learn_rate, max_depth, split_points)
    # Hold out one third of the instances as a test set
    train_data = sample(dataset.get_instances_idset(), int(dataset.size() * 2.0 / 3.0))
    test_data = set(dataset.get_instances_idset()) - set(train_data)
    model.train(dataset, train_data, stat_file, test_data)
    #model.test(dataset, test_data)
    stat_file.close()
Example 4: main
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
def main(_):
    prepare_dirs_and_logger(config)
    if not config.task.lower().startswith('tsp'):
        raise Exception("[!] Task should start with TSP")
    if config.max_enc_length is None:
        config.max_enc_length = config.max_data_length
    if config.max_dec_length is None:
        config.max_dec_length = config.max_data_length
    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)
    model = Model(config)
    batch_size = config.batch_size
    train_enc_seq, train_target_seq, train_enc_seq_length, train_target_seq_length = gen_data('data/tsp10.txt')
    # Reserve the last batch of the training data for evaluation
    eval_enc_seq, eval_target_seq, eval_enc_seq_length, eval_target_seq_length = \
        train_enc_seq[-batch_size:], train_target_seq[-batch_size:], \
        train_enc_seq_length[-batch_size:], train_target_seq_length[-batch_size:]
    train_enc_seq, train_target_seq, train_enc_seq_length, train_target_seq_length = \
        train_enc_seq[:-batch_size], train_target_seq[:-batch_size], \
        train_enc_seq_length[:-batch_size], train_target_seq_length[:-batch_size]
    test_enc_seq, test_target_seq, test_enc_seq_length, test_target_seq_length = gen_data('data/tsp10_test.txt')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(min(config.max_step, len(train_enc_seq) // batch_size)):
            train_batch = {
                'enc_seq': train_enc_seq[step * batch_size:(step + 1) * batch_size],
                'enc_seq_length': train_enc_seq_length[step * batch_size:(step + 1) * batch_size],
                'target_seq': train_target_seq[step * batch_size:(step + 1) * batch_size],
                'target_seq_length': train_target_seq_length[step * batch_size:(step + 1) * batch_size]
            }
            loss = model.train(sess, train_batch)
            print(str(step) + " train loss : " + str(loss))
            if step > 0 and step % config.eval_step == 0:
                eval_batch = {
                    'enc_seq': eval_enc_seq,
                    'enc_seq_length': eval_enc_seq_length,
                    'target_seq': eval_target_seq,
                    'target_seq_length': eval_target_seq_length
                }
                eval_loss = model.eval(sess, eval_batch)
                print(str(step) + " eval loss : " + str(eval_loss))
Example 5: main
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
def main():
    args = [i.lower() for i in sys.argv]
    if 'help' in args or len(args) == 1:
        print_help()
    if 'download' in args:
        down = Downloader()
        down.download()
        down.preprocess()
        down.write_out(train="train.dat", test="test.dat")
    if 'tag' in args:
        t = Tagger()
        t.tag("test.dat")
        t.write_out("test_tagged.dat")
    if 'train' in args:
        m = Model()
        m.train("train.dat")
        m.write_out()
    if 'test' in args:
        m = Model("model.mdl")
        m.test("test_tagged.dat")
Example 6: Model
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
from model import Model
import argparse

parser = argparse.ArgumentParser(description='')
parser.add_argument('--epoch', dest='epoch', type=int, default=20, help='# of epochs')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=128, help='# images in batch')
parser.add_argument('--lr', dest='lr', type=float, default=0.001, help='initial learning rate')
parser.add_argument('--state', dest='state', type=int, default=1024, help='LSTM hidden state size')
parser.add_argument('--embed', dest='embed', type=int, default=300, help='embedding vector size')
parser.add_argument('--drop', dest='drop', type=float, default=0.5, help='dropout probability')
parser.add_argument('--freq', dest='freq', type=int, default=1024, help='how many top answers to keep')
parser.add_argument('--resnet_features', dest='resnet',
                    default='resnet_ckpt/resnet_v2_152.ckpt',
                    help='path to ResNet pretrained weights')
parser.add_argument('--project', dest='project', action='store_true',
                    help='project text features instead of tiling them')
args = parser.parse_args()

vqa_model = Model(batch_size=args.batch_size,
                  init_lr=args.lr,
                  state_size=args.state,
                  embedding_size=args.embed,
                  dropout_prob=args.drop,
                  most_freq_limit=args.freq,
                  resnet_weights_path=args.resnet,
                  project=args.project)
vqa_model.train(args.epoch)
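Note the action='store_true' on the --project flag. Declaring a boolean option with type=bool is a common argparse mistake: the type callable receives the raw command-line string, and bool() of any non-empty string is True, so passing --project False would still enable the flag. A quick self-contained demonstration of the pitfall:

import argparse

p = argparse.ArgumentParser()
p.add_argument('--broken', type=bool, default=False)
print(p.parse_args(['--broken', 'False']).broken)   # prints True: bool('False') is truthy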
Example 7: Model
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
model = Model(user_count, item_count, cate_count, cate_list)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())

print('test_gauc: %.4f\t test_auc: %.4f' % _eval(sess, model))
sys.stdout.flush()

lr = 1
start_time = time.time()
for _ in range(30):
    random.shuffle(train_set)
    epoch_size = round(len(train_set) / train_batch_size)
    loss_sum = 0.0
    for _, uij in DataInput(train_set, train_batch_size):
        loss = model.train(sess, uij, lr)
        loss_sum += loss
        if model.global_step.eval() % 1000 == 0:
            test_gauc, Auc = _eval(sess, model)
            print('Epoch %d Global_step %d\tTrain_loss: %.4f\tEval_GAUC: %.4f\tEval_AUC: %.4f' %
                  (model.global_epoch_step.eval(), model.global_step.eval(),
                   loss_sum / 1000, test_gauc, Auc))
            sys.stdout.flush()
            loss_sum = 0.0
        if model.global_step.eval() % 336000 == 0:
            lr = 0.1  # decay the learning rate once past 336k global steps
    print('Epoch %d DONE\tCost time: %.2f' %
          (model.global_epoch_step.eval(), time.time() - start_time))
Example 8: main
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-t",
        dest="txt",
        help="The files that contain the training examples",
        default=os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "../data/concept_assertion_relation_training_data/merged/txt/*",
        ),
    )
    parser.add_argument(
        "-c",
        dest="con",
        help="The files that contain the labels for the training examples",
        default=os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "../data/concept_assertion_relation_training_data/merged/concept/*",
        ),
    )
    parser.add_argument(
        "-m",
        dest="model",
        help="Path to the model that should be generated",
        default=os.path.join(os.path.dirname(os.path.realpath(__file__)), "../models/awesome.model"),
    )
    parser.add_argument(
        "-d", dest="disabled_features", help="The features that should not be used", nargs="+", default=None
    )
    parser.add_argument(
        "-e",
        dest="enabled_features",
        help="The features that should be used. This option trumps -d",
        nargs="+",
        default=None,
    )
    parser.add_argument("--no-svm", dest="no_svm", action="store_true", help="Disable SVM model generation")
    parser.add_argument("--no-lin", dest="no_lin", action="store_true", help="Disable LIN model generation")
    parser.add_argument("--no-crf", dest="no_crf", action="store_true", help="Disable CRF model generation")
    args = parser.parse_args()

    # Pair each text file with its concept (label) file
    training_list = []
    txt_files = glob.glob(args.txt)
    con_files = glob.glob(args.con)
    txt_files_map = helper.map_files(txt_files)
    con_files_map = helper.map_files(con_files)
    for k in txt_files_map:
        if k in con_files_map:
            training_list.append((txt_files_map[k], con_files_map[k]))

    # Combine the requested model types into a bit mask
    model_type = 0
    if not args.no_svm:
        model_type |= libml.SVM
    if not args.no_lin:
        model_type |= libml.LIN
    if not args.no_crf:
        model_type |= libml.CRF

    # Get data and labels from files
    data = []
    labels = []
    for txt, con in training_list:
        datum = read_txt(txt)
        data += datum
        labels += read_con(con, datum)

    # Train a model on the data and labels
    model = Model(filename=args.model, type=model_type)
    if args.disabled_features is not None:
        model.enabled_features = model.enabled_features - set(args.disabled_features)
    if args.enabled_features is not None:
        model.enabled_features = set(args.enabled_features)
    model.train(data, labels)
Example 9: print
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
print('Running fold...', len(train_index), len(valid_index))
X_train, y_train = X_train_raw[train_index, ...], y_train_raw[train_index, ...]
X_valid, y_valid = X_train_raw[valid_index, ...], y_train_raw[valid_index, ...]
with tf.Graph().as_default(), tf.Session() as sess:
    layers = vgg_bn()
    model = Model(layers, num_folds, batch_size=BATCH_SIZE)
    # Simple early stopping on the validation loss
    patience = 2
    wait = 0
    best = np.Inf
    print('Begin training...')
    for epoch in range(NUM_EPOCHS):
        model.train(X_train, y_train, epoch)
        print('Begin validation...')
        loss, accuracy, score = model.validate(X_valid, y_valid)
        print('Validation: fold: {}, epoch: {}, loss: {}, accuracy: {}, score: {}'.format(num_folds, epoch, loss, accuracy, score))
        if loss < best:
            print('New best validation loss! Was: {}, Now: {}'.format(best, loss))
            best = loss
            wait = 0
        else:
            wait += 1
            print('Validation loss did not improve for {}/{} epochs.'.format(wait, patience))
            if wait == 2:
                break  # stop early (the snippet is truncated at this point in the source)
Example 10: Model
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
FLAGS = flags.FLAGS
flags.DEFINE_boolean('test', False, 'If true, test against a random strategy.')
flags.DEFINE_boolean('play', False, 'If true, play against a trained TD-Gammon strategy.')
flags.DEFINE_boolean('restore', False, 'If true, restore a checkpoint before training.')

model_path = os.environ.get('MODEL_PATH', 'models/')
summary_path = os.environ.get('SUMMARY_PATH', 'summaries/')
checkpoint_path = os.environ.get('CHECKPOINT_PATH', 'checkpoints/')

if not os.path.exists(model_path):
    os.makedirs(model_path)
if not os.path.exists(checkpoint_path):
    os.makedirs(checkpoint_path)
if not os.path.exists(summary_path):
    os.makedirs(summary_path)

if __name__ == '__main__':
    graph = tf.Graph()
    sess = tf.Session(graph=graph)
    with sess.as_default(), graph.as_default():
        model = Model(sess, model_path, summary_path, checkpoint_path, restore=FLAGS.restore)
        if FLAGS.test:
            model.test(episodes=1000)
        elif FLAGS.play:
            model.play()
        else:
            model.train()
Example 11: Model
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
flags.DEFINE_boolean('random_selfplay', False, 'Watch 2 random agents play')
flags.DEFINE_boolean('restore', False, 'If true, restore a checkpoint before training.')
flags.DEFINE_integer('train_episodes', 5000, 'How many episodes to train for')

model_path = os.environ.get('MODEL_PATH', 'models/')
summary_path = os.environ.get('SUMMARY_PATH', 'summaries/')
checkpoint_path = os.environ.get('CHECKPOINT_PATH', 'checkpoints/')

if not os.path.exists(model_path):
    os.makedirs(model_path)
if not os.path.exists(checkpoint_path):
    os.makedirs(checkpoint_path)
if not os.path.exists(summary_path):
    os.makedirs(summary_path)

if __name__ == '__main__':
    graph = tf.Graph()
    sess = tf.Session(graph=graph)
    with sess.as_default(), graph.as_default():
        model = Model(sess, model_path, summary_path, checkpoint_path, restore=FLAGS.restore)
        if FLAGS.test:
            model.test(episodes=1000)
        elif FLAGS.play:
            model.play()
        elif FLAGS.random_selfplay:
            model.random_selfplay()
        else:
            model.train(FLAGS.train_episodes)
Example 12: max
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
for column in columns:
    print("%s" % column)
    all_values = data_set.all_values(column)
    print(all_values)
    print("min: %s max: %s" % (min([float(value) for value in all_values]),
                               max([float(value) for value in all_values])))
    print("\n")
sys.exit(1)

if args.traindata:
    data_set = Data(args.traindata)
    data_set.open_csv()
    for row in data_set:
        #print("input row: %s" % row)
        model.train(row, config_dict["target_key"])
    #print(model)
    if args.do_prune:
        model.prune()
        #print(model)

def run_test():
    train_set = Data(args.testdata)
    train_set.open_csv()
    correct_dict = {}
    incorrect_dict = {}
    num_correct = 0
    num_rows = 0
Example 13: Exception
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
import argparse
from model import Model

parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--n_iter', type=int, default=10000,
                    help='number of iterations')
parser.add_argument('--save_dir', type=str, default='save',
                    help='dir for saving weights')
parser.add_argument('--data_path', type=str,
                    help='path to train data')
parser.add_argument('--learning_rate', type=float, default=0.0001,
                    help='learning rate')
parser.add_argument('--batch_size', type=int, default=64,
                    help='batch size')
parser.add_argument('--restore_from', type=str,
                    help='path to saved weights to restore from')
args = parser.parse_args()

if __name__ == '__main__':
    if not args.data_path:
        raise Exception('please specify the path to the training data with --data_path')
    gen = Model(args.learning_rate)
    gen.train(args.data_path, args.save_dir, args.n_iter, args.batch_size, args.restore_from)
Example 14: Trainer
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
class Trainer(object):
    def __init__(self, config, rng):
        self.config = config
        self.rng = rng
        self.task = config.task
        self.model_dir = config.model_dir
        self.gpu_memory_fraction = config.gpu_memory_fraction
        self.log_step = config.log_step
        self.max_step = config.max_step
        self.num_log_samples = config.num_log_samples
        self.checkpoint_secs = config.checkpoint_secs
        if config.task.lower().startswith('tsp'):
            self.data_loader = TSPDataLoader(config, rng=self.rng)
        else:
            raise Exception("[!] Unknown task: {}".format(config.task))
        self.model = Model(
            config,
            inputs=self.data_loader.x,
            labels=self.data_loader.y,
            enc_seq_length=self.data_loader.seq_length,
            dec_seq_length=self.data_loader.seq_length,
            mask=self.data_loader.mask)
        self.build_session()
        show_all_variables()

    def build_session(self):
        self.saver = tf.train.Saver()
        self.summary_writer = tf.summary.FileWriter(self.model_dir)
        sv = tf.train.Supervisor(logdir=self.model_dir,
                                 is_chief=True,
                                 saver=self.saver,
                                 summary_op=None,
                                 summary_writer=self.summary_writer,
                                 save_summaries_secs=300,
                                 save_model_secs=self.checkpoint_secs,
                                 global_step=self.model.global_step)
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=self.gpu_memory_fraction,
            allow_growth=True)  # seems to be not working
        sess_config = tf.ConfigProto(allow_soft_placement=True,
                                     gpu_options=gpu_options)
        self.sess = sv.prepare_or_wait_for_session(config=sess_config)

    def train(self):
        tf.logging.info("Training starts...")
        self.data_loader.run_input_queue(self.sess)
        summary_writer = None
        for k in trange(self.max_step, desc="train"):
            fetch = {
                'optim': self.model.optim,
            }
            result = self.model.train(self.sess, fetch, summary_writer)
            if result['step'] % self.log_step == 0:
                self._test(self.summary_writer)
            summary_writer = self._get_summary_writer(result)
        self.data_loader.stop_input_queue()

    def test(self):
        tf.logging.info("Testing starts...")
        self.data_loader.run_input_queue(self.sess)
        for idx in range(10):
            self._test(None)
        self.data_loader.stop_input_queue()

    def _test(self, summary_writer):
        fetch = {
            'loss': self.model.total_inference_loss,
            'pred': self.model.dec_inference,
            'true': self.model.dec_targets,
        }
        result = self.model.test(self.sess, fetch, summary_writer)
        tf.logging.info("")
        tf.logging.info("test loss: {}".format(result['loss']))
        for idx in range(self.num_log_samples):
            pred, true = result['pred'][idx], result['true'][idx]
            tf.logging.info("test pred: {}".format(pred))
            tf.logging.info("test true: {} ({})".format(true, np.array_equal(pred, true)))
        if summary_writer:
            summary_writer.add_summary(result['summary'], result['step'])

    def _get_summary_writer(self, result):
        if result['step'] % self.log_step == 0:
            return self.summary_writer
        else:
            #......... remainder of the code omitted .........
Example 15: main
# Required module: from model import Model [as alias]
# Or: from model.Model import train [as alias]
def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--pred",
                        help="Predictions file",
                        dest="pred")
    parser.add_argument("--model",
                        help="Where to store the model",
                        dest="model")
    parser.add_argument("--no-context",
                        help="Disable context features",
                        dest="no_context",
                        action='store_true')
    parser.add_argument("--overfit",
                        help="Run an overfitting sanity check",
                        dest="overfit",
                        action='store_true')
    args = parser.parse_args()

    # Error-check args
    if not args.pred or not os.path.exists(args.pred):
        print('\n\tERROR: must provide a predictions file\n', file=sys.stderr)
        parser.print_help()
        exit(1)
    if not args.model or not os.path.exists(os.path.dirname(args.model)):
        print('\n\tERROR: must provide where to store the model\n', file=sys.stderr)
        parser.print_help()
        exit(1)

    # Load predictions
    all_pred = load_predictions.Predictions(args.pred, verbose=0)
    train_pred = all_pred.images()

    # Load annotations
    all_gold = load_annotations.Annotations(train_pred)
    train_gold = all_gold.images()

    # Ensure both sides refer to the same set of images
    predicted_names = set(img.getName() for img in train_pred)
    annotated_names = set(img.getName() for img in train_gold)
    common_names = predicted_names & annotated_names
    print('predictions: ', len(predicted_names))
    print('annotations: ', len(annotated_names))
    print('common:      ', len(common_names))

    pred_imgs = {i.getName(): i for i in train_pred if i.getName() in common_names}
    gold_imgs = {i.getName(): i for i in train_gold if i.getName() in common_names}
    print('predictions: ', len(pred_imgs))
    print('annotations: ', len(gold_imgs))

    # Build a classifier that generates context features
    m = Model(context=not args.no_context, overfit=args.overfit)
    m.train(pred_imgs, gold_imgs)

    # TODO - arg for where to serialize the model
    # Serialize the model
    print('serializing ', args.model)
    with open(args.model, 'wb') as f:
        pickle.dump(m, f)