This article collects typical usage examples of the model.get_model method in Python. If you have been wondering what model.get_model does, how to call it, or where to find working examples of it, the curated code samples below should help. You can also explore further usage examples for the module model in which this method is defined.
The following shows 13 code examples of model.get_model, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
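Note that get_model is not a standard-library API: every project below ships its own model.get_model, and the signatures differ (some return a bare network, others a tuple that also carries a checkpoint path or a KL weight). Purely as a point of reference, a minimal factory of this kind, with entirely hypothetical layer choices, might look like:

from tensorflow.keras import layers, models

def get_model(input_shape):
    # Hypothetical sketch: build and return a small Keras network.
    return models.Sequential([
        layers.Dense(64, activation="relu", input_shape=input_shape),
        layers.Dense(1),
    ])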
Example 1: test
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def test(weights_path, batch_size):
    """Tests a model."""
    try:
        # Loads or creates test data.
        input_shape, test, test_targets, \
            test_coords, orig_test_shape = get_test_data()
    except FileNotFoundError as e:
        print(e)
        print("Could not find test files in data_dir. "
              "Did you specify the correct orig_test_data_dir?")
        return

    # Loads or creates model.
    model, checkpoint_path, _ = get_model(input_shape,
                                          scale_factor=len(test) / batch_size,
                                          weights_path=weights_path)

    # Predicts on test data and saves results.
    predict(model, test, test_targets, test_coords,
            orig_test_shape, input_shape)
    plots()
Example 2: train
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def train(weights_path, epochs, batch_size, initial_epoch,
          kl_start_epoch, kl_alpha_increase_per_epoch):
    """Trains a model."""
    print('loading data...')
    # Loads or creates training data.
    input_shape, train, valid, train_targets, valid_targets = get_train_data()

    print('getting model...')
    # Loads or creates model.
    model, checkpoint_path, kl_alpha = get_model(input_shape,
                                                 scale_factor=len(train) / batch_size,
                                                 weights_path=weights_path)

    # Sets callbacks.
    checkpointer = ModelCheckpoint(checkpoint_path, verbose=1,
                                   save_weights_only=True, save_best_only=True)
    scheduler = LearningRateScheduler(schedule)
    annealer = (Callback() if kl_alpha is None
                else AnnealingCallback(kl_alpha, kl_start_epoch,
                                       kl_alpha_increase_per_epoch))

    print('fitting model...')
    # Trains model.
    model.fit(train, train_targets, batch_size, epochs,
              initial_epoch=initial_epoch,
              callbacks=[checkpointer, scheduler, annealer],
              validation_data=(valid, valid_targets))
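The AnnealingCallback used above is defined elsewhere in the same repository and is not shown on this page. As a sketch only (the class body below is an assumption, not the repository's code, and it assumes kl_alpha is a Keras backend variable), a KL-annealing callback that ramps up the weight after kl_start_epoch could look like:

from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import Callback

class AnnealingCallback(Callback):
    # Hypothetical sketch: linearly increase the KL weight once annealing starts.
    def __init__(self, kl_alpha, kl_start_epoch, kl_alpha_increase_per_epoch):
        super().__init__()
        self.kl_alpha = kl_alpha
        self.kl_start_epoch = kl_start_epoch
        self.kl_alpha_increase_per_epoch = kl_alpha_increase_per_epoch

    def on_epoch_begin(self, epoch, logs=None):
        if epoch >= self.kl_start_epoch:
            new_weight = min(1.0, (epoch - self.kl_start_epoch + 1)
                             * self.kl_alpha_increase_per_epoch)
            K.set_value(self.kl_alpha, new_weight)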
Example 3: __init__
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def __init__(self, model_folder, checkpoint_file):
    # Make the project's model and dataset modules importable.
    sys.path.append(model_folder)
    from model import get_model
    from dataset import load_data

    self.dataset = load_data('validation')
    self.sess = tf.InteractiveSession()
    self.model = get_model('policy')
    # Restore trained weights into the freshly built graph.
    saver = tf.train.Saver()
    saver.restore(self.sess, checkpoint_file)
Example 4: export_input_graph
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def export_input_graph(model_folder):
    # Make the project's model module importable.
    sys.path.append(model_folder)
    from model import get_model

    with tf.Session() as sess:
        model = get_model('policy')
        saver = tf.train.Saver()
        # Serialize the graph definition for later inspection or freezing.
        tf.train.write_graph(sess.graph_def, model_folder, 'input_graph.pb', as_text=True)
Example 5: main
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def main():
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    image_paths = list(Path(image_dir).glob("*.*"))

    for image_path in image_paths:
        image = cv2.imread(str(image_path))
        h, w, _ = image.shape
        # image = image[:(h // 16) * 16, :(w // 16) * 16]  # for stride (maximum 16)
        h, w, _ = image.shape
        out_image = np.zeros((h, w * 1, 3), dtype=np.uint8)
        noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
        out_image[:, :w] = denoised_image

        if args.output_dir:
            cv2.imwrite(str(output_dir.joinpath(image_path.name))[:-4] + ".png", out_image)
        else:
            cv2.imshow("result", out_image)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
Example 6: merge_models
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def merge_models(args, model, ingr_vocab_size, instrs_vocab_size):
    load_args = pickle.load(open(os.path.join(args.save_dir, args.project_name,
                                              args.transfer_from, 'checkpoints/args.pkl'), 'rb'))
    model_ingrs = get_model(load_args, ingr_vocab_size, instrs_vocab_size)
    model_path = os.path.join(args.save_dir, args.project_name, args.transfer_from,
                              'checkpoints', 'modelbest.ckpt')

    # Load the trained model parameters
    model_ingrs.load_state_dict(torch.load(model_path, map_location=map_loc))
    model.ingredient_decoder = model_ingrs.ingredient_decoder
    args.transf_layers_ingrs = load_args.transf_layers_ingrs
    args.n_att_ingrs = load_args.n_att_ingrs

    return args, model
Example 7: main
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def main():
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    image_paths = list(Path(image_dir).glob("*.*"))

    for image_path in image_paths:
        image = cv2.imread(str(image_path))
        h, w, _ = image.shape
        image = image[:(h // 16) * 16, :(w // 16) * 16]  # for stride (maximum 16)
        h, w, _ = image.shape
        out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
        noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
        out_image[:, :w] = image
        out_image[:, w:w * 2] = noise_image
        out_image[:, w * 2:] = denoised_image

        if args.output_dir:
            cv2.imwrite(str(output_dir.joinpath(image_path.name))[:-4] + ".png", out_image)
        else:
            cv2.imshow("result", out_image)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
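Example 7 tiles the clean, noisy, and denoised images side by side on one canvas; the slicing pattern generalizes to any number of panels. A self-contained illustration of the same tiling idea, using made-up solid-color panels:

import numpy as np

h, w = 4, 5
panels = [np.full((h, w, 3), fill, dtype=np.uint8) for fill in (0, 127, 255)]
canvas = np.zeros((h, w * len(panels), 3), dtype=np.uint8)
for i, panel in enumerate(panels):
    canvas[:, i * w:(i + 1) * w] = panel  # panel i occupies columns [i*w, (i+1)*w)
print(canvas.shape)  # (4, 15, 3)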
Example 8: get_coin_decisions
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def get_coin_decisions(df, backtest=True):
    model = get_model(df)
    df_list, backtests = get_dataset_df(df, backtest)
    total_decisions_df = pd.DataFrame()
    total_prices_df = pd.DataFrame()

    for coin, coin_df in backtests.items():
        X, y = get_dataset(coin_df)
        final_df = get_backtest_action(X, y, model)
        for col in ['date', 'price']:
            final_df[col] = coin_df[col]
        coin_decision_df = final_df[['date', 'final_decision']]
        coin_prices_df = final_df[['date', 'price']]
        coin_decision_df.columns = ['date', coin]
        coin_prices_df.columns = ['date', coin]
        if total_decisions_df.empty:
            total_decisions_df = coin_decision_df
        else:
            total_decisions_df = pd.merge(total_decisions_df, coin_decision_df)
        if total_prices_df.empty:
            total_prices_df = coin_prices_df
        else:
            total_prices_df = pd.merge(total_prices_df, coin_prices_df)

    df_list = []
    for df in [total_decisions_df, total_prices_df]:
        df.set_index('date', inplace=True)
        df_list.append(df.T.reset_index())
    return df_list
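The final loop in example 8 converts the merged long-format frames into one row per coin via set_index followed by a transpose. A self-contained illustration of just that step, with made-up data:

import pandas as pd

decisions = pd.DataFrame({"date": ["2021-01-01", "2021-01-02"],
                          "BTC": ["buy", "hold"],
                          "ETH": ["hold", "sell"]})
decisions.set_index("date", inplace=True)
wide = decisions.T.reset_index()
print(wide)  # rows: BTC, ETH; columns: index, 2021-01-01, 2021-01-02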
Example 9: train
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def train(args):
    device = args.device
    load_path = args.load_path

    # load data
    train_data = load_data('train')
    val_data = load_data('validation')

    # load model
    with tf.device('/gpu:%d' % device):
        model = get_model('train')

    # trainer init
    optimizer = Config.optimizer
    train_step = optimizer.minimize(model.loss)

    # init session and saver
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    if load_path is None:
        sess.run(tf.initialize_all_variables())
    else:
        saver.restore(sess, load_path)
        print("Model restored from %s" % load_path)

    # accuracy
    pred = tf.reshape(model.pred, [-1, 9 * 10 * 16])
    label = tf.reshape(model.label, [-1, 9 * 10 * 16])
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    logging.basicConfig(filename='log.txt', level=logging.DEBUG)

    # train steps
    for i in range(Config.n_epoch):
        # training step
        batch_data, batch_label = train_data.next_batch(Config.minibatch_size)
        input_dict = {model.label: batch_label}
        for var, data in zip(model.inputs, batch_data):
            input_dict[var] = data
        sess.run(train_step, feed_dict=input_dict)

        # evaluation step
        if (i + 1) % Config.evalue_point == 0:
            batch_data, batch_label = val_data.next_batch(Config.minibatch_size)
            val_dict = {model.label: batch_label}
            for var, data in zip(model.inputs, batch_data):
                val_dict[var] = data
            score = accuracy.eval(feed_dict=val_dict)
            print("epoch %d, accuracy is %.2f" % (i, score))
            logging.info("epoch %d, accuracy is %.2f" % (i, score))

        # save step
        if (i + 1) % Config.check_point == 0:
            save_path = saver.save(sess, "%s/epoch-%d" % (Config.save_path, i))
            print("Model saved in file: %s" % save_path)
            logging.info("Model saved in file: %s" % save_path)
Example 10: train
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def train(args):
    device = args.device
    load_path = args.load_path

    # load data
    train_data = load_data('train')
    val_data = load_data('validation')

    # load model
    with tf.device('/gpu:%d' % device):
        model = get_model('train')

    # trainer init
    optimizer = Config.optimizer
    train_step = optimizer.minimize(model.loss)

    # init session and saver
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    if load_path is None:
        sess.run(tf.initialize_all_variables())
    else:
        saver.restore(sess, load_path)
        print("Model restored from %s" % load_path)

    # accuracy
    pred = tf.reshape(model.pred, [-1, 9 * 10 * 16])
    label = tf.reshape(model.label, [-1, 9 * 10 * 16])
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # train steps
    for i in range(Config.n_epoch):
        # training step
        batch_data, batch_label = train_data.next_batch(Config.minibatch_size)
        input_dict = {model.label: batch_label}
        for var, data in zip(model.inputs, batch_data):
            input_dict[var] = data
        sess.run(train_step, feed_dict=input_dict)

        # evaluation step
        if (i + 1) % Config.evalue_point == 0:
            batch_data, batch_label = val_data.next_batch(Config.minibatch_size)
            val_dict = {model.label: batch_label}
            for var, data in zip(model.inputs, batch_data):
                val_dict[var] = data
            score = accuracy.eval(feed_dict=val_dict)
            print("epoch %d, accuracy is %.2f" % (i, score))

        # save step
        if (i + 1) % Config.check_point == 0:
            save_path = saver.save(sess, "%s/epoch-%d" % (Config.save_path, i))
            print("Model saved in file: %s" % save_path)
Example 11: train
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def train(args):
    device = args.device
    load_path = args.load_path

    # load data
    train_data = load_data('train')
    val_data = load_data('validation')

    # load model
    with tf.device('/gpu:%d' % device):
        model = get_model('policy')

    # trainer init
    optimizer = Config.optimizer
    train_step = optimizer.minimize(model.loss)

    # init session and saver
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    if load_path is None:
        sess.run(tf.initialize_all_variables())
    else:
        saver.restore(sess, load_path)
        print("Model restored from %s" % load_path)

    # accuracy
    pred = tf.reshape(model.pred, [-1, 9 * 10 * 16])
    label = tf.reshape(model.label, [-1, 9 * 10 * 16])
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    logging.basicConfig(filename='log.txt', level=logging.DEBUG)

    # train steps
    for i in range(Config.n_epoch):
        # training step
        batch_data, batch_label = train_data.next_batch(Config.minibatch_size)
        input_dict = {model.label: batch_label}
        for var, data in zip(model.inputs, batch_data):
            input_dict[var] = data
        sess.run(train_step, feed_dict=input_dict)

        # evaluation step
        if (i + 1) % Config.evalue_point == 0:
            batch_data, batch_label = val_data.next_batch(Config.minibatch_size)
            val_dict = {model.label: batch_label}
            for var, data in zip(model.inputs, batch_data):
                val_dict[var] = data
            score = accuracy.eval(feed_dict=val_dict)
            print("epoch %d, accuracy is %.2f" % (i, score))
            logging.info("epoch %d, accuracy is %.2f" % (i, score))

        # save step
        if (i + 1) % Config.check_point == 0:
            save_path = saver.save(sess, "%s/epoch-%d" % (Config.save_path, i))
            print("Model saved in file: %s" % save_path)
            logging.info("Model saved in file: %s" % save_path)
Example 12: main
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def main():
    args = get_args()
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    if args.weight is not None:
        model.load_weights(args.weight)

    opt = Adam(lr=lr)
    callbacks = []

    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        loss_type = l0()

    model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])

    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir, source_noise_model, target_noise_model,
                                    batch_size=batch_size, image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)
    callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr)))
    callbacks.append(ModelCheckpoint(str(output_path) + "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
                                     monitor="val_PSNR",
                                     verbose=1,
                                     mode="max",
                                     save_best_only=True))

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
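The Schedule object handed to LearningRateScheduler above is a callable defined elsewhere in the repository; its body is not shown on this page. One plausible sketch (an assumption mirroring the common step-decay pattern, with made-up decay points) is:

class Schedule:
    # Hypothetical sketch: step-decay the initial learning rate over training.
    def __init__(self, nb_epochs, initial_lr):
        self.epochs = nb_epochs
        self.initial_lr = initial_lr

    def __call__(self, epoch_idx):
        # Keras calls this with the current epoch index and applies the returned lr.
        if epoch_idx < self.epochs * 0.25:
            return self.initial_lr
        elif epoch_idx < self.epochs * 0.5:
            return self.initial_lr * 0.5
        elif epoch_idx < self.epochs * 0.75:
            return self.initial_lr * 0.25
        return self.initial_lr * 0.125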
Example 13: init_all
# Required import: import model [as alias]
# Or: from model import get_model [as alias]
def init_all(config, gpu_list, checkpoint, mode, *args, **params):
    result = {}

    logger.info("Begin to initialize dataset and formatter...")
    if mode == "train":
        init_formatter(config, ["train", "valid"], *args, **params)
        result["train_dataset"], result["valid_dataset"] = init_dataset(config, *args, **params)
    else:
        init_formatter(config, ["test"], *args, **params)
        result["test_dataset"] = init_test_dataset(config, *args, **params)

    logger.info("Begin to initialize models...")
    model = get_model(config.get("model", "model_name"))(config, gpu_list, *args, **params)
    optimizer = init_optimizer(model, config, *args, **params)
    trained_epoch = 0
    global_step = 0

    if len(gpu_list) > 0:
        model = model.cuda()
        try:
            model.init_multi_gpu(gpu_list, config, *args, **params)
        except Exception as e:
            logger.warning("No init_multi_gpu implemented in the model, use single gpu instead.")

    try:
        parameters = torch.load(checkpoint)
        model.load_state_dict(parameters["model"])
        if mode == "train":
            trained_epoch = parameters["trained_epoch"]
            if config.get("train", "optimizer") == parameters["optimizer_name"]:
                optimizer.load_state_dict(parameters["optimizer"])
            else:
                logger.warning("Optimizer changed, do not load parameters of optimizer.")
            if "global_step" in parameters:
                global_step = parameters["global_step"]
    except Exception as e:
        information = "Cannot load checkpoint file with error %s" % str(e)
        if mode == "test":
            logger.error(information)
            raise e
        else:
            logger.warning(information)

    result["model"] = model
    if mode == "train":
        result["optimizer"] = optimizer
        result["trained_epoch"] = trained_epoch
        result["output_function"] = init_output_function(config)
        result["global_step"] = global_step

    logger.info("Initialize done.")
    return result
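The loading logic in example 13 implies a checkpoint layout. The matching save routine is not shown on this page, but one can be reconstructed from the keys the code reads back (the function below is illustrative; only the dictionary keys are taken from the code above):

import torch

def save_checkpoint(path, model, optimizer, optimizer_name, trained_epoch, global_step):
    # Mirrors what init_all expects: "model", "optimizer", "optimizer_name",
    # "trained_epoch", and the optional "global_step".
    torch.save({
        "model": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "optimizer_name": optimizer_name,
        "trained_epoch": trained_epoch,
        "global_step": global_step,
    }, path)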