本文整理匯總了Python中 logger.Logger 方法的典型用法代碼示例。如果您正苦於以下問題:Python logger.Logger 方法的具體用法?Python logger.Logger 怎麽用?Python logger.Logger 使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類 logger 的用法示例。
在下文中一共展示了logger.Logger方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def __init__(self, config, net):
    """Set up data loaders, optimizer and logging, then restore a checkpoint if requested.

    Args:
        config: experiment configuration (checkpoint_dir, log_dir, batch_size, load, ...).
        net: the network to train.
    """
    self.net = net
    self.config = config
    create_dir(config.checkpoint_dir)
    # Training data uses the standard tensor transform; the held-out set its v2 variant.
    train_set = VinDataset(config, transform=ToTensor())
    eval_set = VinTestDataset(config, transform=ToTensorV2())
    self.dataloader = DataLoader(train_set,
                                 batch_size=config.batch_size,
                                 shuffle=True,
                                 num_workers=4)
    self.test_dataloader = DataLoader(eval_set,
                                      batch_size=1,
                                      shuffle=True,
                                      num_workers=1)
    self.optimizer = optim.Adam(net.parameters(), lr=0.0005)
    self.logger = Logger(config.log_dir)
    self.construct_cors()
    self.save()
    if config.load:
        self.load()
示例2: __init__
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def __init__(self, args, splitter, gcn, classifier, comp_loss, dataset, num_classes):
    """Wire up the trainer's collaborators and, for static tasks, precompute history tensors."""
    self.args = args
    self.gcn = gcn
    self.classifier = classifier
    self.comp_loss = comp_loss
    self.splitter = splitter
    self.tasker = splitter.tasker
    self.data = dataset
    self.num_nodes = dataset.num_nodes
    self.num_classes = num_classes
    self.logger = logger.Logger(args, self.num_classes)
    self.init_optimizers(args)
    # A static graph has a single adjacency/feature snapshot; store it as a
    # one-element "history" so downstream code can treat both cases uniformly.
    if self.tasker.is_static:
        adj = u.sparse_prepare_tensor(self.tasker.adj_matrix,
                                      torch_size=[self.num_nodes],
                                      ignore_batch_dim=False)
        self.hist_adj_list = [adj]
        self.hist_ndFeats_list = [self.tasker.nodes_feats.float()]
示例3: run
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def run(extractor, classification_layer, images_df, batch_size=64, logger=None):
    """Classify every image in `images_df` and log/return the accuracy.

    Args:
        extractor: feature extractor exposing `extract(paths, layers, verbose, batch_size)`.
        classification_layer: name of the layer whose activations are class scores.
        images_df: DataFrame with 'image_path' and 'label' columns.
        batch_size: extraction batch size.
        logger: report destination; a fresh `Logger` is created when omitted.
            (The old `logger=Logger()` default was evaluated once at import
            time and silently shared across all calls.)

    Returns:
        (accuracy, num_correct, num_images), or (-1, 0, 0) for an empty frame.
    """
    if logger is None:
        logger = Logger()
    images_df = images_df.copy()
    if len(images_df) == 0:
        # Python 3 `print` is a function; the original Python-2 statement
        # form was a SyntaxError here.
        print('No images found!')
        return -1, 0, 0
    probs = extractor.extract(images_df['image_path'].values, [classification_layer],
                              verbose=1, batch_size=batch_size)
    # Predicted class = argmax over the score vector of each image.
    images_df['predicted_class'] = np.argmax(probs, axis=1).tolist()
    is_correct = images_df['label'] == images_df['predicted_class']
    accuracy = float(is_correct.sum()) / len(images_df)
    logger.log('Num images: {}'.format(len(images_df)))
    logger.log('Correctly classified: {}/{}'.format(is_correct.sum(), len(images_df)))
    logger.log('Accuracy: {:.5f}'.format(accuracy))
    logger.log('\n===')
    return accuracy, is_correct.sum(), len(images_df)
# image filenames must be in format "{content_name}_stylized_{artist_name}.jpg"
# uncomment methods which you want to evaluate and set the paths to the folders with the stylized images
示例4: configure_logger
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def configure_logger(log_dir):
    """Point the module-level `tb` and `log` handles at `log_dir`."""
    global tb, log
    logger.configure(log_dir, format_strs=['log'])
    # Mirror everything to tensorboard, csv and stdout.
    outputs = [logger.make_output_format(fmt, log_dir)
               for fmt in ('tensorboard', 'csv', 'stdout')]
    tb = logger.Logger(log_dir, outputs)
    log = logger.log
示例5: build_tensorboard
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def build_tensorboard(self):
    """Attach a tensorboard `Logger` writing under `self.log_path`."""
    # Imported lazily so tensorboard dependencies load only when logging is enabled.
    from logger import Logger
    self.logger = Logger(self.log_path)
示例6: set_logger
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def set_logger(status):
    """Create a fresh, minute-timestamped `Logger` under ./logs when `status` is truthy.

    Any pre-existing directory for the same timestamp is wiped first so the
    run starts with an empty log directory.

    Returns:
        The new `Logger`, or None when logging is disabled.
    """
    if not status:
        # Explicit is better than implicit: the old `else: pass` fell off the
        # end of the function and returned None silently.
        return None
    from logger import Logger
    date = time.strftime("%m_%d_%H_%M") + '_log'
    log_path = './logs/' + date
    if os.path.exists(log_path):
        shutil.rmtree(log_path)
    os.makedirs(log_path)
    return Logger(log_path)
示例7: get_logger
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def get_logger(config, mode='train'):
    """Return a `logger.Logger` writing to logs/<config name>/<mode>.

    Uses `makedirs(exist_ok=True)` instead of the old `exists()` check
    followed by `makedirs()`, which raced (and crashed) when two processes
    created the folder simultaneously.
    """
    folder = os.path.join('logs', config['name'], mode)
    os.makedirs(folder, exist_ok=True)
    return logger.Logger(folder)
示例8: build_tensorboard
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def build_tensorboard(self):
    """Build a tensorboard logger rooted at `self.log_dir`."""
    # Imported lazily so tensorboard dependencies load only when logging is enabled.
    from logger import Logger
    self.logger = Logger(self.log_dir)
示例9: run
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def run(self):
    """Run the TRPO training loop for `self.num_episodes` episodes.

    Each episode: sample trajectories in parallel, log the mean episode
    reward, compute advantages, take one TRPO policy step, refit the value
    network, and checkpoint every 10 episodes.

    NOTE(review): TF1-style code — relies on a live `tf.Session`; statement
    order matters (networks must be bound to the session before the
    variables initializer runs).
    """
    with tf.Session() as sess:
        saver = tf.train.Saver()
        # Logger needs the session so it can evaluate summary ops.
        logger = Logger(sess=sess, directory=self.directory)
        self.value_network.set_session(sess)
        sess.run(tf.global_variables_initializer())
        for i in range(self.num_episodes):
            logger.set_step(step=i)
            # Generate simulation paths with the current policy weights.
            self.parallel_sampler.update_policy_params(sess)
            paths = self.parallel_sampler.generate_paths(max_num_samples=self.sampler_max_samples)
            paths = self.parallel_sampler.truncate_paths(paths, max_num_samples=self.sampler_max_samples)
            # Compute the average reward of the sampled paths
            logger.add_summary(sess.run(self.summary_op,
                                        feed_dict={self.average_reward:
                                                   numpy.mean([path['total_reward'] for path in paths])}))
            # Calculate discounted cumulative rewards and advantages
            # (GAE; advantages are not forced positive here).
            samples = self.sampler.process_paths(paths, self.value_network, self.discount, self.gae_lambda,
                                                 self.sampler_center_advantage, positive_advantage=False)
            # Update policy network (single constrained TRPO step).
            self.trpo.optimize_policy(sess, samples, logger, subsample_rate=self.subsample_rate)
            # Update value network on the freshly collected paths.
            self.value_network.train(paths)
            # Save the model every 10 episodes.
            if (i + 1) % 10 == 0:
                saver.save(sess, os.path.join(self.directory, '{}.ckpt'.format(self.task)))
            # Flush accumulated episode info to the log.
            logger.flush()
示例10: configure_logger
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def configure_logger(self):
    """Create the run logger and point tensorboard `configure` at the same path."""
    log_path = os.path.join(self.out_dir, "log")
    self.logger = Logger(log_path)
    configure(log_path, flush_secs=5)
示例11: build_tensorboard
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def build_tensorboard(self):
    """Attach a tensorboard `Logger` writing under `self.log_path`."""
    # Imported lazily so tensorboard dependencies load only when logging is enabled.
    from logger import Logger
    self.logger = Logger(self.log_path)
# ====================================================================#
# ====================================================================#
示例12: main
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def main():
    """Train an MLP on SIGN-precomputed features for OGBN-Products over several runs."""
    parser = argparse.ArgumentParser(description='OGBN-Products (SIGN)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    # num_layers doubles as the number of SIGN propagation hops below.
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = PygNodePropPredDataset(name='ogbn-products')
    split_idx = dataset.get_idx_split()
    # SIGN precomputes the k-hop-propagated feature matrices once, up front.
    data = SIGN(args.num_layers)(dataset[0])  # This might take a while.

    # One feature matrix per hop; hop 0 is the raw node features.
    xs = [data.x] + [data[f'x{i}'] for i in range(1, args.num_layers + 1)]
    xs_train = [x[split_idx['train']].to(device) for x in xs]
    xs_valid = [x[split_idx['valid']].to(device) for x in xs]
    xs_test = [x[split_idx['test']].to(device) for x in xs]

    y_train_true = data.y[split_idx['train']].to(device)
    y_valid_true = data.y[split_idx['valid']].to(device)
    y_test_true = data.y[split_idx['test']].to(device)

    model = MLP(data.x.size(-1), args.hidden_channels, dataset.num_classes, args.num_layers,
                args.dropout).to(device)
    evaluator = Evaluator(name='ogbn-products')
    logger = Logger(args.runs, args)

    for run in range(args.runs):
        # Fresh weights and optimizer state for every run.
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for epoch in range(1, 1 + args.epochs):
            loss = train(model, xs_train, y_train_true, optimizer)
            # Evaluate on all three splits every epoch; Logger tracks the best.
            train_acc = test(model, xs_train, y_train_true, evaluator)
            valid_acc = test(model, xs_valid, y_valid_true, evaluator)
            test_acc = test(model, xs_test, y_test_true, evaluator)
            result = (train_acc, valid_acc, test_acc)
            logger.add_result(run, result)
            if epoch % args.log_steps == 0:
                train_acc, valid_acc, test_acc = result
                print(f'Run: {run + 1:02d}, '
                      f'Epoch: {epoch:02d}, '
                      f'Loss: {loss:.4f}, '
                      f'Train: {100 * train_acc:.2f}%, '
                      f'Valid: {100 * valid_acc:.2f}%, '
                      f'Test: {100 * test_acc:.2f}%')
        logger.print_statistics(run)
    logger.print_statistics()
示例13: main
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def main():
    """Train an MLP baseline on OGBN-Products node features and report accuracy."""
    ap = argparse.ArgumentParser(description='OGBN-Products (MLP)')
    ap.add_argument('--device', type=int, default=0)
    ap.add_argument('--log_steps', type=int, default=1)
    ap.add_argument('--use_node_embedding', action='store_true')
    ap.add_argument('--num_layers', type=int, default=3)
    ap.add_argument('--hidden_channels', type=int, default=256)
    ap.add_argument('--dropout', type=float, default=0.0)
    ap.add_argument('--lr', type=float, default=0.01)
    ap.add_argument('--epochs', type=int, default=300)
    ap.add_argument('--runs', type=int, default=10)
    args = ap.parse_args()
    print(args)

    dev = torch.device(f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu')

    dataset = PygNodePropPredDataset(name='ogbn-products')
    split_idx = dataset.get_idx_split()
    data = dataset[0]

    # Raw node features, optionally concatenated with pretrained embeddings.
    feats = data.x
    if args.use_node_embedding:
        feats = torch.cat([feats, torch.load('embedding.pt', map_location='cpu')], dim=-1)
    feats = feats.to(dev)

    y_true = data.y.to(dev)
    train_idx = split_idx['train'].to(dev)

    model = MLP(feats.size(-1), args.hidden_channels, dataset.num_classes,
                args.num_layers, args.dropout).to(dev)
    evaluator = Evaluator(name='ogbn-products')
    logger = Logger(args.runs, args)

    for run in range(args.runs):
        # Fresh weights and optimizer state per run.
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for ep in range(args.epochs):
            epoch = ep + 1
            loss = train(model, feats, y_true, train_idx, optimizer)
            result = test(model, feats, y_true, split_idx, evaluator)
            logger.add_result(run, result)
            if epoch % args.log_steps == 0:
                train_acc, valid_acc, test_acc = result
                print(f'Run: {run + 1:02d}, '
                      f'Epoch: {epoch:02d}, '
                      f'Loss: {loss:.4f}, '
                      f'Train: {100 * train_acc:.2f}%, '
                      f'Valid: {100 * valid_acc:.2f}%, '
                      f'Test: {100 * test_acc:.2f}%')
        logger.print_statistics(run)
    logger.print_statistics()
示例14: main
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def main():
    """Train an MLP baseline on OGBN-MAG paper features and report accuracy."""
    ap = argparse.ArgumentParser(description='OGBN-MAG (MLP)')
    ap.add_argument('--device', type=int, default=0)
    ap.add_argument('--log_steps', type=int, default=1)
    ap.add_argument('--use_node_embedding', action='store_true')
    ap.add_argument('--num_layers', type=int, default=3)
    ap.add_argument('--hidden_channels', type=int, default=256)
    ap.add_argument('--dropout', type=float, default=0.0)
    ap.add_argument('--lr', type=float, default=0.01)
    ap.add_argument('--epochs', type=int, default=500)
    ap.add_argument('--runs', type=int, default=10)
    args = ap.parse_args()
    print(args)

    dev = torch.device(f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu')

    dataset = PygNodePropPredDataset(name='ogbn-mag')
    split_idx = dataset.get_idx_split()
    data = dataset[0]
    print(data)

    # MAG is heterogeneous; this baseline only uses the 'paper' node type.
    feats = data.x_dict['paper']
    if args.use_node_embedding:
        feats = torch.cat([feats, torch.load('embedding.pt', map_location='cpu')], dim=-1)
    feats = feats.to(dev)

    y_true = data.y_dict['paper'].to(dev)
    train_idx = split_idx['train']['paper'].to(dev)

    model = MLP(feats.size(-1), args.hidden_channels, dataset.num_classes,
                args.num_layers, args.dropout).to(dev)
    evaluator = Evaluator(name='ogbn-mag')
    logger = Logger(args.runs, args)

    for run in range(args.runs):
        # Fresh weights and optimizer state per run.
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for ep in range(args.epochs):
            epoch = ep + 1
            loss = train(model, feats, y_true, train_idx, optimizer)
            result = test(model, feats, y_true, split_idx, evaluator)
            logger.add_result(run, result)
            if epoch % args.log_steps == 0:
                train_acc, valid_acc, test_acc = result
                print(f'Run: {run + 1:02d}, '
                      f'Epoch: {epoch:02d}, '
                      f'Loss: {loss:.4f}, '
                      f'Train: {100 * train_acc:.2f}%, '
                      f'Valid: {100 * valid_acc:.2f}%, '
                      f'Test: {100 * test_acc:.2f}%')
        logger.print_statistics(run)
    logger.print_statistics()
示例15: main
# 需要導入模塊: import logger [as 別名]
# 或者: from logger import Logger [as 別名]
def main():
    """Train an MLP baseline on OGBN-Arxiv node features and report accuracy."""
    ap = argparse.ArgumentParser(description='OGBN-Arxiv (MLP)')
    ap.add_argument('--device', type=int, default=0)
    ap.add_argument('--log_steps', type=int, default=1)
    ap.add_argument('--use_node_embedding', action='store_true')
    ap.add_argument('--num_layers', type=int, default=3)
    ap.add_argument('--hidden_channels', type=int, default=256)
    ap.add_argument('--dropout', type=float, default=0.5)
    ap.add_argument('--lr', type=float, default=0.01)
    ap.add_argument('--epochs', type=int, default=500)
    ap.add_argument('--runs', type=int, default=10)
    args = ap.parse_args()
    print(args)

    dev = torch.device(f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu')

    dataset = PygNodePropPredDataset(name='ogbn-arxiv')
    split_idx = dataset.get_idx_split()
    data = dataset[0]

    # Raw node features, optionally concatenated with pretrained embeddings.
    feats = data.x
    if args.use_node_embedding:
        feats = torch.cat([feats, torch.load('embedding.pt', map_location='cpu')], dim=-1)
    feats = feats.to(dev)

    y_true = data.y.to(dev)
    train_idx = split_idx['train'].to(dev)

    model = MLP(feats.size(-1), args.hidden_channels, dataset.num_classes,
                args.num_layers, args.dropout).to(dev)
    evaluator = Evaluator(name='ogbn-arxiv')
    logger = Logger(args.runs, args)

    for run in range(args.runs):
        # Fresh weights and optimizer state per run.
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for ep in range(args.epochs):
            epoch = ep + 1
            loss = train(model, feats, y_true, train_idx, optimizer)
            result = test(model, feats, y_true, split_idx, evaluator)
            logger.add_result(run, result)
            if epoch % args.log_steps == 0:
                train_acc, valid_acc, test_acc = result
                print(f'Run: {run + 1:02d}, '
                      f'Epoch: {epoch:02d}, '
                      f'Loss: {loss:.4f}, '
                      f'Train: {100 * train_acc:.2f}%, '
                      f'Valid: {100 * valid_acc:.2f}%, '
                      f'Test: {100 * test_acc:.2f}%')
        logger.print_statistics(run)
    logger.print_statistics()