本文整理匯總了Python中utils.get_optimizer方法的典型用法代碼示例。如果您正苦於以下問題:Python utils.get_optimizer方法的具體用法?Python utils.get_optimizer怎麽用?Python utils.get_optimizer使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類utils
的用法示例。
在下文中一共展示了utils.get_optimizer方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import get_optimizer [as 別名]
def __init__(self, input_dim=None, output_dim=1, factor_order=10, init_path=None, opt_algo='gd', learning_rate=1e-2,
             l2_w=0, l2_v=0, random_seed=None):
    """Build a Factorization Machine graph.

    Logits are X*w + b + 0.5 * sum_k((X*v)_k^2 - (X^2 * v^2)_k); the model is
    trained with sigmoid cross-entropy plus L2 penalties, using an optimizer
    obtained from utils.get_optimizer.
    """
    Model.__init__(self)
    # Trainable parameters: linear weights 'w', factor matrix 'v', bias 'b'.
    init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                 ('v', [input_dim, factor_order], 'xavier', dtype),
                 ('b', [output_dim], 'zero', dtype)]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        # Sparse feature matrix and dense label vector.
        self.X = tf.sparse_placeholder(dtype)
        self.y = tf.placeholder(dtype)
        self.vars = utils.init_var_map(init_vars, init_path)
        # Element-wise square of X, kept in sparse form.
        x_squared = tf.SparseTensor(self.X.indices, tf.square(self.X.values), tf.to_int64(tf.shape(self.X)))
        # (X*v)^2 half of the pairwise-interaction identity.
        xv_sq = tf.square(tf.sparse_tensor_dense_matmul(self.X, self.vars['v']))
        # Second-order term: 0.5 * sum over factors of (X*v)^2 - X^2 * v^2.
        pairwise = 0.5 * tf.reshape(
            tf.reduce_sum(xv_sq - tf.sparse_tensor_dense_matmul(x_squared, tf.square(self.vars['v'])), 1),
            [-1, output_dim])
        # First-order (linear) term.
        linear = tf.sparse_tensor_dense_matmul(self.X, self.vars['w'])
        logits = tf.reshape(linear + self.vars['b'] + pairwise, [-1])
        self.y_prob = tf.sigmoid(logits)
        # NOTE(review): L2 is applied to the activations (Xw and (Xv)^2), not
        # to the raw weight tensors — this mirrors the original code exactly.
        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=self.y)) + \
            l2_w * tf.nn.l2_loss(linear) + \
            l2_v * tf.nn.l2_loss(xv_sq)
        self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

    # Session with on-demand GPU memory growth; initialize all variables.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    self.sess = tf.Session(config=config)
    tf.global_variables_initializer().run(session=self.sess)
示例2: __init__
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import get_optimizer [as 別名]
def __init__(self, opt):
    """Wire up model, loss, optimizer, schedulers, data loaders, meters and
    Visdom logging from the parsed options object ``opt``."""
    self.opt = opt
    # Use CUDA whenever at least one GPU is requested.
    self.device = torch.device("cuda" if opt.ngpu else "cpu")

    # Backbone network and classifier head, moved onto the target device.
    self.model, self.classifier = models.get_model(opt.net_type,
                                                   opt.classifier_type,
                                                   opt.pretrained,
                                                   int(opt.nclasses))
    self.model = self.model.to(self.device)
    self.classifier = self.classifier.to(self.device)
    if opt.ngpu > 1:
        # Only the backbone is parallelized, matching the original setup.
        self.model = nn.DataParallel(self.model)

    # Loss criterion on the same device.
    self.loss = models.init_loss(opt.loss_type).to(self.device)

    # Optimization and scheduling.
    self.optimizer = utils.get_optimizer(self.model, self.opt)
    self.lr_scheduler = utils.get_lr_scheduler(self.opt, self.optimizer)
    self.alpha_scheduler = utils.get_margin_alpha_scheduler(self.opt)

    # Data pipelines.
    self.train_loader = datasets.generate_loader(opt, 'train')
    self.test_loader = datasets.generate_loader(opt, 'val')

    # Training-state bookkeeping.
    self.epoch = 0
    self.best_epoch = False
    self.training = False
    self.state = {}

    # Metric accumulators.
    self.train_loss = utils.AverageMeter()
    self.test_loss = utils.AverageMeter()
    self.batch_time = utils.AverageMeter()
    self.test_metrics = utils.ROCMeter()
    self.best_test_loss = utils.AverageMeter()
    self.best_test_loss.update(np.array([np.inf]))

    # Visdom visualization, logged to file under the experiment output path.
    self.visdom_log_file = os.path.join(self.opt.out_path, 'log_files', 'visdom.log')
    self.vis = Visdom(port=opt.visdom_port,
                      log_to_filename=self.visdom_log_file,
                      env=opt.exp_name + '_' + str(opt.fold))
    self.vis_loss_opts = {'xlabel': 'epoch', 'ylabel': 'loss',
                          'title': 'losses',
                          'legend': ['train_loss', 'val_loss']}
    self.vis_tpr_opts = {'xlabel': 'epoch', 'ylabel': 'tpr',
                         'title': 'val_tpr',
                         'legend': ['tpr@fpr10-2', 'tpr@fpr10-3', 'tpr@fpr10-4']}
    self.vis_epochloss_opts = {'xlabel': 'epoch', 'ylabel': 'loss',
                               'title': 'epoch_losses',
                               'legend': ['train_loss', 'val_loss']}
示例3: __init__
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import get_optimizer [as 別名]
def __init__(self, opt):
    """Assemble the trainer from ``opt``: model, loss, optimizer, scheduler,
    data loaders, metric meters, and Visdom logging."""
    self.opt = opt
    # Use CUDA whenever at least one GPU is requested.
    self.device = torch.device("cuda" if opt.ngpu else "cpu")

    # Backbone and classifier head; note this variant keys get_model on
    # loss_type rather than a separate classifier_type option.
    self.model, self.classifier = models.get_model(opt.net_type,
                                                   opt.loss_type,
                                                   opt.pretrained,
                                                   int(opt.nclasses))
    self.model = self.model.to(self.device)
    self.classifier = self.classifier.to(self.device)
    if opt.ngpu > 1:
        # Only the backbone is parallelized, matching the original setup.
        self.model = nn.DataParallel(self.model)

    # Loss criterion on the same device.
    self.loss = models.init_loss(opt.loss_type).to(self.device)

    # Optimization and scheduling.
    self.optimizer = utils.get_optimizer(self.model, self.opt)
    self.lr_scheduler = utils.get_lr_scheduler(self.opt, self.optimizer)

    # Data pipelines.
    self.train_loader = datasets.generate_loader(opt, 'train')
    self.test_loader = datasets.generate_loader(opt, 'val')

    # Training-state bookkeeping.
    self.epoch = 0
    self.best_epoch = False
    self.training = False
    self.state = {}

    # Metric accumulators; plain-average losses use AverageMeter, while
    # other loss types are evaluated with ROC-based metrics.
    self.train_loss = utils.AverageMeter()
    self.test_loss = utils.AverageMeter()
    self.batch_time = utils.AverageMeter()
    self.test_metrics = (utils.AverageMeter()
                         if self.opt.loss_type in ('cce', 'bce', 'mse', 'arc_margin')
                         else utils.ROCMeter())
    self.best_test_loss = utils.AverageMeter()
    self.best_test_loss.update(np.array([np.inf]))

    # Visdom visualization, logged to file under the experiment output path.
    self.visdom_log_file = os.path.join(self.opt.out_path, 'log_files', 'visdom.log')
    self.vis = Visdom(port=opt.visdom_port,
                      log_to_filename=self.visdom_log_file,
                      env=opt.exp_name + '_' + str(opt.fold))
    self.vis_loss_opts = {'xlabel': 'epoch', 'ylabel': 'loss',
                          'title': 'losses',
                          'legend': ['train_loss', 'val_loss']}
    self.vis_epochloss_opts = {'xlabel': 'epoch', 'ylabel': 'loss',
                               'title': 'epoch_losses',
                               'legend': ['train_loss', 'val_loss']}