This article collects typical usage examples of config.config.lr in Python. If you are wondering how to use config.lr, or are looking for working examples of it, the curated code samples below should help. You can also explore further usage examples of the module it belongs to, config.config.
The following presents 13 code examples of config.lr, sorted by popularity by default.
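Before the examples, here is a minimal, self-contained sketch of the pattern they all share: a module-level config object exposing an lr field that training code reads and occasionally overwrites. The Config class and its default values below are illustrative assumptions, not taken from any of the projects quoted here.

class Config:
    lr = 1e-4        # current learning rate (hypothetical default)
    restore = False  # whether to resume from saved weights

config = Config()

def trainStep():
    # consumers simply read config.lr whenever they need the current rate
    print("training with lr =", config.lr)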
Example 1: logInit
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def logInit():
    with open(config.logFile(), "a+") as outFile:
        writeline(outFile, config.expName)
        headers = ["epoch", "trainAcc", "valAcc", "trainLoss", "valLoss"]
        if config.evalTrain:
            headers += ["evalTrainAcc", "evalTrainLoss"]
        if config.extra:
            if config.evalTrain:
                headers += ["thAcc", "thLoss"]
            headers += ["vhAcc", "vhLoss"]
        headers += ["time", "lr"]
        writelist(outFile, headers)
        # lr assumed to be last
# Writes log record to file
Example 2: loadWeights
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def loadWeights(sess, saver, init):
    if config.restoreEpoch > 0 or config.restore:
        # restore the last epoch only if restoreEpoch isn't set
        if config.restoreEpoch == 0:
            # restore last logged epoch
            config.restoreEpoch, config.lr = lastLoggedEpoch()
        print(bcolored("Restoring epoch {} and lr {}".format(config.restoreEpoch, config.lr), "cyan"))
        print(bcolored("Restoring weights", "blue"))
        saver.restore(sess, config.weightsFile(config.restoreEpoch))
        epoch = config.restoreEpoch
    else:
        print(bcolored("Initializing weights", "blue"))
        sess.run(init)
        logInit()
        epoch = 0

    return epoch
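Note the side effect when resuming: loadWeights overwrites both config.restoreEpoch and config.lr from the last log line, so a restored run continues with the learning rate it had decayed to rather than the initially configured one.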
###################################### training / evaluation ######################################
# Chooses which data to train on (main / extra).
Example 3: improveEnough
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def improveEnough(curr, prior, lr):
    prevRes = prior["prev"]["res"]
    currRes = curr["res"]

    if prevRes is None:
        return True

    prevTrainLoss = prevRes["train"]["loss"]
    currTrainLoss = currRes["train"]["loss"]
    lossDiff = prevTrainLoss - currTrainLoss

    notImprove = ((lossDiff < 0.015 and prevTrainLoss < 0.5 and lr > 0.00002) or
                  (lossDiff < 0.008 and prevTrainLoss < 0.15 and lr > 0.00001) or
                  (lossDiff < 0.003 and prevTrainLoss < 0.10 and lr > 0.000005))
    # (prevTrainLoss < 0.2 and config.lr > 0.000015)

    return not notImprove
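The boolean returned here is meant to drive a learning-rate schedule; a hedged sketch of such a caller follows (the 0.5 decay factor and the floor value are assumptions for illustration, not from the source):

# Hypothetical scheduler step (decay factor and floor are assumptions):
# shrink the learning rate whenever improvement has stalled.
if not improveEnough(curr, prior, config.lr):
    config.lr = max(config.lr * 0.5, 5e-6)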
Example 4: logRecord
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def logRecord(epoch, epochTime, lr, trainRes, evalRes, extraEvalRes):
    with open(config.logFile(), "a+") as outFile:
        record = [epoch, trainRes["acc"], evalRes["val"]["acc"], trainRes["loss"], evalRes["val"]["loss"]]
        if config.evalTrain:
            record += [evalRes["evalTrain"]["acc"], evalRes["evalTrain"]["loss"]]
        if config.extra:
            if config.evalTrain:
                record += [extraEvalRes["evalTrain"]["acc"], extraEvalRes["evalTrain"]["loss"]]
            record += [extraEvalRes["val"]["acc"], extraEvalRes["val"]["loss"]]
        record += [epochTime, lr]
        writelist(outFile, record)
# Gets last logged epoch and learning rate
Example 5: lastLoggedEpoch
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def lastLoggedEpoch():
    with open(config.logFile(), "r") as inFile:
        lastLine = list(inFile)[-1].split(",")
    epoch = int(lastLine[0])
    lr = float(lastLine[-1])
    return epoch, lr
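Because logInit writes lr as the last header and logRecord appends it as the last field, indexing lastLine[0] and lastLine[-1] recovers both values. A worked example with a hypothetical log line:

# Hypothetical last line of the log file:
lastLine = "25,0.912,0.874,0.213,0.305,418.2,0.0001".split(",")
epoch = int(lastLine[0])    # -> 25
lr = float(lastLine[-1])    # -> 0.0001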
################################## printing, output and analysis ##################################
# Analysis by type
Example 6: createFeedDict
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def createFeedDict(self, data, images, train):
    feedDict = {
        self.questionsIndicesAll: data["questions"],
        self.questionLengthsAll: data["questionLengths"],
        self.imagesPlaceholder: images["images"],
        self.answersIndicesAll: data["answers"],

        self.dropouts["encInput"]: config.encInputDropout if train else 1.0,
        self.dropouts["encState"]: config.encStateDropout if train else 1.0,
        self.dropouts["stem"]: config.stemDropout if train else 1.0,
        self.dropouts["question"]: config.qDropout if train else 1.0,
        self.dropouts["memory"]: config.memoryDropout if train else 1.0,
        self.dropouts["read"]: config.readDropout if train else 1.0,
        self.dropouts["write"]: config.writeDropout if train else 1.0,
        self.dropouts["output"]: config.outputDropout if train else 1.0,
        # self.dropouts["questionOut"]: config.qDropoutOut if train else 1.0,
        # self.dropouts["questionMAC"]: config.qDropoutMAC if train else 1.0,

        self.lr: config.lr,
        self.train: train
    }

    # if config.tempDynamic:
    #     feedDict[self.tempAnnealRate] = tempAnnealRate

    return feedDict
# Splits data to a specific GPU (tower) for parallelization
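For context, the returned dictionary is presumably passed straight to session.run for a training step; a hedged sketch with assumed op names follows (lossOp and trainOp are not from the source):

# Hypothetical training step (op names are assumptions):
feedDict = self.createFeedDict(data, images, train=True)
loss, _ = sess.run([self.lossOp, self.trainOp], feed_dict=feedDict)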
Example 7: addOptimizerOp
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def addOptimizerOp(self):
    with tf.variable_scope("trainAddOptimizer"):
        self.globalStep = tf.Variable(0, dtype = tf.int32, trainable = False, name = "globalStep")  # init to 0 every run?
        optimizer = tf.train.AdamOptimizer(learning_rate = self.lr)
    return optimizer
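Because self.lr is a placeholder (see addPlaceholders in Example 12), the value fed by createFeedDict can change between steps without rebuilding the graph. A minimal sketch of how the returned optimizer is presumably wired up (self.loss is an assumption):

# Hypothetical continuation (self.loss is assumed):
optimizer = self.addOptimizerOp()
self.trainOp = optimizer.minimize(self.loss, global_step=self.globalStep)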
Example 8: optimizer
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def optimizer(self):
    lr = tf.get_variable('learning_rate', initializer=FLAGS.lr * (FLAGS.batch / 256.0),
                         trainable=False)
    if FLAGS.optimizer == 'momentum':
        opt = tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
    else:
        assert FLAGS.optimizer == 'adam'
        opt = tf.train.AdamOptimizer(lr)
    return opt
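The FLAGS.batch / 256.0 factor is the linear scaling rule: the base rate is assumed to be tuned for batch size 256 and is scaled proportionally for other batch sizes. A worked example with assumed values:

# Assumed values for illustration:
base_lr, batch = 0.1, 512
scaled_lr = base_lr * (batch / 256.0)  # -> 0.2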
Example 9: get_config
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def get_config(model):
    nr_tower = max(get_num_gpu(), 1)
    assert FLAGS.batch % nr_tower == 0
    batch = FLAGS.batch // nr_tower
    logger.info("Running on {} towers. Batch size per tower: {}".format(nr_tower, batch))

    data = QueueInput(get_dataflow(FLAGS.train_list_filename, batch))

    # learning rate
    START_LR = FLAGS.lr
    BASE_LR = START_LR * (FLAGS.batch / 256.0)
    lr_list = []
    for idx, decay_point in enumerate(FLAGS.lr_decay_points):
        lr_list.append((decay_point, BASE_LR * 0.1 ** idx))

    callbacks = [
        ScopeModelSaver(checkpoint_dir=FLAGS.RHP_savepath, scope='RHP'),
        EstimatedTimeLeft(),
        ScheduledHyperParamSetter('learning_rate', lr_list),
    ]
    if get_num_gpu() > 0:
        callbacks.append(GPUUtilizationTracker())

    return TrainConfig(
        model=model,
        data=data,
        callbacks=callbacks,
        steps_per_epoch=FLAGS.steps_per_epoch // FLAGS.batch,
        max_epoch=FLAGS.max_epoch,
        session_init=MultipleRestore()
    )
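Each entry of lr_list pairs a decay epoch with the base rate shrunk by a further factor of 10 (note the first entry keeps the base rate itself). For hypothetical flag values lr=0.1, batch=256, and lr_decay_points=[30, 60, 90], the schedule handed to ScheduledHyperParamSetter would be:

# Worked example with assumed flag values:
BASE_LR = 0.1 * (256 / 256.0)
lr_list = [(p, BASE_LR * 0.1 ** i) for i, p in enumerate([30, 60, 90])]
# -> approximately [(30, 0.1), (60, 0.01), (90, 0.001)]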
Example 10: renew_everything
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def renew_everything(self):
    # renew dataloader.
    self.loader = DL.dataloader(config)
    self.loader.renew(min(floor(self.resl), self.max_resl))

    # define tensors
    self.z = torch.FloatTensor(self.loader.batchsize, self.nz)
    self.x = torch.FloatTensor(self.loader.batchsize, 3, self.loader.imsize, self.loader.imsize)
    self.x_tilde = torch.FloatTensor(self.loader.batchsize, 3, self.loader.imsize, self.loader.imsize)
    self.real_label = torch.FloatTensor(self.loader.batchsize).fill_(1)
    self.fake_label = torch.FloatTensor(self.loader.batchsize).fill_(0)

    # enable cuda
    if self.use_cuda:
        self.z = self.z.cuda()
        self.x = self.x.cuda()
        self.x_tilde = self.x_tilde.cuda()  # each tensor gets its own .cuda() call; aliasing x_tilde to x would be a bug
        self.real_label = self.real_label.cuda()
        self.fake_label = self.fake_label.cuda()
        torch.cuda.manual_seed(config.random_seed)

    # wrapping autograd Variable.
    self.x = Variable(self.x)
    self.x_tilde = Variable(self.x_tilde)
    self.z = Variable(self.z)
    self.real_label = Variable(self.real_label)
    self.fake_label = Variable(self.fake_label)

    # ship new model to cuda.
    if self.use_cuda:
        self.G = self.G.cuda()
        self.D = self.D.cuda()

    # optimizer
    betas = (self.config.beta1, self.config.beta2)
    if self.optimizer == 'adam':
        self.opt_g = Adam(filter(lambda p: p.requires_grad, self.G.parameters()), lr=self.lr, betas=betas, weight_decay=0.0)
        self.opt_d = Adam(filter(lambda p: p.requires_grad, self.D.parameters()), lr=self.lr, betas=betas, weight_decay=0.0)
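A design note: the optimizers are recreated from scratch on every call because progressive growing adds layers to G and D at each resolution change, so the previous optimizers' parameter lists (and Adam moment buffers) would otherwise go stale.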
Example 11: renew_everything
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def renew_everything(self):
    '''Renew the dataloader
    '''
    self.loader = dl.dataloader(self.config)
    self.loader.renew(min(floor(self.resl), self.max_resl))

    # Define tensors
    self.z = torch.FloatTensor(self.loader.batchsize, self.nz)
    self.x = torch.FloatTensor(self.loader.batchsize, 3, self.loader.imsize, self.loader.imsize)
    self.x_tilde = torch.FloatTensor(self.loader.batchsize, 3, self.loader.imsize, self.loader.imsize)
    self.real_label = torch.FloatTensor(self.loader.batchsize).fill_(1)
    self.fake_label = torch.FloatTensor(self.loader.batchsize).fill_(0)

    # Enable CUDA
    if self.use_cuda:
        self.z = self.z.cuda()
        self.x = self.x.cuda()
        self.x_tilde = self.x_tilde.cuda()
        self.real_label = self.real_label.cuda()
        self.fake_label = self.fake_label.cuda()
        torch.cuda.manual_seed(config.random_seed)

    # Wrapping `autograd.Variable`
    self.x = Variable(self.x)
    self.x_tilde = Variable(self.x_tilde)
    self.z = Variable(self.z)
    self.real_label = Variable(self.real_label)
    self.fake_label = Variable(self.fake_label)

    # Ship new model to CUDA
    if self.use_cuda:
        self.G = self.G.cuda()
        self.D = self.D.cuda()

    # Setup the optimizer
    betas = (self.config.beta1, self.config.beta2)
    if self.optimizer == 'adam':
        self.opt_g = Adam(filter(lambda p: p.requires_grad, self.G.parameters()), lr=self.lr, betas=betas, weight_decay=0.0)
        self.opt_d = Adam(filter(lambda p: p.requires_grad, self.D.parameters()), lr=self.lr, betas=betas, weight_decay=0.0)
Example 12: addPlaceholders
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def addPlaceholders(self):
    with tf.variable_scope("Placeholders"):
        ## data
        # questions
        self.questionsIndicesAll = tf.placeholder(tf.int32, shape = (None, None))
        self.questionLengthsAll = tf.placeholder(tf.int32, shape = (None, ))

        # images
        # put image known dimension as last dim?
        self.imagesPlaceholder = tf.placeholder(tf.float32, shape = (None, None, None, None))
        self.imagesAll = tf.transpose(self.imagesPlaceholder, (0, 2, 3, 1))
        # self.imageH = tf.shape(self.imagesAll)[1]
        # self.imageW = tf.shape(self.imagesAll)[2]

        # answers
        self.answersIndicesAll = tf.placeholder(tf.int32, shape = (None, ))

        ## optimization
        self.lr = tf.placeholder(tf.float32, shape = ())
        self.train = tf.placeholder(tf.bool, shape = ())
        self.batchSizeAll = tf.shape(self.questionsIndicesAll)[0]

        ## dropouts
        # TODO: change dropouts to be 1 - current
        self.dropouts = {
            "encInput": tf.placeholder(tf.float32, shape = ()),
            "encState": tf.placeholder(tf.float32, shape = ()),
            "stem": tf.placeholder(tf.float32, shape = ()),
            "question": tf.placeholder(tf.float32, shape = ()),
            # "questionOut": tf.placeholder(tf.float32, shape = ()),
            # "questionMAC": tf.placeholder(tf.float32, shape = ()),
            "read": tf.placeholder(tf.float32, shape = ()),
            "write": tf.placeholder(tf.float32, shape = ()),
            "memory": tf.placeholder(tf.float32, shape = ()),
            "output": tf.placeholder(tf.float32, shape = ())
        }

        # batch norm params
        self.batchNorm = {"decay": config.bnDecay, "train": self.train}

        # if config.parametricDropout:
        #     self.dropouts["question"] = parametricDropout("qDropout", self.train)
        #     self.dropouts["read"] = parametricDropout("readDropout", self.train)
        # else:
        #     self.dropouts["question"] = self.dropouts["_q"]
        #     self.dropouts["read"] = self.dropouts["_read"]

        # if config.tempDynamic:
        #     self.tempAnnealRate = tf.placeholder(tf.float32, shape = ())

        self.H, self.W, self.imageInDim = config.imageDims
# Feeds data into placeholders. See addPlaceholders method for further details.
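Note that self.lr is a scalar placeholder rather than a constant or a variable: the actual value is supplied per batch by createFeedDict (Example 6), which reads it from config.lr, so the schedule can be adjusted from Python between steps.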
Example 13: __init__
# Required module import: from config import config [as alias]
# Or: from config.config import lr [as alias]
def __init__(self, config):
    self.config = config

    if torch.cuda.is_available():
        self.use_cuda = True
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        self.use_cuda = False
        torch.set_default_tensor_type('torch.FloatTensor')

    self.nz = config.nz
    self.optimizer = config.optimizer

    self.resl = 2  # we start with resolution 2^2 = 4
    self.lr = config.lr
    self.eps_drift = config.eps_drift
    self.smoothing = config.smoothing
    self.max_resl = config.max_resl
    self.trns_tick = config.trns_tick
    self.stab_tick = config.stab_tick
    self.TICK = config.TICK
    self.global_iter = 0
    self.global_tick = 0
    self.kimgs = 0
    self.stack = 0
    self.epoch = 0
    self.fadein = {'gen': None, 'dis': None}
    self.complete = {'gen': 0, 'dis': 0}
    self.phase = 'init'
    self.flag_flush_gen = False
    self.flag_flush_dis = False
    self.flag_add_noise = self.config.flag_add_noise
    self.flag_add_drift = self.config.flag_add_drift

    # Network settings
    self.G = Generator(config)
    print('Generator architecture:\n{}'.format(self.G.model))
    self.D = Discriminator(config)
    print('Discriminator architecture:\n{}'.format(self.D.model))
    self.criterion = nn.MSELoss()

    if self.use_cuda:
        self.criterion = self.criterion.cuda()
        torch.cuda.manual_seed(config.random_seed)
        if config.n_gpu == 1:
            self.G = nn.DataParallel(self.G).cuda(device=0)
            self.D = nn.DataParallel(self.D).cuda(device=0)
        else:
            gpus = []
            for i in range(config.n_gpu):
                gpus.append(i)
            self.G = nn.DataParallel(self.G, device_ids=gpus).cuda()
            self.D = nn.DataParallel(self.D, device_ids=gpus).cuda()

    # Define tensors, ship model to cuda, and get dataloader
    self.renew_everything()

    # Tensorboard
    self.use_tb = config.use_tb
    if self.use_tb:
        self.tb = tensorboard.tf_recorder()
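To round out the example, a hedged construction sketch; the class name Trainer and the train entry point are assumptions, not shown in the source:

# Hypothetical usage (class and method names are assumptions):
from config import config
trainer = Trainer(config)  # builds G/D and calls renew_everything()
trainer.train()            # assumed training entry point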