This page collects typical usage examples of the logger.Log.update method in Python. If you have been wrestling with questions like "What exactly does Log.update do?", "How do I call Log.update?", or "What does Log.update look like in real code?", the hand-picked examples here may help. You can also explore further usage examples of the containing class, logger.Log.
Two code examples of the Log.update method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
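The two examples come from different projects, and each ships its own logger module; this page shows only the call sites, not the Log class itself. Example 1 constructs Log with a file path and calls start() and update(...), while Example 2 constructs it with name, hyperparams, saveDir, xLabel, and saveFrequency keyword arguments. If you just want to run the snippets without either original module, a permissive stub such as the following satisfies every call they make. This is a minimal sketch for experimentation, not the real API:

# logger.py -- hypothetical stand-in for the project-specific logger module.
# It accepts both construction styles seen in the examples on this page.
class Log:
    def __init__(self, path=None, **options):
        self.path = path          # Example 1 style: Log("./db.test.log")
        self.options = options    # Example 2 style: Log(name=..., hyperparams=..., ...)

    def start(self):
        print("log started:", self.path)

    def update(self, *args, **kwargs):
        # Example 1 passes a single status string such as 'DB_CONNECT_SUCCESS';
        # a training logger would presumably pass metric values as well.
        print("log update:", args, kwargs)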
Example 1: __init__
# Required import: from logger import Log [as alias]
# Alternatively: from logger.Log import update [as alias]
# Imports needed by this example (not shown on the original page):
import MySQLdb            # provided by the mysqlclient / MySQL-python package
import MySQLdb.cursors
from logger import Log

class ApolloDB:
    def __init__(self, host_="warhol-fred.psc.edu",
                 user_="apolloext", dbname_="apollo201", password_=None, logger_=None):
        self._host = host_
        self._user = user_
        self._dbname = dbname_
        self._password = password_
        self._conn = None
        self._DictCursor = None
        self._RegularCursor = None
        self.populationAxis = None
        # Fall back to a local log file when no logger is supplied.
        if logger_ is None:
            self.logger = Log("./db.test.log")
            self.logger.start()
        else:
            self.logger = logger_
        self.stateToPopulationDict = {'S': 'susceptible', 'E': 'exposed', 'I': 'infectious',
                                      'R': 'recovered', 'C': 'newly exposed',
                                      'V': 'received vaccine control measure',
                                      'Av': 'received antiviral control measure',
                                      'ScCl': 'school that is closed'}
        self.stateToDataFileDict = {'S': 'susceptible.txt', 'E': 'exposed.txt', 'I': 'infectious.txt',
                                    'R': 'recovered.txt', 'C': 'newly_exposed.txt',
                                    'V': 'vacc_administered.txt', 'Av': 'av_administered.txt'}

    def connect(self):
        if self._conn is not None:
            print("Connection to Apollo Database has already been established")
            return
        try:
            if self._password is None:
                self._conn = MySQLdb.connect(host=self._host, user=self._user, db=self._dbname)
            else:
                self._conn = MySQLdb.connect(host=self._host,
                                             user=self._user,
                                             passwd=self._password,
                                             db=self._dbname)
            self._conn.autocommit(True)
            self.logger.update('DB_CONNECT_SUCCESS')
        except MySQLdb.Error as e:
            print("Problem connecting to Apollo database: %d %s" % (e.args[0], e.args[1]))
            self.logger.update('DB_CONNECT_FAILED')
            raise
        self._cursor = self._conn.cursor(MySQLdb.cursors.DictCursor)
        # _populationAxis() is a helper defined elsewhere in the class (not shown here).
        self.populationAxis = self._populationAxis()
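A hypothetical way to exercise the class above (the credentials are illustrative, and connect() ends by calling the _populationAxis() helper that this page does not show):

db = ApolloDB(password_="secret")   # hypothetical password; defaults point at warhol-fred.psc.edu
db.connect()                        # logs 'DB_CONNECT_SUCCESS' or 'DB_CONNECT_FAILED' via Log.update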
Example 2: trainer
# Required import: from logger import Log [as alias]
# Alternatively: from logger.Log import update [as alias]
# Imports assumed from the surrounding project (only `from logger import Log`
# is confirmed by this page; the rest are inferred from the calls below):
# import os
# import pickle as pkl             # cPickle in the original Python 2 code
# import theano
# import theano.tensor as tensor
# import datasource
# plus the project helpers: load_dataset, build_dictionary, init_params,
# init_tparams, load_params, build_model, build_sentence_encoder,
# build_image_encoder, build_errors, itemlist, maxnorm, and the optimizers.

def trainer(load_from=None, save_dir="snapshots", name="anon", **kwargs):
    """
    :param load_from: location to load parameters + options from
    :param name: name of model, used as location to save parameters + options
    """
    curr_model = dict()

    # Load the old model, including parameters, but overwrite with new options.
    if load_from:
        print("reloading..." + load_from)
        with open("%s.pkl" % load_from, "rb") as f:
            curr_model = pkl.load(f)
    else:
        curr_model["options"] = {}
    for k, v in kwargs.items():
        curr_model["options"][k] = v
    model_options = curr_model["options"]

    # Initialize the logger.
    import datetime
    timestampedName = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + "_" + name
    from logger import Log
    log = Log(name=timestampedName, hyperparams=model_options, saveDir="vis/training",
              xLabel="Examples Seen", saveFrequency=1)
    print(curr_model["options"])

    # Load training and development sets.
    print("Loading dataset")
    dataset = load_dataset(model_options["data"], cnn=model_options["cnn"], load_train=True)
    train = dataset["train"]
    dev = dataset["dev"]

    # Create the dictionary.
    print("Creating dictionary")
    worddict = build_dictionary(train["caps"] + dev["caps"])
    print("Dictionary size: " + str(len(worddict)))
    curr_model["worddict"] = worddict
    curr_model["options"]["n_words"] = len(worddict) + 2

    # Save the model options and dictionary.
    with open("%s/%s.pkl" % (save_dir, name), "wb") as f:
        pkl.dump(curr_model, f)

    print("Loading data")
    train_iter = datasource.Datasource(train, batch_size=model_options["batch_size"], worddict=worddict)
    dev = datasource.Datasource(dev, worddict=worddict)
    dev_caps, dev_ims = dev.all()

    print("Building model")
    params = init_params(model_options)
    # Reload parameters if a snapshot exists.
    if load_from is not None and os.path.exists(load_from):
        params = load_params(load_from, params)
    tparams = init_tparams(params)
    inps, cost = build_model(tparams, model_options)

    print("Building sentence encoder")
    inps_se, sentences = build_sentence_encoder(tparams, model_options)
    f_senc = theano.function(inps_se, sentences, profile=False)

    print("Building image encoder")
    inps_ie, images = build_image_encoder(tparams, model_options)
    f_ienc = theano.function(inps_ie, images, profile=False)

    print("Building f_grad...", end=" ")
    grads = tensor.grad(cost, wrt=itemlist(tparams))

    print("Building errors..")
    inps_err, errs = build_errors(model_options)
    f_err = theano.function(inps_err, errs, profile=False)

    curr_model["f_senc"] = f_senc
    curr_model["f_ienc"] = f_ienc
    curr_model["f_err"] = f_err

    if model_options["grad_clip"] > 0.0:
        grads = [maxnorm(g, model_options["grad_clip"]) for g in grads]

    lr = tensor.scalar(name="lr")
    print("Building optimizers...", end=" ")
    # f_grad_shared computes the gradients; f_update applies the parameter updates.
    f_grad_shared, f_update = eval(model_options["optimizer"])(lr, tparams, grads, inps, cost)

    print("Optimization")
    uidx = 0
    curr = 0
    n_samples = 0
    for eidx in range(model_options["max_epochs"]):
        # ......... the rest of the code is omitted here .........
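The Log.update call itself falls inside the omitted part of this example, so its exact signature here is unknown. Given the constructor arguments above (xLabel="Examples Seen", saveFrequency=1), a plausible per-batch sketch might look like the following; every line is an assumption, not the original omitted code:

# Hypothetical inner loop body; names reuse the variables defined above.
for x, mask, im in train_iter:
    n_samples += x.shape[1]                       # examples seen so far
    uidx += 1
    cost = f_grad_shared(x, mask, im)             # forward/backward pass
    f_update(model_options["lrate"])              # assumed learning-rate option name
    log.update({"cost": float(cost)}, n_samples)  # assumed signature: metrics dict + x-axis value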