本文整理汇总了Python中utils.logger函数的典型用法代码示例。如果您正苦于以下问题:Python logger函数的具体用法?Python logger怎么用?Python logger使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了logger函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: fixfilebase
def fixfilebase(self, f):
    """Rebase *f* from the expected path prefix onto the configured new path.

    Also verifies that the schema file referenced by the rebased path
    exists on disk, logging an error line when it does not.
    """
    assert f.startswith(self.expectedpath), [f, self.expectedpath]
    rebased = self.newpath + f[len(self.expectedpath):]
    # the path may look like "prefix:file#fragment"; isolate the file part
    schemafile = rebased.split(':')[-1].split('#')[0]
    if not cmn.fexists(schemafile):
        logger('#err ...schema file {0} not found\n'.format(schemafile))
    return rebased
示例2: main
def main(args):
    """Dispatch extraction/validation according to command-line *args*.

    Requires an -out target; refuses to overwrite an existing output file
    unless -dbg or -overwrite-outfile is given. Unexpected errors are
    logged rather than propagated.
    """
    if not args.has('-config'):
        args.add_key('-config', "./config.json")
    logger(str([args.keys, args.args()]) + '\n')
    if not args.has('-out'):
        logger('#__noOutFileGiven___\n')
        return
    if not args.has('-dbg') and (cmn.fexists(args['-out']) and not args.has('-overwrite-outfile')):
        logger('#__outfile:{0} exists\n'.format(args['-out']))
        return
    # fix: the try/except had been commented out (debug leftover), leaving
    # an unreachable `else:` branch that referenced an undefined name `err`
    try:
        if args.has('-extract'):
            import sraparse
            return sraparse.SRAParseObjSet.extract_attributes_to_json(args.args())
        elif args.has("-test-sample"):
            testargs = ["./examples/samples.xml", "-config:{0}".format(args['-config']), "-out:./examples/samples.versioned.xml"]
            validate_sample.main(Config(testargs))
        elif args.has("-sample"):
            validate_sample.main(args)
        elif args.has("-experiment"):
            validate_experiment.main(args)
        else:
            raise NotImplementedError("#__unknownArguments__")
    except Exception as err:
        logger('#__unexpected__\n')
        # str(err) instead of err.message: .message is Python-2-only
        logger(str(err) + '\n')
示例3: E
def E(level=1):
    """Evaluate landmark prediction error over the dataset listed in TXT.

    Args:
        level: which cascade predictor to test; 0 uses the LEVEL-1 F CNN
            only, 1/2/3 select the corresponding level module.

    Returns:
        numpy array of shape (len(data), 5): per-image evaluation error.
    """
    if level == 0:
        from common import level1 as P
        P = partial(P, FOnly=True) # high order function, here we only test LEVEL-1 F CNN
    elif level == 1:
        from level import level1 as P
    elif level == 2:
        from level import level2 as P
    else:
        from level import level3 as P
    data = getDataFromTxt(TXT)
    error = np.zeros((len(data), 5))
    for i in range(len(data)):
        imgPath, bbox, landmarkGt = data[i]
        # NOTE(review): cv2.CV_LOAD_IMAGE_GRAYSCALE is the legacy OpenCV-2
        # constant (removed in OpenCV 3+, where it is cv2.IMREAD_GRAYSCALE)
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)
        # predicted landmark in bbox-relative coordinates
        landmarkP = P(img, bbox)
        # NOTE(review): plot_point is not defined here -- presumably a
        # module-level container collecting predictions; confirm.
        plot_point[i] = landmarkP
        # reproject both prediction and ground truth into image coordinates
        landmarkP = bbox.reprojectLandmark(landmarkP)
        landmarkGt = bbox.reprojectLandmark(landmarkGt)
        error[i] = evaluateError(landmarkGt, landmarkP, bbox)
    return error
示例4: start
def start(self):
    """Run the target synchronously, mimicking threading.Thread.start().

    Stores the target's result in self.return_value and logs the call.

    Raises:
        RuntimeError: if this fake thread was already started once.
    """
    if self.started:
        raise RuntimeError()
    self.started = True
    self.return_value = self.target(*self.args, **self.kwargs)
    logger('fake_thread_started', self.target.__name__)
示例5: obj_id
def obj_id(self, e):
    """Derive a stable identifier for element *e* from its '@idblock'.

    Joins the alias/refname/accession components (in that order) with
    '.', passed through the sanitizer; returns 'unknown' when no id
    component is present or on any error.
    """
    try:
        idblock = e.get('@idblock', dict())
        parts = [idblock[key] for key in ('alias', 'refname', 'accession') if key in idblock]
        if not parts:
            return 'unknown'
        return self.sanitizer.filter_alphan('.'.join(parts), '.-_')
    except Exception as exc:
        logger('#__couldNotExactId__:{0}\n'.format(exc))
        return 'unknown'
示例6: get_words
def get_words(terminals, landmarks, rel=None):
    """Sample one word per terminal symbol, conditioned on its landmark.

    Args:
        terminals: sequence of POS / nonterminal symbols to expand.
        landmarks: landmark object (or None) paired with each terminal.
        rel: optional relation object providing distance/degree context.

    Returns:
        (words, p, H): sampled words, the product of their sample
        probabilities, and the sum of their sampling entropies.
    """
    words = []
    probs = []
    entropy = []
    for n, lmk in zip(terminals, landmarks):
        # if we could not get an expansion for the LHS, we just pass down the
        # unexpanded nonterminal symbol; it gets probability 1 and entropy 0
        if n in NONTERMINALS:
            words.append(n)
            probs.append(1.0)
            entropy.append(0.0)
            continue
        lmk_class = (lmk.object_class if lmk else None)
        lmk_color = (lmk.color if lmk else None)
        rel_class = rel_type(rel)
        dist_class = (rel.measurement.best_distance_class if hasattr(rel, 'measurement') else None)
        deg_class = (rel.measurement.best_degree_class if hasattr(rel, 'measurement') else None)
        cp_db = CWord.get_word_counts(pos=n,
                                      lmk_class=lmk_class,
                                      lmk_ori_rels=get_lmk_ori_rels_str(lmk),
                                      lmk_color=lmk_color,
                                      rel=rel_class,
                                      rel_dist_class=dist_class,
                                      rel_deg_class=deg_class)
        if cp_db.count() <= 0:
            logger( 'Could not expand %s (lmk_class: %s, lmk_color: %s, rel: %s, dist_class: %s, deg_class: %s)' % (n, lmk_class, lmk_color, rel_class, dist_class, deg_class) )
            # NOTE(review): appending to `terminals` while iterating it looks
            # suspicious -- the NONTERMINALS branch above appends to `words`;
            # confirm this is intentional before changing it.
            terminals.append( n )
            continue
        logger( 'Expanded %s (lmk_class: %s, lmk_color: %s, rel: %s, dist_class: %s, deg_class: %s)' % (n, lmk_class, lmk_color, rel_class, dist_class, deg_class) )
        # fix: aggregate counts per word in a single pass; the previous code
        # first built an unaggregated (ckeys, ccounts) pair from cp_db.all()
        # that was immediately overwritten -- dead work and an extra DB pass
        ccounter = {}
        for cword in cp_db.all():
            if cword.word in ccounter:
                ccounter[cword.word] += cword.count
            else:
                ccounter[cword.word] = cword.count
        ckeys, ccounts = zip(*ccounter.items())
        # normalize counts into a categorical distribution and sample
        ccounts = np.array(ccounts, dtype=float)
        ccounts /= ccounts.sum()
        w, w_prob, w_entropy = categorical_sample(ckeys, ccounts)
        words.append(w)
        probs.append(w_prob)
        entropy.append(w_entropy)
    p, H = np.prod(probs), np.sum(entropy)
    return words, p, H
示例7: get_sentence_meaning_likelihood
def get_sentence_meaning_likelihood(sentence, lmk, rel):
    """Score *sentence* against landmark *lmk* and relation *rel*.

    Parses the modified parse of the sentence, prints the tree, and logs
    an error when the probability product collapses to zero.

    Returns:
        (probability, entropy, lrpc, tps) from get_tree_probs.
    """
    modparse = get_modparse(sentence)
    t = ParentedTree.parse(modparse)
    print('\n%s\n' % t.pprint())
    probs, entropies, lrpc, tps = get_tree_probs(t, lmk, rel)
    # hoisted: np.prod(probs) was computed twice (once for the check,
    # once for the return value)
    prob_product = np.prod(probs)
    if prob_product == 0.0:
        logger('ERROR: Probability product is 0 for sentence: %s, lmk: %s, rel: %s, probs: %s' % (sentence, lmk, rel, str(probs)))
    return prob_product, sum(entropies), lrpc, tps
示例8: __init__
def __init__(self, sra, validators):
    """Initialize the sample validator and normalize every record's tags."""
    super(SampleValidator, self).__init__(validators)
    self.sra = sra
    # lower-case tag text and turn spaces into underscores
    self.normalize = lambda text: text.lower().replace(' ', '_')
    self.xmljson = self.sra.obj_xmljson()
    for _xml, record in self.xmljson:
        logger('\n#__normalizingTags:{0}\n'.format(record['title']))
        record['attributes'] = self.normalize_tags(record['attributes'])
    logger("\n\n")
示例9: validate_semantics
def validate_semantics(self, attrs):
    """Check semantic rules on a sample's attribute dict.

    Currently enforces that donor age in years is capped: an integer
    donor_age above 90 is an error (must be entered as "90+").

    Returns:
        bool: True when the attributes pass, False otherwise.
    """
    attributes = attrs['attributes']
    # fix: also require 'donor_age' to be present -- the original indexed
    # attributes['donor_age'] after checking only 'donor_age_unit', which
    # raised KeyError when the age itself was missing
    if ('donor_age_unit' in attributes and attributes['donor_age_unit'] == 'year'
            and 'donor_age' in attributes and isinstance(attributes['donor_age'], int)):
        age = int(attributes['donor_age'])
        if age > 90:
            logger('#__error: Donors over 90 years of age should be entered as "90+"\n')
            return False
    return True
示例10: run_once
def run_once(self, make_thread=True, last_update_id=None, update_timeout=30):
    """Check the messages for commands and build a Thread or FakeThread per command.

    Args:
        make_thread:
            True: build threading.Thread objects (not yet started).
            False: build FakeThread objects (not yet started).
        last_update_id:
            the offset arg from getUpdates; kept up to date within this function.
        update_timeout:
            timeout in seconds for fetching updates; may be None for no timeout.

    Returns:
        A tuple of two elements. The first is a dict of not-yet-started
        Threads/FakeThreads keyed by (message text, chat id); the second
        is the updated last_update_id.
    """
    if make_thread:
        ch_Thread = threading.Thread
    else:
        ch_Thread = FakeThread
    bot_name = self.bot.username
    threads = {}
    # register a writable slot for this fetch; get_updates checks it before
    # publishing results into self._last_updates
    self._getupdates_can_write.append(True)
    get_updates_index = len(self._getupdates_can_write) - 1
    get_updates_thread = threading.Thread(target=self.get_updates,
                                          kwargs={'index': get_updates_index,
                                                  'offset': last_update_id})
    get_updates_thread.start()
    get_updates_thread.join(timeout=update_timeout)
    # NOTE(review): isAlive() is the Python-2 spelling (is_alive() in py3)
    if get_updates_thread.isAlive():
        # fetch timed out: forbid the late writer and proceed with no updates
        logger('ERROR getupdates timed out, using empty list')
        self._getupdates_can_write[get_updates_index] = False
        self._last_updates = []
    updates = self._last_updates
    for update in updates:
        last_update_id = update.update_id + 1
        message = update.message
        # guard so message.text[0] below never IndexErrors on empty text
        if len(message.text) == 0:
            message.text = ' '
        if message.text[0] == '/':
            # '/cmd@botname arg...' -> command '/cmd', addressed bot name
            command, username = message.text.split(' ')[0], bot_name
            if '@' in command:
                command, username = command.split('@')
            if username == bot_name:
                command_func = self._get_command_func(command)
                if command_func is not None:
                    self.bot.sendChatAction(chat_id=update.message.chat.id, action=telegram.ChatAction.TYPING)
                    if self.isValidCommand is None or self.isValidCommand(update):
                        t = ch_Thread(target=command_func, args=(update,))
                        threads[(message.text, update.message.chat.id)] = t
                    else:
                        t = ch_Thread(target=self._command_not_valid, args=(update,))
                        threads[(message.text + ' unauthorized', update.message.chat.id)] = t
                else:
                    t = ch_Thread(target=self._command_not_found, args=(update,))
                    threads[(message.text + ' not found', update.message.chat.id)] = t
    return threads, last_update_id
示例11: from_sra_main_to_attributes
def from_sra_main_to_attributes(self, hashed):
    """Promote the SRA-block library_strategy into the attributes dict.

    When an attribute with the same meaning already exists, it is moved
    aside under 'LIBRARY_STRATEGY_IHEC' (with a warning) and the SRA
    value wins as attributes['LIBRARY_STRATEGY'].
    """
    if 'library_strategy' in hashed:
        attributes = hashed['attributes']
        conflict = None
        # prefer the upper-case spelling when both are present
        for candidate in ('LIBRARY_STRATEGY', 'library_strategy'):
            if candidate in attributes:
                conflict = candidate
                break
        if conflict is not None:
            attributes['LIBRARY_STRATEGY_IHEC'] = attributes[conflict]
            displaced = attributes.pop(conflict)
            logger("#warn:__library_strategy__ defined in both SRA block and as IHEC attribute:{0}, value pushed into 'LIBRARY_STRATEGY_IHEC'\n".format(displaced))
        attributes['LIBRARY_STRATEGY'] = [hashed['library_strategy']]
    return hashed
示例12: extract_additional_experiment_attributes
def extract_additional_experiment_attributes(self, obj, hashed):
    """Backfill 'library_strategy' from the SEQUENCING_LIBRARY_STRATEGY node.

    When *hashed* lacks a usable 'library_strategy', looks for the optional
    ``.//SEQUENCING_LIBRARY_STRATEGY`` element of *obj* and, if exactly one
    match is found, copies its text into hashed['library_strategy'].

    Returns:
        the (possibly updated) *hashed* dict.
    """
    strategy = hashed.get("library_strategy", "" ).strip()
    if not strategy:
        # extract_optional presumably returns a list of matching XML
        # elements (strategy[0].text is read below) -- TODO confirm
        strategy = self.extract_optional(obj, ".//SEQUENCING_LIBRARY_STRATEGY")
        # NOTE(review): reconstructed nesting -- this warn/update pair only
        # makes sense when `strategy` is the extracted element list (with a
        # plain string, len()>1 and [0].text would misbehave), so it is
        # placed inside the fallback branch; confirm against the original.
        if not strategy or len(strategy) > 1:
            logger("#warn__: cannot parse 'library_strategy' or 'library_sequencing_strategy'.. {0}\n ".format(str(strategy)))
        else:
            logger("#warn__: updated 'library_strategy' with 'library_sequencing_strategy'.. {0}\n ".format(str(strategy[0].text)))
            hashed["library_strategy"] = strategy[0].text.strip()
    return hashed
示例13: get_updates
def get_updates(self, *args, index, offset, **kwargs):
    """Fetch updates from the bot, falling back to an empty list on error.

    Publishes the result into self._last_updates unless the slot at
    *index* has been marked unwritable (e.g. the caller timed out).

    Returns:
        the fetched (or empty) list of updates.
    """
    try:
        fetched = self.bot.getUpdates(*args, offset=offset, **kwargs)
    except Exception as exc:
        fetched = []
        logger('because an error occoured updates will be empty id:', index, type(exc), exc.args, exc)
    if not self._getupdates_can_write[index]:
        logger('error get_updates done. but not able to send output.', index)
    else:
        self._last_updates = fetched
    return fetched
示例14: _generate_help_list
def _generate_help_list(self):
    """Build the help text from every command_* method's docstring.

    Methods listed in self.skip_in_help are excluded; a command without
    a docstring still gets a ' /name - ' line.
    """
    members = getmembers(self, predicate=ismethod)
    logger('methods', [name for name, _func in members])
    commands = [func for name, func in members
                if name[:8] == 'command_' and name not in self.skip_in_help]
    lines = []
    for func in commands:
        doc = func.__doc__ if func.__doc__ is not None else ''
        lines.append(' /' + func.__name__[8:] + ' - ' + doc + '\n')
    return ''.join(lines)
示例15: probs_metric
def probs_metric(inverse=False):
    """Sample a random table point, generate a sentence for it, and score it.

    Returns a tuple:
        (lmk_prior, rel_prior, lmk_post, rel_post,
         prob, entropy, rank, min_edit_distance, relation_type)
    """
    # random point drawn uniformly over the table's bounding box
    rand_p = Vec2(random()*table.width+table.min_point.x, random()*table.height+table.min_point.y)
    try:
        bestmeaning, bestsentence = generate_sentence(rand_p, False, scene, speaker, usebest=True, golden=inverse, printing=printing)
        sampled_landmark, sampled_relation = bestmeaning.args[0], bestmeaning.args[3]
        golden_posteriors = get_all_sentence_posteriors(bestsentence, meanings, golden=(not inverse), printing=printing)
        # lmk_prior = speaker.get_landmark_probability(sampled_landmark, landmarks, PointRepresentation(rand_p))[0]
        all_lmk_probs = speaker.all_landmark_probs(landmarks, Landmark(None, PointRepresentation(rand_p), None))
        all_lmk_probs = dict(zip(landmarks, all_lmk_probs))
        lmk_prior = all_lmk_probs[sampled_landmark]
        head_on = speaker.get_head_on_viewpoint(sampled_landmark)
        rel_prior = speaker.get_probabilities_points( np.array([rand_p]), sampled_relation, head_on, sampled_landmark)
        lmk_post = golden_posteriors[sampled_landmark]
        rel_post = golden_posteriors[sampled_relation]
        # joint posterior for every candidate (landmark, relation) meaning
        ps = np.array([golden_posteriors[lmk]*golden_posteriors[rel] for lmk, rel in meanings])
        rank = None
        for i,p in enumerate(ps):
            lmk,rel = meanings[i]
            # logger( '%f, %s' % (p, m2s(lmk,rel)))
            head_on = speaker.get_head_on_viewpoint(lmk)
            # ps[i] *= speaker.get_landmark_probability(lmk, landmarks, PointRepresentation(rand_p))[0]
            ps[i] *= all_lmk_probs[lmk]
            ps[i] *= speaker.get_probabilities_points( np.array([rand_p]), rel, head_on, lmk)
            # NOTE(review): `idx` stays unbound if the sampled meaning is not
            # found in `meanings`; ps[idx] below would then raise NameError
            if lmk == sampled_landmark and rel == sampled_relation:
                idx = i
        ps += epsilon
        ps = ps/ps.sum()
        prob = ps[idx]
        rank = sorted(ps, reverse=True).index(prob)
        entropy = entropy_of_probs(ps)
    except (ParseError,RuntimeError) as e:
        logger( e )
        lmk_prior = 0
        rel_prior = 0
        lmk_post = 0
        rel_post = 0
        prob = 0
        rank = len(meanings)-1
        entropy = 0
        distances = [[None]]
    # NOTE(review): on the exception path above, sampled_landmark /
    # sampled_relation / bestsentence are never bound, so the lines below
    # would raise NameError; also the [[None]] fallback is immediately
    # overwritten by `distances = []`. Reconstructed indentation -- confirm
    # against the original file before changing behavior.
    head_on = speaker.get_head_on_viewpoint(sampled_landmark)
    all_descs = speaker.get_all_meaning_descriptions(trajector, scene, sampled_landmark, sampled_relation, head_on, 1)
    distances = []
    for desc in all_descs:
        distances.append([edit_distance( bestsentence, desc ), desc])
    distances.sort()
    return lmk_prior,rel_prior,lmk_post,rel_post,\
           prob,entropy,rank,distances[0][0],type(sampled_relation)