This article collects typical usage examples of warnings.append in Python. If you are wondering what append does, how to use it, or where to find real examples of it, then congratulations: the curated code samples below may be exactly what you need.
A total of 15 code examples of the append call are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
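Note that the standard-library warnings module does not actually define an append function: in every example below, warnings is an ordinary Python list (often shadowing the module name inside that scope), and .append() simply collects warning messages for the caller to inspect. A minimal, self-contained sketch of the shared pattern:

def validate(values):
    warnings = []                        # a plain list, not the stdlib warnings module
    for v in values:
        if v < 0:
            warnings.append("Warning: negative value %r" % v)
    return warnings

for msg in validate([1, -2, 3]):
    print(msg)                           # -> Warning: negative value -2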
Example 1: check_file_list
def check_file_list(any_format,ignore_warnings=True, warnings=[]):
"""
get the folder for the job or a batch job
a batch job can be either a readable text file name: file_name.any_extension - contents must be file with list of folders, one per line
or it can be a glob-like search pattern for folders//*
the folders may or may not contain valid tif data with well-named frames
if any are invalid, the invalid ones are added to warnings. the valid ones are returned in a list
a caller can decide what to do about the warnings which will be listed (ignore/quit)
If print_info==True:
the following folders can be processed:
folder range #gaps
the following cannot be processed:
folder issue
"""
import os
from glob import glob
folders = [any_format]
    if os.path.isfile(any_format):
        with open(any_format) as f: folders = [_f.strip() for _f in f if _f.strip()]
if any_format[-1]=="*": folders = list(glob(any_format))
valid_folders = []
for f in folders:
c = context.folder_context(f)
if not c.meta_key["valid"]: warnings.append(f)
else: valid_folders.append(c.meta_key)
#list the goods and the bads by simply listing the info for the folder but then say warnings in tabs if req
if ignore_warnings == False and len(warnings) > 0:
#some folders cannot be processed. Do you want to continue(y) or quit(n)?
pass
return valid_folders
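A hypothetical caller (the glob pattern and the handling of the collected warnings are assumptions; check_file_list itself also depends on a project-specific context module):

collected = []
valid = check_file_list("jobs/folders//*", ignore_warnings=True, warnings=collected)
for bad_folder in collected:
    print("skipping invalid folder: %s" % bad_folder)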
Example 2: load_notebook
def load_notebook(resources=None, verbose=False, hide_banner=False):
''' Prepare the IPython notebook for displaying Bokeh plots.
Args:
resources (Resource, optional) :
how and where to load BokehJS from
verbose (bool, optional) :
whether to report detailed settings (default: False)
hide_banner (bool, optional):
whether to hide the Bokeh banner (default: False)
Returns:
None
'''
global _notebook_loaded
from .resources import INLINE
from .templates import NOTEBOOK_LOAD, RESOURCES
if resources is None:
resources = INLINE
plot_resources = RESOURCES.render(
js_raw = resources.js_raw,
css_raw = resources.css_raw,
js_files = resources.js_files,
css_files = resources.css_files,
)
if resources.mode == 'inline':
js_info = 'inline'
css_info = 'inline'
else:
js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files
css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files
warnings = ["Warning: " + msg['text'] for msg in resources.messages if msg['type'] == 'warn']
if _notebook_loaded:
warnings.append('Warning: BokehJS previously loaded')
_notebook_loaded = resources
html = NOTEBOOK_LOAD.render(
plot_resources = plot_resources,
logo_url = resources.logo_url,
verbose = verbose,
js_info = js_info,
css_info = css_info,
bokeh_version = __version__,
warnings = warnings,
hide_banner = hide_banner,
)
utils.publish_display_data({'text/html': html})
Example 3: validate_nbextension_python
def validate_nbextension_python(spec, full_dest, logger=None):
"""Assess the health of an installed nbextension
Returns a list of warnings.
Parameters
----------
spec : dict
A single entry of _jupyter_nbextension_paths():
[{
'section': 'notebook',
'src': 'mockextension',
'dest': '_mockdestination',
'require': '_mockdestination/index'
}]
full_dest : str
The on-disk location of the installed nbextension: this should end
with `nbextensions/<dest>`
logger : Jupyter logger [optional]
Logger instance to use
"""
infos = []
warnings = []
section = spec.get("section", None)
if section in NBCONFIG_SECTIONS:
infos.append(u" {} section: {}".format(GREEN_OK, section))
else:
warnings.append(u" {} section: {}".format(RED_X, section))
require = spec.get("require", None)
if require is not None:
require_path = os.path.join(
full_dest[0:-len(spec["dest"])],
u"{}.js".format(require))
if os.path.exists(require_path):
infos.append(u" {} require: {}".format(GREEN_OK, require_path))
else:
warnings.append(u" {} require: {}".format(RED_X, require_path))
if logger:
if warnings:
logger.warning("- Validating: problems found:")
for msg in warnings:
logger.warning(msg)
for msg in infos:
logger.info(msg)
logger.warning(u"Full spec: {}".format(spec))
else:
logger.info(u"- Validating: {}".format(GREEN_OK))
return warnings
Example 4: add_location
def add_location(tracker, item):
if 'track' not in item:
# We can only add location for 'track' items.
return item
tracker_item = tracker.track(artist_name=item['artist'], track_name=item['track'])
if tracker_item:
item.update(tracker_item)
else:
warnings = item.get('warnings', [])
warnings.append('tracker: Unknown track')
item['warnings'] = warnings
return item
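The get/append/reassign dance on item['warnings'] can also be written with dict.setdefault, which returns the existing list or inserts a fresh one; a small standalone sketch (not the original code):

item = {'artist': 'X', 'track': 'Y'}
item.setdefault('warnings', []).append('tracker: Unknown track')
print(item['warnings'])   # -> ['tracker: Unknown track']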
Example 5: autosummary_directive
def autosummary_directive(dirname, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""
Pretty table containing short signatures and summaries of functions etc.
autosummary also generates a (hidden) toctree:: node.
"""
names = []
names += [x.strip().split()[0] for x in content
if x.strip() and re.search(r'^[a-zA-Z_]', x.strip()[0])]
table, warnings, real_names = get_autosummary(names, state,
'nosignatures' in options)
node = table
env = state.document.settings.env
suffix = env.config.source_suffix
all_docnames = env.found_docs.copy()
dirname = posixpath.dirname(env.docname)
if 'toctree' in options:
tree_prefix = options['toctree'].strip()
docnames = []
for name in names:
name = real_names.get(name, name)
docname = tree_prefix + name
if docname.endswith(suffix):
docname = docname[:-len(suffix)]
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in env.found_docs:
warnings.append(state.document.reporter.warning(
'toctree references unknown document %r' % docname,
line=lineno))
docnames.append(docname)
tocnode = sphinx.addnodes.toctree()
tocnode['includefiles'] = docnames
tocnode['maxdepth'] = -1
tocnode['glob'] = None
tocnode['entries'] = [(None, docname) for docname in docnames]
tocnode = autosummary_toc('', '', tocnode)
return warnings + [node] + [tocnode]
else:
return warnings + [node]
Example 6: validate_nbextension
def validate_nbextension(require, logger=None):
"""Validate a named nbextension.
Looks across all of the nbextension directories.
Returns a list of warnings.
require : str
require.js path used to load the extension
logger : Jupyter logger [optional]
Logger instance to use
"""
warnings = []
infos = []
js_exists = False
for exts in _nbextension_dirs():
# Does the Javascript entrypoint actually exist on disk?
js = u"{}.js".format(os.path.join(exts, *require.split("/")))
js_exists = os.path.exists(js)
if js_exists:
break
require_tmpl = u" - require? {} {}"
if js_exists:
infos.append(require_tmpl.format(GREEN_OK, require))
else:
warnings.append(require_tmpl.format(RED_X, require))
if logger:
if warnings:
logger.warning(u" - Validating: problems found:")
for msg in warnings:
logger.warning(msg)
for msg in infos:
logger.info(msg)
else:
logger.info(u" - Validating: {}".format(GREEN_OK))
return warnings
Example 7: _read_warnings
def _read_warnings(self):
"""Poll all the warning loggers and extract any new warnings that have
been logged. If the warnings belong to a category that is currently
disabled, this method will discard them and they will no longer be
retrievable.
Returns a list of (timestamp, message) tuples, where timestamp is an
integer epoch timestamp."""
warnings = []
while True:
# pull in a line of output from every logger that has
# output ready to be read
loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
closed_loggers = set()
for logger in loggers:
line = logger.readline()
# record any broken pipes (aka line == empty)
if len(line) == 0:
closed_loggers.add(logger)
continue
# parse out the warning
timestamp, msgtype, msg = line.split('\t', 2)
timestamp = int(timestamp)
# if the warning is valid, add it to the results
if self.warning_manager.is_valid(timestamp, msgtype):
warnings.append((timestamp, msg.strip()))
# stop listening to loggers that are closed
self.warning_loggers -= closed_loggers
# stop if none of the loggers have any output left
if not loggers:
break
# sort into timestamp order
warnings.sort()
return warnings
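The warning loggers are expected to emit tab-separated lines of the form timestamp<TAB>msgtype<TAB>message; a small sketch of producing and parsing such a line (the msgtype value is an assumption, the parsing mirrors the code above):

import time

line = "%d\tNETWORK\tpacket loss detected\n" % time.time()
timestamp, msgtype, msg = line.split('\t', 2)
print((int(timestamp), msgtype, msg.strip()))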
Example 8: get_autosummary
def get_autosummary(names, state, no_signatures=False):
"""
Generate a proper table node for autosummary:: directive.
Parameters
----------
names : list of str
Names of Python objects to be imported and added to the table.
document : document
Docutils document object
"""
document = state.document
real_names = {}
warnings = []
prefixes = ['']
prefixes.insert(0, document.settings.env.currmodule)
table = nodes.table('')
group = nodes.tgroup('', cols=2)
table.append(group)
group.append(nodes.colspec('', colwidth=10))
group.append(nodes.colspec('', colwidth=90))
body = nodes.tbody('')
group.append(body)
def append_row(*column_texts):
row = nodes.row('')
for text in column_texts:
node = nodes.paragraph('')
vl = ViewList()
vl.append(text, '<autosummary>')
state.nested_parse(vl, 0, node)
try:
if isinstance(node[0], nodes.paragraph):
node = node[0]
except IndexError:
pass
row.append(nodes.entry('', node))
body.append(row)
for name in names:
try:
obj, real_name = import_by_name(name, prefixes=prefixes)
except ImportError:
warnings.append(document.reporter.warning(
'failed to import %s' % name))
append_row(":obj:`%s`" % name, "")
continue
real_names[name] = real_name
doc = get_doc_object(obj)
if doc['Summary']:
title = " ".join(doc['Summary'])
else:
title = ""
col1 = u":obj:`%s <%s>`" % (name, real_name)
if doc['Signature']:
sig = re.sub('^[^(\[]*', '', doc['Signature'].strip())
if '=' in sig:
# abbreviate optional arguments
sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1)
sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1)
sig = re.sub(r'=[^,)]+,', ',', sig)
sig = re.sub(r'=[^,)]+\)$', '])', sig)
# shorten long strings
sig = re.sub(r'(\[.{16,16}[^,]*?),.*?\]\)', r'\1, ...])', sig)
else:
sig = re.sub(r'(\(.{16,16}[^,]*?),.*?\)', r'\1, ...)', sig)
# make signature contain non-breaking spaces
col1 += u"\\ \u00a0" + unicode(sig).replace(u" ", u"\u00a0")
col2 = title
append_row(col1, col2)
return table, warnings, real_names
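A quick standalone check of the optional-argument abbreviation performed above (the example signature is an assumption; the substitutions are copied from the code):

import re

sig = "(data, axis=0, dtype=None)"
sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1)
sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1)
sig = re.sub(r'=[^,)]+,', ',', sig)
sig = re.sub(r'=[^,)]+\)$', '])', sig)
print(sig)   # -> (data[, axis, dtype])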
Example 9: load_filenames
def load_filenames(args):
global gl
warnings = []
errors = []
for arg in args:
#{
for sampfname in glob.glob(arg):
if True:
try:
sampf = file(sampfname, "rb")
except IOError, msg:
errors.append(msg)
if sampf:
sampf.close()
samp = Samp()
# strip directory
                basename = sampfname.replace("\\", "/")
                basename = basename.split("/")[-1]
# strip ".wav" extension
basename = basename.split(".")
basename = ".".join(basename[0:-1])
basename = jtrans.tr(basename, DELIMS, " ")
# get layer name
parts = basename.split(" ")
if len(parts) <= abs(LAYER_LOC):
loc = LAYER_LOC
if loc >= 0:
loc += 1
                    print >>sys.stderr, (
                        "After splitting filename '%s' at delimiters,"
                        % (basename))
                    print >>sys.stderr, (
                        "there aren't enough parts to find part number %d." % loc)
sys.exit(1)
layername = parts[LAYER_LOC]
# get note: might be MIDI number or note name
if len(parts) <= abs(NOTE_LOC):
loc = NOTE_LOC
if loc >= 0:
loc += 1
                    print >>sys.stderr, (
                        "After splitting filename '%s' at delimiters,"
                        % (basename))
                    print >>sys.stderr, (
                        "there aren't enough parts to find part number %d." % loc)
sys.exit(1)
notespec = parts[NOTE_LOC]
mnote = jmidi.notenum(notespec)
if mnote == None:
print >>sys.stderr, (
"Invalid MIDI note designation '%s' in '%s'"
% (notespec, basename))
sys.exit(1)
# print sampfname, mnote, layername, jmidi.mnote_name(mnote)[0]
samp.fname = sampfname
samp.mnote = mnote
samp.notename = jmidi.mnote_name(mnote, pad=None)
samp.layername = layername
if layername not in gl.layernum:
warnings.append("Sample for unconfigured layer '%s': %s"
% (samp.layername, samp.fname))
continue
samp.layer = gl.layernum[layername]
if samp.layer == None:
warnings.append("Sample for missing layer '%s': %s"
% (samp.layername, samp.fname))
continue
x = LO_KEY - MAX_NOTE_SHIFT
if (samp.mnote < max(0, LO_KEY - MAX_NOTE_SHIFT)
or samp.mnote > HI_KEY + MAX_NOTE_SHIFT):
warnings.append("Sample outside useful note range (%s): %s"
% (samp.notename, samp.fname))
continue
samp.char = None
gl.samps[sampfname] = samp
gl.grid[samp.layer][mnote] = samp
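A sketch of the basename handling above with a hypothetical sample filename (DELIMS, LAYER_LOC and NOTE_LOC are project-specific constants; here an underscore delimiter and positions -2/-1 are assumed, and jtrans.tr is replaced by a plain replace()):

sampfname = r"samples\piano\soft_C4.wav"           # hypothetical input
basename = sampfname.replace("\\", "/")
basename = basename.split("/")[-1]                 # "soft_C4.wav"
basename = ".".join(basename.split(".")[0:-1])     # strip extension -> "soft_C4"
basename = basename.replace("_", " ")              # stand-in for jtrans.tr(basename, DELIMS, " ")
parts = basename.split(" ")                        # ["soft", "C4"]
layername, notespec = parts[-2], parts[-1]         # assuming LAYER_LOC = -2, NOTE_LOC = -1
print("%s %s" % (layername, notespec))             # -> soft C4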
Example 10: catch_warnings
error = None
warnings = []
with catch_warnings(record=True) as w:
try:
run_test(test, debug=debug)
except UnheardNoteError, e:
unheard = e
except MissingRecordingError, e:
missing_rec = e
except Exception:
error = sys.exc_info()
for warning in w:
if warning.category is ExtraNoteWarning:
warnings.append(warning.message)
if unheard is not None or error is not None or missing_rec is not None:
color_name = 'red!'
results['errors'] += 1
elif warnings:
color_name = 'yellow!'
results['warnings'] += 1
else:
color_name = 'green!'
results['ok'] += 1
print color(color_name, test.name)
for warning in warnings:
print ' %2d: Extra notes:' % warning.expectation, \
Example 11: load_notebook
def load_notebook(resources=None, verbose=False, force=False, skip=False):
''' Prepare the IPython notebook for displaying Bokeh plots.
Args:
resources (Resource, optional) : a resource object describing how and where to load BokehJS from
verbose (bool, optional) : whether to report detailed settings (default: False)
force (bool, optional) : whether to skip IPython notebook check (default: False)
Returns:
None
'''
global _notebook_loaded
    # It's possible the IPython folks will change things in the future, the `force` parameter
    # provides an escape hatch as long as `displaypub` works
if not force:
notebook = False
try:
notebook = 'notebook' in get_ipython().config.IPKernelApp.parent_appname
except Exception:
pass
if not notebook:
raise RuntimeError('load_notebook only works inside an '
'IPython notebook, try using force=True.')
from .resources import INLINE
from .templates import NOTEBOOK_LOAD, RESOURCES
if resources is None:
resources = INLINE
plot_resources = RESOURCES.render(
js_raw = resources.js_raw,
css_raw = resources.css_raw,
js_files = resources.js_files,
css_files = resources.css_files,
)
if resources.mode == 'inline':
js_info = 'inline'
css_info = 'inline'
else:
js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files
css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files
warnings = ["Warning: " + msg['text'] for msg in resources.messages if msg['type'] == 'warn']
if _notebook_loaded:
warnings.append('Warning: BokehJS previously loaded')
_notebook_loaded = resources
html = NOTEBOOK_LOAD.render(
plot_resources = plot_resources,
logo_url = resources.logo_url,
verbose = verbose,
js_info = js_info,
css_info = css_info,
bokeh_version = __version__,
warnings = warnings,
skip = skip,
)
utils.publish_display_data({'text/html': html})
Example 12: accumulateDeprecations
def accumulateDeprecations(message, category, stacklevel):
self.assertEqual(DeprecationWarning, category)
self.assertEqual(stacklevel, 2)
warnings.append(message)
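This callback comes from a test harness that injects it as a warning collector and asserts on the category and stack level; a standalone analogue using only the standard library (the wiring through showwarning is an assumption, not how the original harness does it):

import warnings as warnings_module     # aliased so the collecting list below can be named "warnings"

warnings = []

def accumulate(message, category, filename, lineno, file=None, line=None):
    if category is DeprecationWarning:
        warnings.append(str(message))

warnings_module.simplefilter("always")
warnings_module.showwarning = accumulate
warnings_module.warn("old_api() is deprecated", DeprecationWarning, stacklevel=2)
print(warnings)   # -> ['old_api() is deprecated']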
Example 13: migrate
def migrate(customer_id, username, password, key):
warnings = []
customer = masterdb.Customer.get(customer_id)
h = Pingdom(username, password, key)
# create the contacts
contacts = {}
for cid, contact in h.contacts.iteritems():
try: panopta_contact = masterdb.Contact.selectBy(customer=customer, fullname=contact.name)[0]
except:
timezone = "America/Chicago"
panopta_contact = masterdb.Contact(
customer=customer, fullname=contact.name,
timezone=timezone
)
warnings.append("contact '%s' created with default timezone of %s" % (
contact.name, timezone
))
channels = []
contacts[cid] = (panopta_contact, channels)
phone = getattr(contact, "cellphone", None)
if phone:
phone = "+"+phone
sms_type = masterdb.ContactType.selectBy(textkey="sms")[0]
try: masterdb.ContactChannel.selectBy(contact_info=phone, contact_type=sms_type)[0]
except:
channel = masterdb.ContactChannel(
contact_info=phone,
contact_type=sms_type,
contact=panopta_contact,
label=""
)
channels.append(channel)
email = getattr(contact, "email", None)
if email:
email_type = masterdb.ContactType.selectBy(textkey="email.html")[0]
try: masterdb.ContactChannel.selectBy(contact_info=email, contact_type=email_type)[0]
except:
channel = masterdb.ContactChannel(
contact_info=email,
contact_type=email_type,
contact=panopta_contact,
label=""
)
channels.append(channel)
# migrate the servers
try: server_group = masterdb.ServerGroup.selectBy(name="Pingdom")[0]
except: server_group = masterdb.ServerGroup(
name="Pingdom", server_group=None, customer=customer,
notification_schedule=None
)
for server in h.servers.values():
servers = masterdb.Server.selectBy(customer=customer, fqdn=server.hostname)
if not servers.count():
logging.info("creating server \"%s\"" % server.hostname)
panopta_server = masterdb.Server(
name = server.hostname,
fqdn = server.hostname,
last_known_ip = None,
customer = customer,
primary_node = masterdb.MonitorNode.get_closest_node(server.ip),
status = 'active',
notification_schedule=None,
server_group=server_group
)
else: panopta_server = servers[0]
# create the different notification schedules from the checks
schedules = {}
for check in server.checks:
if not check.notifications: continue
sk = [contact.id for contact in check.notifications]
sname = [contact.name for contact in check.notifications]
sname.sort()
sk.sort()
sk = [str(i) for i in sk]
            if len(sname) > 3:
                # keep the first three names and note how many were dropped
                sname = sname[:3] + ["and %d more" % (len(sname) - 3)]
sk = ",".join(sk)
sname = ", ".join(sname)
# create the notification schedule if it doesn't exist
#......... the rest of this example is omitted .........
Example 14: wsdl_validate_params
def wsdl_validate_params(self, struct, value):
"""Validate the arguments (actual values) for the parameters structure.
Fail for any invalid arguments or type mismatches."""
errors = []
warnings = []
valid = True
# Determine parameter type
if type(struct) == type(value):
typematch = True
if not isinstance(struct, dict) and isinstance(value, dict):
typematch = True # struct can be a dict or derived (Struct)
else:
typematch = False
if struct == str:
struct = unicode # fix for py2 vs py3 string handling
if not isinstance(struct, (list, dict, tuple)) and struct in TYPE_MAP.keys():
if not type(value) == struct:
try:
struct(value) # attempt to cast input to parameter type
except:
valid = False
errors.append('Type mismatch for argument value. parameter(%s): %s, value(%s): %s' % (type(struct), struct, type(value), value))
elif isinstance(struct, list) and len(struct) == 1 and not isinstance(value, list):
# parameter can have a dict in a list: [{}] indicating a list is allowed, but not needed if only one argument.
next_valid, next_errors, next_warnings = self.wsdl_validate_params(struct[0], value)
if not next_valid:
valid = False
errors.extend(next_errors)
warnings.extend(next_warnings)
# traverse tree
elif isinstance(struct, dict):
if struct and value:
for key in value:
if key not in struct:
valid = False
errors.append('Argument key %s not in parameter. parameter: %s, args: %s' % (key, struct, value))
else:
next_valid, next_errors, next_warnings = self.wsdl_validate_params(struct[key], value[key])
if not next_valid:
valid = False
errors.extend(next_errors)
warnings.extend(next_warnings)
for key in struct:
if key not in value:
warnings.append('Parameter key %s not in args. parameter: %s, value: %s' % (key, struct, value))
elif struct and not value:
warnings.append('parameter keys not in args. parameter: %s, args: %s' % (struct, value))
elif not struct and value:
valid = False
errors.append('Args keys not in parameter. parameter: %s, args: %s' % (struct, value))
else:
pass
elif isinstance(struct, list):
struct_list_value = struct[0]
for item in value:
next_valid, next_errors, next_warnings = self.wsdl_validate_params(struct_list_value, item)
if not next_valid:
valid = False
errors.extend(next_errors)
warnings.extend(next_warnings)
elif not typematch:
valid = False
errors.append('Type mismatch. parameter(%s): %s, value(%s): %s' % (type(struct), struct, type(value), value))
return (valid, errors, warnings)
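The dict branch above boils down to a simple rule: keys present in the value but not declared in the parameter are errors, while declared keys missing from the value are only warnings. A reduced standalone sketch of just that rule (not the pysimplesoap API):

def validate_keys(struct, value):
    """Return (errors, warnings) for a flat dict parameter definition."""
    errors, warnings = [], []
    for key in value:
        if key not in struct:
            errors.append("Argument key '%s' not in parameter" % key)
    for key in struct:
        if key not in value:
            warnings.append("Parameter key '%s' not in args" % key)
    return errors, warnings

print(validate_keys({'a': int, 'b': str}, {'a': 1, 'c': 3}))
# -> (["Argument key 'c' not in parameter"], ["Parameter key 'b' not in args"])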
Example 15: check_calib
def check_calib(self, frame_type="ALL"):
warnings = []
if frame_type=="BIAS":
if "data" not in self.cal_data["BIAS"]:
warnings.append("Warning: No bias image found - can't do bias subtraction. Continue anyway?")
elif frame_type=="DARK":
if len(self.cal_data["DARK"])==0:
warnings.append("Warning: No dark images found - can't account for dark current. Continue anyway?")
else:
for tag in self.cal_data["DARK"]:
if not self.cal_data["DARK"][tag]["master"]:
warnings.append("Warning: No bias frame applied to dark frame with exposure {}. Continue anyway?".format(tag))
elif frame_type=="Flat Field":
if len(self.cal_data["Flat Field"])==0:
warnings.append("Warning: No flat images found - can't account for detector sensitivity. Continue anyway?")
else:
for tag in self.cal_data["Flat Field"]:
if not self.cal_data["Flat Field"][tag]["master"]:
warnings.append("Warning: No bias frame applied to flat frame with filter \"{}\". Continue anyway?".format(tag))
elif frame_type=="ALL":
warnings.extend(self.check_calib("BIAS"))
warnings.extend(self.check_calib("DARK"))
warnings.extend(self.check_calib("Flat Field"))
else:
warnings.append("Warning: Unknown frame type. Continue anyway?")
return warnings
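Every message returned by check_calib ends with "Continue anyway?", so a caller would typically display the warnings and ask for confirmation; a hypothetical interactive wrapper (the pipeline object exposing check_calib is an assumption):

def confirm_calibration(pipeline):
    warnings = pipeline.check_calib("ALL")
    for msg in warnings:
        print(msg)
    if warnings:
        return input("Continue anyway? [y/N] ").strip().lower() == "y"
    return True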