本文整理汇总了Python中warnings.filterwarnings函数的典型用法代码示例。如果您正苦于以下问题:Python filterwarnings函数的具体用法?Python filterwarnings怎么用?Python filterwarnings使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了filterwarnings函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: django_tests
def django_tests(verbosity, interactive, failfast, test_labels):
    """Run the Django test suite for *test_labels* and return the failure count."""
    from django.conf import settings
    from django.test.utils import get_runner

    state = setup(verbosity, test_labels)
    extra_tests = []
    # Fall back to the stock discover runner when settings does not name one.
    if not hasattr(settings, 'TEST_RUNNER'):
        settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
    runner_cls = get_runner(settings)
    runner = runner_cls(
        verbosity=verbosity,
        interactive=interactive,
        failfast=failfast,
    )
    # Catch warnings thrown in test DB setup -- remove in Django 1.9
    with warnings.catch_warnings():
        warnings.filterwarnings(
            'ignore',
            "Custom SQL location '<app_label>/models/sql' is deprecated, "
            "use '<app_label>/sql' instead.",
            PendingDeprecationWarning
        )
        labels = test_labels or get_installed()
        failures = runner.run_tests(labels, extra_tests=extra_tests)
    teardown(state)
    return failures
示例2: test_ccode_results_named_ordered
def test_ccode_results_named_ordered():
    """C codegen must honour an explicit argument_sequence for named results."""
    x, y, z = symbols('x,y,z')
    B, C = symbols('B,C')
    A = MatrixSymbol('A', 1, 3)
    # Three named results: a matrix assignment and two scalar equalities.
    expr1 = Equality(A, Matrix([[1, 2, x]]))
    expr2 = Equality(C, (x + y)*z)
    expr3 = Equality(B, 2*x)
    name_expr = ("test", [expr1, expr2, expr3])
    # Parameters must appear exactly in the requested (x, C, z, y, A, B) order.
    expected = ''.join([
        '#include "test.h"\n',
        '#include <math.h>\n',
        'void test(double x, double *C, double z, double y, double *A, double *B) {\n',
        '   (*C) = z*(x + y);\n',
        '   A[0] = 1;\n',
        '   A[1] = 2;\n',
        '   A[2] = x;\n',
        '   (*B) = 2*x;\n',
        '}\n',
    ])
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
        result = codegen(name_expr, "c", "test", header=False, empty=False,
                         argument_sequence=(x, C, z, y, A, B))
    generated = result[0][1]
    assert generated == expected
示例3: log_likelihood_dataset
def log_likelihood_dataset(f, dataset, log_likelihood_datapoint, logger, ll_fun_wants_log_domain):
    """
    Return the total log-likelihood of every datapoint in *dataset*.

    f : log-domain potentials
    dataset : provides unaries, binaries, Y, object_size, n_labels and N
        (the code below reads exactly these attributes)
    log_likelihood_datapoint : callable computing one datapoint's LL from
        (node_pot, edge_pot, y, object_size, n_labels)
    logger : receives a debug message if exponentiation overflows
    ll_fun_wants_log_domain : whether or not the log-likelihood function needs f to be in log-domain (this is false only for the native chain LL implementation)
    """
    #print("f.dtype : %s" % f.dtype)
    #import hashlib
    #print("before " + str(int(hashlib.sha1(f.view(np.uint8)).hexdigest(), 16)))
    if not ll_fun_wants_log_domain:
        # The native implementation wants plain potentials, so exponentiate.
        # filterwarnings('error') promotes numpy RuntimeWarnings (e.g. overflow
        # in exp) to exceptions; if one fires, f is left unchanged (still in
        # log-domain) and only a debug message is emitted.
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings('error')
                f = np.exp(f) # changing semantics of f instead of inserting if's on edge_pot=... and node_pot=...
        except RuntimeWarning as rtw:
            logger.debug("RuntimeWarning: " + str(rtw))
    #print("after " + str(int(hashlib.sha1(f.view(np.uint8)).hexdigest(), 16)))
    ll = 0
    # Edge potentials are shared across all datapoints.
    edge_pot = f[dataset.binaries]
    # print(dataset.binaries)
    # print(log_edge_pot)
    #assert(log_edge_pot.shape == (dataset.n_labels, dataset.n_labels))
    for n in range(dataset.N):
        # Node potentials are specific to datapoint n.
        node_pot = f[dataset.unaries[n]]
        ll_datapoint = log_likelihood_datapoint(node_pot, edge_pot, dataset.Y[n], dataset.object_size[n], dataset.n_labels)
        # if (ll_datapoint >0):
        #     info_string = ""
        #     info_string += 'log_likelihood_datapoint as computed: %g\n' % ll_datapoint
        #     info_string += 'n: %g\n' % n
        #     info_string += 'node_pot.tolist(): %s\n' % node_pot.tolist()
        #     info_string += 'edge_pot.tolist(): %s\n' % edge_pot.tolist()
        #     info_string += 'dataset.Y[n]: %s\n' % dataset.Y[n].tolist()
        #     raise Exception("positive log-likelihood is not allowed. More information:\n" + info_string)
        ll += ll_datapoint
    # in grid case, object_size will be ignored
    return ll # LL should not be scaled !
示例4: _suppress_scipy_warnings
def _suppress_scipy_warnings():
    """Install 'ignore' warnings filters for known-noisy DeprecationWarnings
    emitted by specific numpy/scipy version combinations.

    Filters are only installed when the 'PY' debug target is inactive (or
    __debug__ is off), so developers can still see the raw warnings.
    """
    # Infiltrate warnings if necessary
    numpy_ver = versions['numpy']
    scipy_ver = versions['scipy']
    # There is way too much deprecation warnings spit out onto the
    # user. Lets assume that they should be fixed by scipy 0.7.0 time
    if not __debug__ or (__debug__ and 'PY' not in debug.active):
        filter_lines = []
        # NOTE(review): these are lexicographic string comparisons of version
        # strings, not true version ordering (e.g. "0.10" < "0.6.0") -- they
        # happen to work for the ranges checked here.
        if "0.6.0" <= scipy_ver and scipy_ver < "0.7.0" \
           and numpy_ver > "1.1.0":
            if __debug__:
                debug('EXT', "Setting up filters for numpy DeprecationWarnings "
                      "regarding scipy < 0.7.0")
            filter_lines += [
                ('NumpyTest will be removed in the next release.*',
                 DeprecationWarning),
                ('PyArray_FromDims: use PyArray_SimpleNew.',
                 DeprecationWarning),
                ('PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr.',
                 DeprecationWarning),
                # Trick re.match, since in warnings absent re.DOTALL in re.compile
                ('[\na-z \t0-9]*The original semantics of histogram is scheduled to be.*'
                 '[\na-z \t0-9]*', Warning) ]
        if scipy_ver >= "0.15":
            filter_lines += [("`scipy.weave` is deprecated, use `weave` instead!",
                              DeprecationWarning)]
        if scipy_ver >= "0.16":
            # scipy deprecated it but statsmodels still import it for now
            filter_lines += [("`scipy.linalg.calc_lwork` is deprecated!",
                              DeprecationWarning)]
        # Register each (message-regex, category) pair as an 'ignore' filter.
        for f, w in filter_lines:
            warnings.filterwarnings('ignore', f, w)
示例5: main
def main(argv):
    """Entry point: configure pandas/warnings, load the Kaggle competition
    data files, munge them, and produce random-forest predictions."""
    pd.set_option('display.width', 200)
    pd.set_option('display.height', 500)
    warnings.filterwarnings("ignore")
    global file_path, RMSLE_scorer
    # RMSLE_scorer
    RMSLE_scorer = metrics.make_scorer(RMSLE, greater_is_better = False)
    if platform.system() == "Windows":
        file_path = 'C:/Python/Others/data/Kaggle/Caterpillar_Tube_Pricing/'
    else:
        file_path = '/home/roshan/Desktop/DS/Others/data/Kaggle/Caterpillar_Tube_Pricing/'
    ########################################################################################################################
    #Read the input file , munging and splitting the data to train and test
    ########################################################################################################################
    def _load(rel_path):
        # All input files live under file_path and are plain comma-separated CSV.
        return pd.read_csv(file_path + rel_path, sep=',')
    Train_DS    = _load('competition_data/train_set.csv')
    Actual_DS   = _load('competition_data/test_set.csv')
    Tube_DS     = _load('competition_data/tube.csv')
    Bill_DS     = _load('competition_data/bill_of_materials.csv')
    Spec_DS     = _load('competition_data/specs.csv')
    Tube_End_DS = _load('competition_data/tube_end_form.csv')
    Comp_DS     = _load('competition_data/components_2.csv')
    Sample_DS   = _load('sample_submission.csv')
    Train_DS, Actual_DS, y = Data_Munging(Train_DS,Actual_DS,Tube_DS,Bill_DS,Spec_DS,Tube_End_DS, Comp_DS)
    pred_Actual = RFR_Regressor(Train_DS, y, Actual_DS, Sample_DS, grid=False)
示例6: __init__
def __init__(self,path_to_corpora):
    """Initialise the LDA model wrapper.

    Stores the corpora path and English stopword list, silences all
    warnings, sets every training hyperparameter, and prints them.
    (Python 2 code: uses print statements.)
    """
    ## Built-in dictionary for word-parser, and path to corpora
    self.stopword = stopwords.words('english')
    self.path_to_corpora = path_to_corpora
    # LDA training libraries emit many noisy warnings; silence them globally.
    warnings.filterwarnings("ignore")
    print 'Initialize LDAModel....path to corpora : ',path_to_corpora
    ## Hyperparameters for training model
    # Minimun length of single document
    self.min_length = 200
    # Num_topics in LDA
    self.num_topics = 90
    # Filter out tokens that appear in less than `no_below` documents (absolute number)
    self.no_below_this_number = 50
    # Filter out tokens that appear in more than `no_above` documents (fraction of total corpus size, *not* absolute number).
    self.no_above_fraction_of_doc = 0.2
    # Remove topic which weights less than this number
    self.remove_topic_so_less = 0.05
    # Number of iterations in training LDA model, the less the documents in total, the more the iterations for LDA model to converge
    self.num_of_iterations = 1000
    # Number of passes in the model
    self.passes = 3
    #Print all hyperparameters
    parameters = {}
    parameters['min_length'] = self.min_length
    parameters['num_topics'] = self.num_topics
    parameters['no_below_this_number'] = self.no_below_this_number
    parameters['no_above_fraction_of_doc'] = self.no_above_fraction_of_doc
    parameters['remove_topic_so_less'] = self.remove_topic_so_less
    parameters['num_of_iterations'] = self.num_of_iterations
    parameters['passes'] = self.passes
    for k in parameters:
        print "Parameter for {0} is {1}".format(k,parameters[k])
    print 'Finished initializing....'
示例7: test_rcparams_init
def test_rcparams_init():
    """An invalid rcParams value must raise ValueError even when the
    accompanying validation warning is suppressed."""
    # figure.figsize expects a 2-tuple; a 3-tuple is invalid.
    bad_params = {'figure.figsize': (3.5, 42, 1)}
    with pytest.raises(ValueError), warnings.catch_warnings():
        warnings.filterwarnings('ignore',
                                message='.*(validate)',
                                category=UserWarning)
        mpl.RcParams(bad_params)
示例8: get_build_from_file
def get_build_from_file (platform, file_name, name):
    """Load the build-spec module *file_name* and return the most specific
    build class for *platform*, or None if no acceptable class is found.

    platform : target platform string (e.g. 'tools', 'tools32', 'darwin')
    file_name : path of the spec module to load
    name : module/package name used both for loading and class lookup
    """
    # Path relative to the cwd, used only for logging.
    gub_name = file_name.replace (os.getcwd () + '/', '')
    logging.verbose ('reading spec: %(gub_name)s\n' % locals ())
    # Ugh, FIXME
    # This loads gub/specs/darwin/python.py in PYTHON. namespace,
    # overwriting the PYTHON. namespace from gub/specs/python.py
    # Current workaround: always/also use __darwin etc. postfixing
    # of class names, also in specs/darwin/ etc.
    warnings.filterwarnings ('ignore', '''Parent module 'python-2' ''')
    module = misc.load_module (file_name, name)
    # cross/gcc.py:Gcc will be called: cross/Gcc.py,
    # to distinguish from specs/gcc.py:Gcc.py
    # Mangle the base name into the expected class name: capitalize, replace
    # characters that are invalid in identifiers, and append the platform
    # suffix with '-' mapped to '__' (e.g. 'gcc' + 'tools' -> 'Gcc__tools').
    base = os.path.basename (name)
    class_name = ((base[0].upper () + base[1:])
                  .replace ('-', '_')
                  .replace ('.', '_')
                  .replace ('++', '_xx_')
                  .replace ('+', '_x_')
                  + ('-' + platform).replace ('-', '__'))
    logging.debug ('LOOKING FOR: %(class_name)s\n' % locals ())
    cls = misc.most_significant_in_dict (module.__dict__, class_name, '__')
    # tools32 falls back to the plain 'tools' class when no tools32-specific
    # (non-target) class exists.
    if (platform == 'tools32'
        and (not cls or issubclass (cls, target.AutoBuild))):
        cls = misc.most_significant_in_dict (module.__dict__, class_name.replace ('tools32', 'tools'), '__')
    # A target-only class is not usable for a tools/tools32 build.
    if ((platform == 'tools' or platform == 'tools32')
        and (issubclass (cls, target.AutoBuild)
             and not issubclass (cls, tools.AutoBuild)
             and not issubclass (cls, tools32.AutoBuild))):
        cls = None
    return cls
示例9: test_5_save_host_keys
def test_5_save_host_keys(self):
    """
    verify that SSHClient correctly saves a known_hosts file.
    """
    # Silence the Python 2 deprecation warning about os.tempnam and friends.
    warnings.filterwarnings('ignore', 'tempnam.*')
    host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key'))
    public_host_key = paramiko.RSAKey(data=host_key.asbytes())
    # Create an empty temp file for save_host_keys() to write into.
    fd, localname = mkstemp()
    os.close(fd)
    client = paramiko.SSHClient()
    # assertEqual (not the deprecated assertEquals alias, removed in 3.12).
    self.assertEqual(0, len(client.get_host_keys()))
    host_id = '[%s]:%d' % (self.addr, self.port)
    client.get_host_keys().add(host_id, 'ssh-rsa', public_host_key)
    self.assertEqual(1, len(client.get_host_keys()))
    self.assertEqual(public_host_key, client.get_host_keys()[host_id]['ssh-rsa'])
    client.save_host_keys(localname)
    # The saved file must contain the host entry added above.
    with open(localname) as hosts_file:
        assert host_id in hosts_file.read()
    os.unlink(localname)
示例10: test_tmpnam
def test_tmpnam(self):
    """Exercise os.tmpnam() (Python 2 only; removed in Python 3) and check
    that the returned name is usable as a temporary-file path."""
    with warnings.catch_warnings():
        # tmpnam triggers RuntimeWarning on some platforms (filtered only
        # for this test module) and DeprecationWarning everywhere.
        warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
                                r"test_os$")
        warnings.filterwarnings("ignore", "tmpnam", DeprecationWarning)
        name = os.tmpnam()
    if sys.platform in ("win32",):
        # The Windows tmpnam() seems useless. From the MS docs:
        #
        # The character string that tmpnam creates consists of
        # the path prefix, defined by the entry P_tmpdir in the
        # file STDIO.H, followed by a sequence consisting of the
        # digit characters '0' through '9'; the numerical value
        # of this string is in the range 1 - 65,535. Changing the
        # definitions of L_tmpnam or P_tmpdir in STDIO.H does not
        # change the operation of tmpnam.
        #
        # The really bizarre part is that, at least under MSVC6,
        # P_tmpdir is "\\". That is, the path returned refers to
        # the root of the current drive. That's a terrible place to
        # put temp files, and, depending on privileges, the user
        # may not even be able to open a file in the root directory.
        self.assertFalse(os.path.exists(name),
                         "file already exists for temporary file")
    else:
        self.check_tempfile(name)
示例11: test_norm_hash_name
def test_norm_hash_name(self):
    "test norm_hash_name()"
    from itertools import chain
    from passlib.utils.pbkdf2 import norm_hash_name, _nhn_hash_names

    # test formats
    for fmt in self.ndn_formats:
        norm_hash_name("md4", fmt)
    self.assertRaises(ValueError, norm_hash_name, "md4", None)
    self.assertRaises(ValueError, norm_hash_name, "md4", "fake")

    # test types
    self.assertEqual(norm_hash_name(u("MD4")), "md4")
    self.assertEqual(norm_hash_name(b("MD4")), "md4")
    self.assertRaises(TypeError, norm_hash_name, None)

    # test selected results: every alias in a row must normalise to the
    # row's canonical value for each output format.
    with catch_warnings():
        warnings.filterwarnings("ignore", ".*unknown hash")
        for row in chain(_nhn_hash_names, self.ndn_values):
            for fmt, correct in zip(self.ndn_formats, row):
                for alias in row:
                    self.assertEqual(norm_hash_name(alias, fmt), correct,
                                     "name=%r, format=%r:" % (alias, fmt))
示例12: test_warnings_on_cleanup
def test_warnings_on_cleanup(self) -> None:
    # Two kinds of warning on shutdown
    # Issue 10888: may write to stderr if modules are nulled out
    # ResourceWarning will be triggered by __del__
    with self.do_create() as dir:
        if os.sep != '\\':
            # Embed a backslash in order to make sure string escaping
            # in the displayed error message is dealt with correctly
            suffix = '\\check_backslash_handling'
        else:
            suffix = ''
        d = self.do_create(dir=dir, suf=suffix)
        #Check for the Issue 10888 message
        # Null out the modules cleanup() depends on to simulate interpreter
        # shutdown, then check the fallback error message on stderr.
        modules = [os, os.path]
        if has_stat:
            modules.append(stat)
        with support.captured_stderr() as err:
            with NulledModules(*modules):
                d.cleanup()
        # Collapse doubled backslashes so the embedded suffix matches d.name.
        message = err.getvalue().replace('\\\\', '\\')
        self.assertIn("while cleaning up", message)
        self.assertIn(d.name, message)
        # Check for the resource warning
        with support.check_warnings(('Implicitly', ResourceWarning), quiet=False):
            # 'always' ensures the ResourceWarning is not deduplicated away.
            warnings.filterwarnings("always", category=ResourceWarning)
            d.__del__()
        self.assertFalse(os.path.exists(d.name),
                         "TemporaryDirectory %s exists after __del__" % d.name)
示例13: test_N_put_without_confirm
def test_N_put_without_confirm(self):
    """
    verify that get/put work without confirmation.
    """
    # Silence the Python 2 deprecation warning about os.tempnam and friends.
    warnings.filterwarnings('ignore', 'tempnam.*')
    # (Removed: an obsolete os.tempnam-based variant that was kept as a
    # no-op triple-quoted string literal.)
    # Write the payload to a local temp file for put() to upload.
    text = b'All I wanted was a plastic bunny rabbit.\n'
    f = tempfile.NamedTemporaryFile(delete=False)
    localname = f.name
    f.write(text)
    f.close()
    saved_progress = []
    def progress_callback(x, y):
        # Record every (bytes transferred, total bytes) pair put() reports.
        saved_progress.append((x, y))
    # confirm=False: put() must not stat() the remote file afterwards,
    # so the returned attributes are empty.
    res = sftp.put(localname, FOLDER + '/bunny.txt', progress_callback, False)
    # assertEqual (not the deprecated assertEquals alias, removed in 3.12).
    self.assertEqual(SFTPAttributes().attr, res.attr)
    f = sftp.open(FOLDER + '/bunny.txt', 'r')
    self.assertEqual(text, f.read(128))
    f.close()
    # The final progress callback must report the full 41-byte payload.
    self.assertEqual((41, 41), saved_progress[-1])
    os.unlink(localname)
    sftp.unlink(FOLDER + '/bunny.txt')
示例14: request
def request(self, method, amp, path='/', **kwargs):
    """Send an HTTP *method* request to the amphora *amp*'s REST API.

    Retries on connection errors/timeouts up to
    connection_max_retries times, then raises TimeOutException.
    Returns the requests response object on success.
    """
    LOG.debug("request url %s", path)
    _request = getattr(self.session, method.lower())
    _url = self._base_url(amp.lb_network_ip) + path
    # Lazy %-style args (consistent with the debug call above) instead of
    # eager string concatenation.
    LOG.debug("request url %s", _url)
    timeout_tuple = (CONF.haproxy_amphora.rest_request_conn_timeout,
                     CONF.haproxy_amphora.rest_request_read_timeout)
    reqargs = {
        'verify': CONF.haproxy_amphora.server_ca,
        'url': _url,
        'timeout': timeout_tuple, }
    reqargs.update(kwargs)
    headers = reqargs.setdefault('headers', {})
    headers['User-Agent'] = OCTAVIA_API_CLIENT
    self.ssl_adapter.uuid = amp.id
    # Keep retrying
    for a in six.moves.xrange(CONF.haproxy_amphora.connection_max_retries):
        try:
            with warnings.catch_warnings():
                # urllib3 warns when no real SSLContext is available; harmless
                # here, so suppress it for the duration of the request.
                warnings.filterwarnings(
                    "ignore",
                    message="A true SSLContext object is not available"
                )
                r = _request(**reqargs)
        except (requests.ConnectionError, requests.Timeout):
            LOG.warning(_LW("Could not connect to instance. Retrying."))
            time.sleep(CONF.haproxy_amphora.connection_retry_interval)
            if a == CONF.haproxy_amphora.connection_max_retries - 1:
                raise driver_except.TimeOutException()
        else:
            return r
    raise driver_except.UnavailableException()
示例15: capture_glib_warnings
def capture_glib_warnings(allow_warnings=False, allow_criticals=False):
    """Temporarily suppress glib warning output and record them.
    The test suite is run with G_DEBUG="fatal-warnings fatal-criticals"
    by default. Setting allow_warnings and allow_criticals will temporarily
    allow warnings or criticals without terminating the test run.

    Yields the list of recorded warnings.
    NOTE(review): a generator used via `with` -- presumably decorated with
    @contextmanager above this chunk; confirm at the definition site.
    """
    # Clear the fatal mask so warnings don't abort, remembering the old one.
    old_mask = GLib.log_set_always_fatal(GLib.LogLevelFlags(0))
    new_mask = old_mask
    if allow_warnings:
        new_mask &= ~GLib.LogLevelFlags.LEVEL_WARNING
    if allow_criticals:
        new_mask &= ~GLib.LogLevelFlags.LEVEL_CRITICAL
    GLib.log_set_always_fatal(GLib.LogLevelFlags(new_mask))
    GLibWarning = gi._gi._gobject.Warning
    try:
        # Record every GLib warning instead of printing it.
        with warnings.catch_warnings(record=True) as warn:
            warnings.filterwarnings('always', category=GLibWarning)
            yield warn
    finally:
        # Always restore the original fatal mask for subsequent tests.
        GLib.log_set_always_fatal(old_mask)