This article collects typical usage examples of Python's warnings.catch_warnings function, for anyone wondering what catch_warnings does and how to use it in practice.
Below, 15 code examples of the catch_warnings function are shown, ordered by popularity.
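Before the examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: entering warnings.catch_warnings(record=True) diverts emitted warnings into a list instead of printing them, and the previous filter state is restored on exit. The warning message below is illustrative only.

import warnings

with warnings.catch_warnings(record=True) as w:
    # "always" makes sure every emission is recorded, even repeats
    warnings.simplefilter("always")
    warnings.warn("something looks off", UserWarning)

# Inspect what was captured after (or inside) the block.
assert len(w) == 1
assert issubclass(w[0].category, UserWarning)
assert "looks off" in str(w[0].message)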
Example 1: testWarnings
def testWarnings(self):
    # Smaller than the threshold: no warning.
    c_sparse = ops.IndexedSlices(array_ops.placeholder(dtypes.float32),
                                 array_ops.placeholder(dtypes.int32),
                                 constant([4, 4, 4, 4]))
    with warnings.catch_warnings(record=True) as w:
        math_ops.mul(c_sparse, 1.0)
    self.assertEqual(0, len(w))

    # Greater than or equal to the threshold: warning.
    c_sparse = ops.IndexedSlices(array_ops.placeholder(dtypes.float32),
                                 array_ops.placeholder(dtypes.int32),
                                 constant([100, 100, 100, 100]))
    with warnings.catch_warnings(record=True) as w:
        math_ops.mul(c_sparse, 1.0)
    self.assertEqual(1, len(w))
    self.assertTrue(
        "with 100000000 elements. This may consume a large amount of memory."
        in str(w[0].message))

    # Unknown dense shape: warning.
    c_sparse = ops.IndexedSlices(array_ops.placeholder(dtypes.float32),
                                 array_ops.placeholder(dtypes.int32),
                                 array_ops.placeholder(dtypes.int32))
    with warnings.catch_warnings(record=True) as w:
        math_ops.mul(c_sparse, 1.0)
    self.assertEqual(1, len(w))
    self.assertTrue(
        "of unknown shape. This may consume a large amount of memory."
        in str(w[0].message))
Example 2: test_long_cache_keys_shortened
def test_long_cache_keys_shortened(self):
    cache_settings = {
        'default': {
            'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
            'LOCATION': os.path.join(TOP_DIR, 'test.cache'),
        }
    }
    long_key_string = "X" * 251

    with override_settings(CACHES=cache_settings):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # memcached limits key length to 250
            cache.set(long_key_string, "hello cached world")
            self.assertEqual(len(w), 1)
            self.assertIsInstance(w[0].message, CacheKeyWarning)

    # Activate optional cache key length checker
    cache_settings['default']['KEY_FUNCTION'] = 'mainsite.utils.filter_cache_key'

    with override_settings(CACHES=cache_settings):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # memcached limits key length to 250
            cache.set(long_key_string, "hello cached world")
            self.assertEqual(len(w), 0)
            retrieved = cache.get(long_key_string)
            self.assertEqual(retrieved, "hello cached world")
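One detail Example 2 relies on: Python de-duplicates warnings by default, so without simplefilter("always") a warning raised twice from the same call site may be recorded only once. A minimal stdlib sketch of the difference, using a hypothetical noisy helper:

import warnings

def noisy():
    warnings.warn("repeated warning", UserWarning)

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")  # record every emission, not just the first
    noisy()
    noisy()
assert len(w) == 2

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("once")  # de-duplicate by message and category
    noisy()
    noisy()
assert len(w) == 1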
Example 3: test_calculate_chpi_positions
def test_calculate_chpi_positions():
    """Test calculation of cHPI positions."""
    trans, rot, t = head_pos_to_trans_rot_t(read_head_pos(pos_fname))
    with warnings.catch_warnings(record=True):
        raw = Raw(chpi_fif_fname, allow_maxshield=True, preload=True)
    t -= raw.first_samp / raw.info['sfreq']
    quats = _calculate_chpi_positions(raw, verbose='debug')
    trans_est, rot_est, t_est = head_pos_to_trans_rot_t(quats)
    _compare_positions((trans, rot, t), (trans_est, rot_est, t_est), 0.003)

    # degenerate conditions
    raw_no_chpi = Raw(test_fif_fname)
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_no_chpi)
    raw_bad = raw.copy()
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['coord_frame'] = 999
            break
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_bad)
    raw_bad = raw.copy()
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['r'] = np.ones(3)
    raw_bad.crop(0, 1., copy=False)
    with warnings.catch_warnings(record=True):  # bad pos
        with catch_logging() as log_file:
            _calculate_chpi_positions(raw_bad, verbose=True)
    # ignore HPI info header and [done] footer
    for line in log_file.getvalue().strip().split('\n')[4:-1]:
        assert_true('0/5 good' in line)
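Example 3 uses catch_warnings(record=True) without binding the result, purely to swallow expected warnings. Note that plain catch_warnings() only saves and restores filter state; warnings still print. record=True redirects them into a list, which silences them even when the list is discarded. A minimal sketch:

import warnings

# catch_warnings() alone does NOT silence anything; it only snapshots the
# filter state so that changes made inside the block are undone on exit.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    warnings.warn("hidden by an explicit ignore filter")

# record=True silences as a side effect: warnings are diverted into a
# list instead of stderr, even when the list is never looked at.
with warnings.catch_warnings(record=True):
    warnings.warn("diverted into a discarded list")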
Example 4: test_misspecifications
def test_misspecifications():
    # Tests for model specification and misspecification exceptions
    endog = np.arange(20).reshape(10, 2)

    # Bad trend specification
    assert_raises(ValueError, varmax.VARMAX, endog, order=(1, 0), trend='')
    # Bad error_cov_type specification
    assert_raises(ValueError, varmax.VARMAX, endog, order=(1, 0),
                  error_cov_type='')
    # Bad order specification
    assert_raises(ValueError, varmax.VARMAX, endog, order=(0, 0))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        varmax.VARMAX(endog, order=(1, 1))

    # Warning with VARMA specification
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        varmax.VARMAX(endog, order=(1, 1))
        message = ('Estimation of VARMA(p,q) models is not generically robust,'
                   ' due especially to identification issues.')
        assert_equal(str(w[0].message), message)
    warnings.resetwarnings()
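A side note on this example: the trailing warnings.resetwarnings() is largely redundant, because catch_warnings already restores the filter list on exit; resetwarnings() additionally wipes any filters installed before the test, including ones set via the -W command-line option. A quick sketch of the restore behavior:

import warnings

before = list(warnings.filters)
with warnings.catch_warnings():
    warnings.simplefilter("always")  # replaces the filter list...
assert warnings.filters == before    # ...but the original is back on exit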
Example 5: test_popen
def test_popen(self):
    mswindows = (sys.platform == "win32")

    if mswindows:
        command = '"{}" -c "print(\'Hello\')"'.format(sys.executable)
    else:
        command = "'{}' -c 'print(\"Hello\")'".format(sys.executable)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        with platform.popen(command) as stdout:
            hello = stdout.read().strip()
            stdout.close()
            self.assertEqual(hello, "Hello")

    data = 'plop'
    if mswindows:
        command = '"{}" -c "import sys; data=sys.stdin.read(); exit(len(data))"'
    else:
        command = "'{}' -c 'import sys; data=sys.stdin.read(); exit(len(data))'"
    command = command.format(sys.executable)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        with platform.popen(command, 'w') as stdin:
            stdout = stdin.write(data)
            ret = stdin.close()
            self.assertIsNotNone(ret)
            if os.name == 'nt':
                returncode = ret
            else:
                returncode = ret >> 8
            self.assertEqual(returncode, len(data))
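The pattern in Example 5, catch_warnings() without record=True, exists purely to scope the simplefilter("ignore", DeprecationWarning) call so the suppression does not leak out of the block. A minimal sketch with a hypothetical legacy_api function:

import warnings

def legacy_api():
    warnings.warn("use new_api() instead", DeprecationWarning, stacklevel=2)
    return 42

with warnings.catch_warnings():
    # Silence only DeprecationWarning; other categories still surface.
    warnings.simplefilter("ignore", DeprecationWarning)
    assert legacy_api() == 42
# Outside the block, DeprecationWarning filtering reverts to its prior state.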
Example 6: test_read_epochs_bad_events
def test_read_epochs_bad_events():
    """Test epochs when events are at the beginning or the end of the file."""
    # Event at the beginning
    epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]), event_id,
                    tmin, tmax, picks=picks, baseline=(None, 0))
    with warnings.catch_warnings(record=True):
        evoked = epochs.average()

    epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]), event_id,
                    tmin, tmax, picks=picks, baseline=(None, 0))
    epochs.drop_bad_epochs()
    with warnings.catch_warnings(record=True):
        evoked = epochs.average()

    # Event at the end
    epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]), event_id,
                    tmin, tmax, picks=picks, baseline=(None, 0))
    with warnings.catch_warnings(record=True):
        evoked = epochs.average()
    assert evoked
    warnings.resetwarnings()
Example 7: test_evoked_io_from_epochs
def test_evoked_io_from_epochs():
    """Test IO of evoked data made from epochs."""
    # offset our tmin so we don't get exactly a zero value when decimating
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
                        picks=picks, baseline=(None, 0), decim=5)
        assert_true(len(w) == 1)
    evoked = epochs.average()
    evoked.save(op.join(tempdir, "evoked-ave.fif"))
    evoked2 = read_evokeds(op.join(tempdir, "evoked-ave.fif"))[0]
    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
    assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
                    atol=1 / evoked.info["sfreq"])

    # now let's do one with negative time
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
                        picks=picks, baseline=(0.1, 0.2), decim=5)
    evoked = epochs.average()
    evoked.save(op.join(tempdir, "evoked-ave.fif"))
    evoked2 = read_evokeds(op.join(tempdir, "evoked-ave.fif"))[0]
    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
    assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)

    # should be equivalent to a cropped original
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
                        picks=picks, baseline=(0.1, 0.2), decim=5)
    evoked = epochs.average()
    evoked.crop(0.099, None)
    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
    assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
Example 8: _check_predict_proba
def _check_predict_proba(clf, X, y):
    proba = clf.predict_proba(X)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # We know that we can have division by zero
        log_proba = clf.predict_log_proba(X)

    y = np.atleast_1d(y)
    if y.ndim == 1:
        y = np.reshape(y, (-1, 1))

    n_outputs = y.shape[1]
    n_samples = len(X)

    if n_outputs == 1:
        proba = [proba]
        log_proba = [log_proba]

    for k in xrange(n_outputs):
        assert_equal(proba[k].shape[0], n_samples)
        assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
        assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # We know that we can have division by zero
            assert_array_equal(np.log(proba[k]), log_proba[k])
Example 9: _check_roundtrip
def _check_roundtrip(self, frame):
    _skip_if_no_MySQLdb()
    drop_sql = "DROP TABLE IF EXISTS test_table"
    cur = self.db.cursor()
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "Unknown table.*")
        cur.execute(drop_sql)

    sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql')
    result = sql.read_frame("select * from test_table", self.db)

    # HACK! Change this once indexes are handled properly.
    result.index = frame.index
    result.index.name = frame.index.name

    expected = frame
    tm.assert_frame_equal(result, expected)

    frame['txt'] = ['a'] * len(frame)
    frame2 = frame.copy()
    index = Index(lrange(len(frame2))) + 10
    frame2['Idx'] = index
    drop_sql = "DROP TABLE IF EXISTS test_table2"
    cur = self.db.cursor()
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "Unknown table.*")
        cur.execute(drop_sql)

    sql.write_frame(frame2, name='test_table2', con=self.db, flavor='mysql')
    result = sql.read_frame("select * from test_table2", self.db,
                            index_col='Idx')
    expected = frame.copy()

    # HACK! Change this once indexes are handled properly.
    expected.index = index
    expected.index.names = result.index.names

    tm.assert_frame_equal(expected, result)
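Unlike simplefilter, which matches only on category, the warnings.filterwarnings call used in Example 9 can also match the warning message against a regular expression ("Unknown table.*"). A minimal stdlib sketch:

import warnings

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    # Suppress only warnings whose message matches this regex.
    warnings.filterwarnings("ignore", message="Unknown table.*")
    warnings.warn("Unknown table 'test_table'")   # filtered out
    warnings.warn("something else went wrong")    # still recorded
assert len(w) == 1
assert "something else" in str(w[0].message)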
Example 10: test_class_weight_classifiers
def test_class_weight_classifiers():
    # test that class_weight works and that the semantics are consistent
    classifiers = all_estimators(type_filter="classifier")

    with warnings.catch_warnings(record=True):
        classifiers = [c for c in classifiers
                       if "class_weight" in c[1]().get_params().keys()]

    for n_centers in [2, 3]:
        # create a very noisy dataset
        X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
        X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                            test_size=0.5,
                                                            random_state=0)
        for name, Classifier in classifiers:
            if name == "NuSVC":
                # the sparse version has a parameter that doesn't do anything
                continue
            if name.endswith("NB"):
                # NaiveBayes classifiers have a somewhat different interface.
                # FIXME SOON!
                continue
            if n_centers == 2:
                class_weight = {0: 1000, 1: 0.0001}
            else:
                class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
            with warnings.catch_warnings(record=True):
                classifier = Classifier(class_weight=class_weight)
            if hasattr(classifier, "n_iter"):
                classifier.set_params(n_iter=100)
            set_random_state(classifier)
            classifier.fit(X_train, y_train)
            y_pred = classifier.predict(X_test)
            assert_greater(np.mean(y_pred == 0), 0.9)
Example 11: test_graph_iterative
def test_graph_iterative(self):
    graph = MigrationGraph()
    root = ("app_a", "1")
    graph.add_node(root, None)
    expected = [root]
    for i in range(2, 1000):
        parent = ("app_a", str(i - 1))
        child = ("app_a", str(i))
        graph.add_node(child, None)
        graph.add_dependency(str(i), child, parent)
        expected.append(child)
    leaf = expected[-1]

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always', RuntimeWarning)
        forwards_plan = graph.forwards_plan(leaf)

    self.assertEqual(len(w), 1)
    self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
    self.assertEqual(str(w[-1].message), RECURSION_DEPTH_WARNING)
    self.assertEqual(expected, forwards_plan)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always', RuntimeWarning)
        backwards_plan = graph.backwards_plan(root)

    self.assertEqual(len(w), 1)
    self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
    self.assertEqual(str(w[-1].message), RECURSION_DEPTH_WARNING)
    self.assertEqual(expected[::-1], backwards_plan)
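Two idioms from Example 11 worth copying: passing a category to simplefilter('always', RuntimeWarning) applies the filter to that category alone, and captured warnings are best checked with issubclass(w[-1].category, ...) plus str(w[-1].message), since w[-1].message is the warning instance, not a string. A compact sketch:

import warnings

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always", RuntimeWarning)  # only RuntimeWarning
    warnings.warn("maximum recursion depth reached", RuntimeWarning)

assert len(w) == 1
assert issubclass(w[-1].category, RuntimeWarning)
assert "recursion depth" in str(w[-1].message)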
Example 12: test_sparse_randomized_pca_inverse
def test_sparse_randomized_pca_inverse():
    """Test that RandomizedPCA is invertible on sparse data"""
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= 0.00001  # make middle component relatively small
    # no large means because the sparse version of randomized pca does not do
    # centering to avoid breaking the sparsity
    X = csr_matrix(X)

    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DeprecationWarning)
        pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
        assert_equal(len(w), 1)
        assert_equal(w[0].category, DeprecationWarning)

    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X.todense(), Y_inverse, decimal=2)

    # same as above with whitening (approximate reconstruction)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DeprecationWarning)
        pca = RandomizedPCA(n_components=2, whiten=True,
                            random_state=0).fit(X)
        assert_equal(len(w), 1)
        assert_equal(w[0].category, DeprecationWarning)

    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    relative_max_delta = (np.abs(X.todense() - Y_inverse)
                          / np.abs(X).mean()).max()
    # XXX: this does not seem to work as expected:
    assert_almost_equal(relative_max_delta, 0.91, decimal=2)
Example 13: test_random_pair_match
def test_random_pair_match(self):
    self.assertRaises(ValueError, dedupe.core.randomPairsMatch, 1, 0, 10)
    self.assertRaises(ValueError, dedupe.core.randomPairsMatch, 0, 0, 10)
    self.assertRaises(ValueError, dedupe.core.randomPairsMatch, 0, 1, 10)

    assert len(dedupe.core.randomPairsMatch(100, 100, 100)) == 100
    assert len(dedupe.core.randomPairsMatch(10, 10, 99)) == 99

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        pairs = dedupe.core.randomPairsMatch(10, 10, 200)
        assert len(w) == 1
        assert str(w[-1].message) == "Requested sample of size 200, only returning 100 possible pairs"
    assert len(pairs) == 100

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        pairs = dedupe.core.randomPairsMatch(10, 10, 200)
        assert len(w) == 1
        assert str(w[-1].message) == "Requested sample of size 200, only returning 100 possible pairs"

    random.seed(123)
    numpy.random.seed(123)
    pairs = dedupe.core.randomPairsMatch(10, 10, 10)
    assert pairs == set([(7, 3), (3, 3), (2, 9), (6, 0), (2, 0),
                         (1, 9), (9, 4), (0, 4), (1, 0), (1, 1)])
Example 14: test_random_pair
def test_random_pair(self):
    self.assertRaises(ValueError, dedupe.core.randomPairs, 1, 10)
    assert dedupe.core.randomPairs(10, 10).any()

    random.seed(123)
    numpy.random.seed(123)
    random_pairs = dedupe.core.randomPairs(10, 5)
    assert numpy.array_equal(random_pairs,
                             numpy.array([[0, 3],
                                          [3, 8],
                                          [4, 9],
                                          [5, 9],
                                          [2, 3]]))

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        dedupe.core.randomPairs(10, 10**6)
        assert len(w) == 1
        assert str(w[-1].message) == "Requested sample of size 1000000, only returning 45 possible pairs"

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        dedupe.core.randomPairs(10**40, 10)
        assert len(w) == 2
        assert str(w[0].message) == "There may be duplicates in the sample"
        assert "Asked to sample pairs from" in str(w[1].message)

    random.seed(123)
    numpy.random.seed(123)
    assert numpy.array_equal(dedupe.core.randomPairs(11**9, 1),
                             numpy.array([[1228959102, 1840268610]]))
Example 15: test_deprecated_score_func
def test_deprecated_score_func():
    # test that the old, deprecated way of passing a score / loss function is
    # still supported
    X, y = make_classification(n_samples=200, n_features=100, random_state=0)
    clf = LinearSVC(random_state=0)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
    cv.fit(X[:180], y[:180])
    y_pred = cv.predict(X[180:])
    C = cv.best_estimator_.C

    clf = LinearSVC(random_state=0)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, score_func=f1_score)
    with warnings.catch_warnings(record=True):
        # catch deprecation warning
        cv.fit(X[:180], y[:180])
    y_pred_func = cv.predict(X[180:])
    C_func = cv.best_estimator_.C

    assert_array_equal(y_pred, y_pred_func)
    assert_equal(C, C_func)

    # test loss where greater is worse
    def f1_loss(y_true_, y_pred_):
        return -f1_score(y_true_, y_pred_)

    clf = LinearSVC(random_state=0)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, loss_func=f1_loss)
    with warnings.catch_warnings(record=True):
        # catch deprecation warning
        cv.fit(X[:180], y[:180])
    y_pred_loss = cv.predict(X[180:])
    C_loss = cv.best_estimator_.C

    assert_array_equal(y_pred, y_pred_loss)
    assert_equal(C, C_loss)
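Finally, one catch_warnings pattern not shown in the examples above but common in test suites: temporarily escalating a warning category to an exception so a code path that warns fails loudly. A minimal sketch with a hypothetical might_warn function:

import warnings

def might_warn():
    warnings.warn("deprecated code path", DeprecationWarning)

with warnings.catch_warnings():
    warnings.simplefilter("error", DeprecationWarning)  # warn -> raise
    try:
        might_warn()
    except DeprecationWarning as exc:
        print("escalated to an exception:", exc)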