本文整理匯總了Python中numpy.percentile方法的典型用法代碼示例。如果您正苦於以下問題:Python numpy.percentile方法的具體用法?Python numpy.percentile怎麽用?Python numpy.percentile使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類numpy
的用法示例。
在下文中一共展示了numpy.percentile方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: _add_bootstrapped_inputs
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def _add_bootstrapped_inputs(self, base_algorithm, batch_sample_method,
                             nsamples, njobs_samples, percentile,
                             ts_byrow = False, ts_weighted = False):
    """Validate and store bootstrap settings, wrapping the base algorithm(s).

    Accepts either a single base algorithm or a list of them; each one is
    wrapped through ``self._make_bootstrapped`` with the given percentile
    and Thompson-sampling options.
    """
    # Only gamma/poisson resampling weights are supported for batch bootstrap.
    assert batch_sample_method in ('gamma', 'poisson')
    assert isinstance(nsamples, int)
    assert nsamples >= 1

    self.batch_sample_method = batch_sample_method
    self.nsamples = nsamples
    self.njobs_samples = _check_njobs(njobs_samples)

    if isinstance(base_algorithm, list):
        # One wrapped copy per supplied algorithm.
        self.base_algorithm = [
            self._make_bootstrapped(alg, percentile, ts_byrow, ts_weighted)
            for alg in base_algorithm
        ]
    else:
        self.base_algorithm = self._make_bootstrapped(
            base_algorithm, percentile, ts_byrow, ts_weighted)
示例2: test_seasonal_fdc_recorder
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def test_seasonal_fdc_recorder(self):
    """
    Check the "seasonal_fdc" recorder against numpy's percentile computed
    over the summer (June-August 2014) subset of the raw input flows.
    """
    model = load_model("timeseries4.json")

    csv_path = os.path.join(os.path.dirname(__file__), 'models', 'timeseries3.csv')
    df = pandas.read_csv(csv_path, parse_dates=True, dayfirst=True, index_col=0)

    # Expected FDC: five evenly spaced percentiles (20..100) per column.
    percentiles = np.linspace(20., 100., 5)
    summer = df.loc[pandas.Timestamp("2014-06-01"):pandas.Timestamp("2014-08-31"), :]
    expected_fdc = np.percentile(summer, percentiles, axis=0)

    model.run()
    assert_allclose(model.recorders["seasonal_fdc"].fdc, expected_fdc)
示例3: reset_percentile
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def reset_percentile(self, percentile=80):
    """
    Set the upper confidence bound percentile to a custom number

    Parameters
    ----------
    percentile : int [0,100]
        Percentile of the confidence interval to take.

    Returns
    -------
    self : obj
        This object
    """
    assert 0 < percentile < 100
    if self.is_fitted:
        # Propagate the new percentile to every fitted per-arm oracle.
        self._oracles.reset_attribute("percentile", percentile)
    self.base_algorithm.percentile = percentile
    return self
示例4: __init__
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def __init__(self, nchoices, percentile=80, fit_intercept=True,
             lambda_=1.0, ucb_from_empty=False,
             beta_prior='auto', smoothing=None, noise_to_smooth=True,
             assume_unique_reward=False,
             random_state=None, njobs=-1):
    """Initialize the policy with a logistic-regression oracle in UCB mode."""
    assert 0 < percentile < 100
    assert lambda_ > 0.
    # The shared oracle runs with ts=False (UCB rather than Thompson
    # sampling); the percentile doubles as its confidence-bound 'alpha'.
    oracle = _LogisticUCB_n_TS_single(lambda_=float(lambda_),
                                      fit_intercept=fit_intercept,
                                      alpha=float(percentile),
                                      ts=False)
    self._add_common_params(oracle, beta_prior, smoothing, noise_to_smooth,
                            njobs, nchoices,
                            False, None, False, assume_unique_reward,
                            random_state, assign_algo=True,
                            prior_def_ucb=True,
                            force_unfit_predict=ucb_from_empty)
    self.percentile = percentile
示例5: reset_threshold
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def reset_threshold(self, threshold="auto"):
    """
    Set the adaptive threshold to a custom number

    Parameters
    ----------
    threshold : float or "auto"
        New threshold to use. If passing "auto", will set it
        to 1.5/nchoices. Note that this threshold will still be
        decayed if the object was initialized with ``decay_type="threshold"``,
        and will still be updated if initialized with ``percentile != None``.

    Returns
    -------
    self : obj
        This object
    """
    if threshold == "auto":
        # Default heuristic: scale inversely with the number of arms.
        threshold = 1.5 / self.nchoices
    elif isinstance(threshold, int):
        threshold = float(threshold)
    assert isinstance(threshold, float)
    self.thr = threshold
    return self
示例6: agg_func
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def agg_func(request):
    """Map a parametrized aggregation name to (recorder spec, numpy reference func)."""
    name = request.param
    if name == "custom":
        # When using custom you assign the function rather than a string.
        spec = npy_func = custom_test_func
    elif name == "percentile":
        spec = {
            "func": "percentile",
            "args": [95],
            "kwargs": {}
        }
        npy_func = partial(np.percentile, q=95)
    elif name == "percentileofscore":
        spec = {
            "func": "percentileofscore",
            "kwargs": {
                "score": 0.5,
                "kind": "rank"
            }
        }
        npy_func = partial(percentileofscore_with_axis, score=0.5, kind="rank")
    else:
        # Plain string names map directly through the lookup table.
        spec = name
        npy_func = npy_funcs[name]
    return spec, npy_func
示例7: analyse_percentile
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def analyse_percentile(cpdf, col):
    """Print the 1/5/25/50/75/95/99 percentiles of ``cpdf[col]``, rounded to 3 dp."""
    quantiles = [1, 5, 25, 50, 75, 95, 99]
    rounded = [round(v, 3) for v in np.percentile(list(cpdf[col]), quantiles)]
    print(
        "\n預測偏差分位:",
        "\n1% 分位: ",
        rounded[0],
        "\n5% 分位: ",
        rounded[1],
        "\n25% 分位: ",
        rounded[2],
        "\n50% 分位: ",
        rounded[3],
        "\n75% 分位: ",
        rounded[4],
        "\n95% 分位: ",
        rounded[5],
        "\n99% 分位: ",
        rounded[6],
    )
示例8: __init__
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def __init__(self, img, percentiles=(1, 99)):
    """Create norm that is linear between lower and upper percentile of img

    Parameters
    ----------
    img: array_like
        Image to normalize
    percentiles: array_like, default=(1, 99)
        Lower and upper percentile to consider. Pixel values below will be
        set to zero, above to saturated.
    """
    # Fix: default was a mutable list ([1, 99]); a tuple avoids the shared
    # mutable-default pitfall and is otherwise interchangeable here.
    assert len(percentiles) == 2
    vmin, vmax = np.percentile(img, percentiles)
    # solution for beta assumes flat spectrum at vmax
    stretch = vmax - vmin
    beta = stretch / np.sinh(1)
    super().__init__(minimum=vmin, stretch=stretch, Q=beta)
示例9: _compute_data_weights_topk
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def _compute_data_weights_topk(self, opts, density_ratios):
"""Put a uniform distribution on K points with largest prob real data.
This is a naiive heuristic which makes next GAN concentrate on those
points of the training set, which were classified correctly with
largest margins. I.e., out current mixture model is not capable of
generating points looking similar to these ones.
"""
threshold = np.percentile(density_ratios,
opts["topk_constant"]*100.0)
# Note that largest prob_real_data corresponds to smallest density
# ratios.
mask = density_ratios <= threshold
data_weights = np.zeros(self._data_num)
data_weights[mask] = 1.0 / np.sum(mask)
return data_weights
示例10: bench
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def bench(f_):
    """Time 100 forward/backward passes of *f_*.

    Returns a pair of arrays: the [25, 50, 75] percentiles (seconds) of the
    forward timings and of the backward timings.
    """
    fwd_times = []
    bck_times = []
    for _ in range(100):
        with f_ as f:
            start = time.perf_counter()
            f.forward()
            # Synchronize so queued CUDA kernels are included in the timing.
            torch.cuda.synchronize()
            fwd_times.append(time.perf_counter() - start)

            start = time.perf_counter()
            f.backward()
            torch.cuda.synchronize()
            bck_times.append(time.perf_counter() - start)
    return (np.percentile(fwd_times, [25, 50, 75]),
            np.percentile(bck_times, [25, 50, 75]))
示例11: scale_EVI
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def scale_EVI(evi, periods, qmin=10, qmax=90):
    """Rescale EVI within each period to its [qmin, qmax] percentile range.

    For every unique value in ``periods``, the matching EVI observations are
    linearly mapped so the ``qmin`` percentile becomes 0 and the ``qmax``
    percentile becomes 1; observations outside that percentile band end up
    outside [0, 1].

    Args:
        evi (np.ndarray): EVI values
        periods (np.ndarray): intervals of years to group and scale together
        qmin (float, optional): lower percentile for scaling (default: 10)
        qmax (float, optional): upper percentile for scaling (default: 90)

    Returns:
        np.ndarray: scaled EVI array
    """
    scaled = evi.copy()
    for period in np.unique(periods):
        idx = np.where(periods == period)
        lo, hi = np.percentile(evi[idx], [qmin, qmax])
        scaled[idx] = (evi[idx] - lo) / (hi - lo)
    return scaled
示例12: _nanquantile_ureduce_func
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
                              interpolation='linear'):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanpercentile for parameter usage
    """
    if axis is not None and a.ndim != 1:
        result = np.apply_along_axis(_nanquantile_1d, axis, a, q,
                                     overwrite_input, interpolation)
        # apply_along_axis fills in the collapsed axis in place; move that
        # axis to the front to match percentile's convention.
        if q.ndim != 0:
            result = np.moveaxis(result, axis, 0)
    else:
        # No axis (or already 1-d): reduce over the flattened array.
        result = _nanquantile_1d(a.ravel(), q, overwrite_input, interpolation)

    if out is not None:
        out[...] = result
    return result
示例13: test_keepdims
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def test_keepdims(self):
d = np.ones((3, 5, 7, 11))
assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape,
(1, 1, 7, 11))
assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape,
(1, 5, 7, 1))
assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape,
(3, 1, 7, 11))
assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape,
(1, 1, 7, 1))
assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3),
keepdims=True).shape, (2, 1, 1, 7, 1))
assert_equal(np.percentile(d, [1, 7], axis=(0, 3),
keepdims=True).shape, (2, 1, 5, 7, 1))
示例14: test_out
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.percentile(mat, 42, axis=1)
res = np.nanpercentile(nan_mat, 42, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.percentile(mat, 42, axis=None)
res = np.nanpercentile(nan_mat, 42, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
示例15: test_multiple_percentiles
# 需要導入模塊: import numpy [as 別名]
# 或者: from numpy import percentile [as 別名]
def test_multiple_percentiles(self):
perc = [50, 100]
mat = np.ones((4, 3))
nan_mat = np.nan * mat
# For checking consistency in higher dimensional case
large_mat = np.ones((3, 4, 5))
large_mat[:, 0:2:4, :] = 0
large_mat[:, :, 3:] *= 2
for axis in [None, 0, 1]:
for keepdim in [False, True]:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "All-NaN slice encountered")
val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val.shape, val.shape)
val = np.percentile(large_mat, perc, axis=axis,
keepdims=keepdim)
nan_val = np.nanpercentile(large_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val, val)
megamat = np.ones((3, 4, 5, 6))
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))