This article collects typical usage examples of the numpy.nanquantile method in Python. If you have been wondering what numpy.nanquantile does, how it is called, and what real-world uses look like, the curated code examples below should help. You can also explore the numpy module further for related functions.
Eight code examples of numpy.nanquantile are shown below, ordered by popularity by default.
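As a quick reference before the examples, here is a minimal sketch of the basic call (the array is made up for illustration): np.nanquantile behaves like np.quantile but ignores NaN entries.

import numpy as np

# A small array with a missing value; nanquantile skips the NaN,
# whereas np.quantile would propagate it and return nan.
a = np.array([1.0, 2.0, np.nan, 4.0])

print(np.nanquantile(a, 0.5))           # median of [1, 2, 4] -> 2.0
print(np.quantile(a, 0.5))              # -> nan
print(np.nanquantile(a, [0.25, 0.75]))  # -> [1.5, 3.0]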
Example 1: _process_symmetric
# Required module: import numpy [as alias]
# Or: from numpy import nanquantile [as alias]
def _process_symmetric(self, symmetric, clim, check_symmetric_max):
    if symmetric is not None or clim is not None:
        return symmetric

    if is_xarray(self.data):
        # chunks mean it's lazily loaded; nanquantile will eagerly load
        if self.data.chunks:
            return False
        data = self.data[self.z]
        if is_xarray_dataarray(data):
            if data.size > check_symmetric_max:
                return False
        else:
            return False
    elif self._color_dim:
        data = self.data[self._color_dim]
    else:
        return

    cmin = np.nanquantile(data, 0.05)
    cmax = np.nanquantile(data, 0.95)
    return bool(cmin < 0 and cmax > 0)
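The quantile-based symmetry check above is easy to reuse outside hvPlot; the following stripped-down sketch applies the same heuristic to a plain array (the function name and test data are illustrative, not part of the original code):

import numpy as np

def roughly_symmetric(values, q=0.05):
    # The data is treated as symmetric about zero when the q-th quantile
    # is negative and the (1 - q)-th quantile is positive.
    cmin = np.nanquantile(values, q)
    cmax = np.nanquantile(values, 1 - q)
    return bool(cmin < 0 and cmax > 0)

rng = np.random.default_rng(0)
print(roughly_symmetric(rng.normal(0, 1, 1000)))   # True: centred on zero
print(roughly_symmetric(rng.uniform(2, 5, 1000)))  # False: strictly positive

Using the 5th and 95th percentiles rather than the raw min/max keeps the check robust to a few extreme outliers.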
Example 2: test_regression
# Required module: import numpy [as alias]
# Or: from numpy import nanquantile [as alias]
def test_regression(self):
    ar = np.arange(24).reshape(2, 3, 4).astype(float)
    ar[0][1] = np.nan

    assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50))
    assert_equal(np.nanquantile(ar, q=0.5, axis=0),
                 np.nanpercentile(ar, q=50, axis=0))
    assert_equal(np.nanquantile(ar, q=0.5, axis=1),
                 np.nanpercentile(ar, q=50, axis=1))
    assert_equal(np.nanquantile(ar, q=[0.5], axis=1),
                 np.nanpercentile(ar, q=[50], axis=1))
    assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1),
                 np.nanpercentile(ar, q=[25, 50, 75], axis=1))
Example 3: test_basic
# Required module: import numpy [as alias]
# Or: from numpy import nanquantile [as alias]
def test_basic(self):
    x = np.arange(8) * 0.5
    assert_equal(np.nanquantile(x, 0), 0.)
    assert_equal(np.nanquantile(x, 1), 3.5)
    assert_equal(np.nanquantile(x, 0.5), 1.75)
Example 4: test_no_p_overwrite
# Required module: import numpy [as alias]
# Or: from numpy import nanquantile [as alias]
def test_no_p_overwrite(self):
    # this is worth retesting, because quantile does not make a copy
    p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
    p = p0.copy()
    np.nanquantile(np.arange(100.), p, interpolation="midpoint")
    assert_array_equal(p, p0)

    p0 = p0.tolist()
    p = p.tolist()
    np.nanquantile(np.arange(100.), p, interpolation="midpoint")
    assert_array_equal(p, p0)
Example 5: test_nanquantile
# Required module: import numpy [as alias]
# Or: from numpy import nanquantile [as alias]
def test_nanquantile(self):
    self.check(np.nanquantile, 0.5)
    o = np.nanquantile(self.q, 50 * u.percent)
    expected = np.nanquantile(self.q.value, 0.5) * u.m
    assert np.all(o == expected)
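For context, astropy Quantity arrays hook into NumPy's function dispatch, so the call exercised by this test can be reproduced roughly as follows (a sketch assuming a recent astropy version; the sample data is made up):

import numpy as np
import astropy.units as u

q = [1.0, 2.0, np.nan, 4.0] * u.m

# The quantile can be given as a plain number or in percent;
# the NaN entry is skipped and the result keeps the unit of `q`.
print(np.nanquantile(q, 0.5))             # 2.0 m
print(np.nanquantile(q, 50 * u.percent))  # 2.0 m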
Example 6: main
# Required module: import numpy [as alias]
# Or: from numpy import nanquantile [as alias]
def main(args=None):
    """
    Main function to generate the polarization plot.
    """
    args = parse_arguments().parse_args(args)

    pc1 = pd.read_table(args.pca, header=None, sep="\t",
                        dtype={0: "object", 1: "Int64", 2: "Int64", 3: "float32"})
    pc1 = pc1.rename(columns={0: "chr", 1: "start", 2: "end", 3: "pc1"})

    if args.outliers != 0:
        quantile = [args.outliers / 100, (100 - args.outliers) / 100]
        boundaries = np.nanquantile(pc1['pc1'].values.astype(float), quantile)
        quantiled_bins = np.linspace(boundaries[0], boundaries[1],
                                     args.quantile)
    else:
        quantile = [j / (args.quantile - 1) for j in range(0, args.quantile)]
        quantiled_bins = np.nanquantile(pc1['pc1'].values.astype(float),
                                        quantile)

    pc1["quantile"] = np.searchsorted(quantiled_bins,
                                      pc1['pc1'].values.astype(float),
                                      side="right")
    # Flag NaN PC1 entries with an out-of-range bin index
    pc1.loc[pc1["pc1"].isna(), "quantile"] = args.quantile + 1
    polarization_ratio = []
    output_matrices = []
    labels = []
    for matrix in args.obsexp_matrices:
        obs_exp = hm.hiCMatrix(matrix)
        name = ".".join(matrix.split("/")[-1].split(".")[0:-1])
        labels.append(name)
        normalised_sum_per_quantile = count_interactions(obs_exp, pc1,
                                                         args.quantile,
                                                         args.offset)
        normalised_sum_per_quantile = np.nan_to_num(normalised_sum_per_quantile)
        if args.outputMatrix:
            output_matrices.append(normalised_sum_per_quantile)

        polarization_ratio.append(within_vs_between_compartments(
            normalised_sum_per_quantile,
            args.quantile))

    if args.outputMatrix:
        np.savez(args.outputMatrix, [matrix for matrix in output_matrices])
    plot_polarization_ratio(
        polarization_ratio, args.outputFileName, labels, args.quantile)
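The binning step in this example (np.nanquantile followed by np.searchsorted) can be isolated into a small self-contained sketch; the values and bin count below are made up for illustration:

import numpy as np

values = np.array([0.3, -1.2, np.nan, 0.8, -0.1, 2.5, -0.7])
n_bins = 4

# Bin edges at evenly spaced quantiles of the non-NaN values.
edges = np.nanquantile(values, [j / (n_bins - 1) for j in range(n_bins)])

# searchsorted assigns each value the index of the first edge above it;
# NaNs sort past every edge, so they are flagged with an out-of-range index.
bins = np.searchsorted(edges, values, side="right")
bins = np.where(np.isnan(values), n_bins + 1, bins)
print(edges)
print(bins)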
Example 7: fit
# Required module: import numpy [as alias]
# Or: from numpy import nanquantile [as alias]
def fit(self, X, y=None):
    """
    Computes the quantiles for each column of ``X``.

    :type X: pandas.DataFrame or numpy.ndarray
    :param X: The column(s) from which the capping limit(s) will be computed.
    :param y: Ignored.
    :rtype: sklego.preprocessing.ColumnCapper
    :returns: The fitted object.
    :raises:
        ``ValueError`` if ``X`` contains non-numeric columns
    """
    X = check_array(
        X, copy=True, force_all_finite=False, dtype=FLOAT_DTYPES, estimator=self
    )

    # If X contains infs, we need to replace them by nans before computing quantiles
    np.putmask(X, (X == np.inf) | (X == -np.inf), np.nan)

    # There should be no column containing only nan cells at this point. If that's not
    # the case, it means that the user asked ColumnCapper to fit some column containing
    # only nan or inf cells.
    nans_mask = np.isnan(X)
    invalid_columns_mask = (
        nans_mask.sum(axis=0) == X.shape[0]
    )  # Contains as many nans as rows
    if invalid_columns_mask.any():
        raise ValueError(
            "ColumnCapper cannot fit columns containing only inf/nan values"
        )

    q = [quantile_limit / 100 for quantile_limit in self.quantile_range]
    self.quantiles_ = np.nanquantile(
        a=X, q=q, axis=0, overwrite_input=True, interpolation=self.interpolation
    )

    # Saving the number of columns to ensure coherence between fit and transform inputs
    self.n_columns_ = X.shape[1]
    return self
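The heart of fit() is the column-wise nanquantile call; the same capping idea without the scikit-lego machinery looks roughly like this (quantile_range and the data are illustrative):

import numpy as np

X = np.array([[1.0,  10.0],
              [2.0,  np.nan],
              [3.0,  30.0],
              [50.0, 40.0]])

quantile_range = (5, 95)
q = [limit / 100 for limit in quantile_range]

# One (low, high) pair per column, ignoring NaNs.
low, high = np.nanquantile(X, q=q, axis=0)

# Cap each column to its own limits; NaNs pass through unchanged.
print(np.clip(X, low, high))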
Example 8: map_cdf
# Required module: import numpy [as alias]
# Or: from numpy import nanquantile [as alias]
def map_cdf(
    x: xr.DataArray,
    y: xr.DataArray,
    y_value: xr.DataArray,
    *,
    group: Union[str, Grouper] = "time",
    skipna: bool = False,
):
    """Return the value in `x` with the same CDF as `y_value` in `y`.

    Parameters
    ----------
    x : xr.DataArray
        Values from which to pick.
    y : xr.DataArray
        Reference values giving the ranking.
    y_value : float, array
        Value within the support of `y`.
    group : str or Grouper
        Grouping along which the quantile is computed.
    skipna : bool
        If True, NaNs are skipped (np.nanquantile is used instead of np.quantile).

    Returns
    -------
    array
        Quantile of `x` with the same CDF as `y_value` in `y`.
    """
    def _map_cdf_1d(x, y, y_value, skipna=False):
        q = _ecdf_1d(y, y_value)
        _func = np.nanquantile if skipna else np.quantile
        return _func(x, q=q)

    def _map_cdf_group(gr, y_value, dim=["time"], skipna=False):
        return xr.apply_ufunc(
            _map_cdf_1d,
            gr.x,
            gr.y,
            input_core_dims=[dim] * 2,
            output_core_dims=[["x"]],
            vectorize=True,
            keep_attrs=True,
            kwargs={"y_value": y_value, "skipna": skipna},
            dask="parallelized",
            output_dtypes=[gr.x.dtype],
        )

    return group.apply(
        _map_cdf_group, {"x": x, "y": y}, y_value=np.atleast_1d(y_value), skipna=skipna,
    )
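The 1-D kernel of map_cdf can be illustrated without xarray; the empirical-CDF helper below merely stands in for xclim's _ecdf_1d and is a simplified assumption, and the toy data is made up:

import numpy as np

def ecdf_1d(y, y_value):
    # Simplified empirical CDF: fraction of `y` at or below `y_value`.
    y = y[~np.isnan(y)]
    return np.searchsorted(np.sort(y), y_value, side="right") / y.size

rng = np.random.default_rng(42)
x = rng.normal(10.0, 2.0, 500)   # values to pick from
y = rng.normal(0.0, 1.0, 500)    # reference distribution

# The value of x sitting at the same CDF position as 1.5 does in y.
q = ecdf_1d(y, 1.5)
print(np.nanquantile(x, q))      # roughly 10 + 2 * 1.5 for these toy data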