This article collects typical usage examples of the xarray.concat method in Python. If you are unsure what xarray.concat does or how to use it, the curated code samples below may help; you can also explore further examples from the xarray module itself.
The following 15 code examples of xarray.concat are listed roughly in order of popularity.
Example 1: _bounds_from_array
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def _bounds_from_array(arr, dim_name, bounds_name):
"""Get the bounds of an array given its center values.
E.g. if lat-lon grid center lat/lon values are known, but not the
bounds of each grid box. The algorithm assumes that the bounds
are simply halfway between each pair of center values.
"""
# TODO: don't assume needed dimension is in axis=0
# TODO: refactor to get rid of repetitive code
spacing = arr.diff(dim_name).values
lower = xr.DataArray(np.empty_like(arr), dims=arr.dims,
coords=arr.coords)
lower.values[:-1] = arr.values[:-1] - 0.5*spacing
lower.values[-1] = arr.values[-1] - 0.5*spacing[-1]
upper = xr.DataArray(np.empty_like(arr), dims=arr.dims,
coords=arr.coords)
upper.values[:-1] = arr.values[:-1] + 0.5*spacing
upper.values[-1] = arr.values[-1] + 0.5*spacing[-1]
bounds = xr.concat([lower, upper], dim='bounds')
return bounds.T
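A minimal usage sketch for the helper above; the latitude values are invented for illustration, and the function is assumed to be in scope as defined here:

import numpy as np
import xarray as xr

lat = xr.DataArray(np.array([-60., -30., 0., 30., 60.]), dims=['lat'],
                   coords={'lat': [-60., -30., 0., 30., 60.]}, name='lat')
lat_bnds = _bounds_from_array(lat, 'lat', 'lat_bnds')
print(lat_bnds.shape)  # (5, 2): one (lower, upper) pair per grid-cell center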
Example 2: concatenate_intensity_tables
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def concatenate_intensity_tables(
intensity_tables: List["IntensityTable"],
overlap_strategy: Optional[OverlapStrategy] = None
) -> "IntensityTable":
"""
Parameters
----------
intensity_tables: List[IntensityTable]
List of IntensityTables to be combined.
overlap_strategy
Returns
-------
"""
if overlap_strategy:
intensity_tables = IntensityTable._process_overlaps(
intensity_tables, overlap_strategy
)
return xr.concat(intensity_tables, dim=Features.AXIS)
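Stripped of the starfish-specific types, the core pattern is plain xr.concat along a shared features dimension; a rough, library-free sketch (the dimension names and sizes are made up):

import numpy as np
import xarray as xr

table_a = xr.DataArray(np.random.rand(3, 4), dims=['features', 'rounds'])
table_b = xr.DataArray(np.random.rand(5, 4), dims=['features', 'rounds'])
combined = xr.concat([table_a, table_b], dim='features')
print(dict(combined.sizes))  # {'features': 8, 'rounds': 4}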
Example 3: concatenate
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def concatenate(expression_matrices: Iterable[ExpressionMatrix]) -> ExpressionMatrix:
"""Concatenate IntensityTables produced for different fields of view or across imaging rounds
Expression Matrices are concatenated along the cells axis, and the resulting arrays are stored
densely.
Parameters
----------
expression_matrices : Iterable[ExpressionMatrix]
iterable (list-like) of expression matrices to combine
Returns
-------
ExpressionMatrix :
Concatenated expression matrix containing all input cells. Missing gene values are filled
with np.nan
See Also
--------
Combine_first: http://xarray.pydata.org/en/stable/combining.html#combine
"""
concatenated_matrix: xr.DataArray = xr.concat(list(expression_matrices), Features.CELLS)
return ExpressionMatrix(concatenated_matrix)
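To see the NaN-filling behaviour mentioned in the docstring without starfish objects, here is a small sketch with invented cell and gene labels; xr.concat's default join='outer' aligns the non-concatenated 'genes' dimension and fills missing entries with NaN:

import numpy as np
import xarray as xr

m1 = xr.DataArray([[1.0, 2.0]], dims=['cells', 'genes'],
                  coords={'cells': ['cell_a'], 'genes': ['gene_x', 'gene_y']})
m2 = xr.DataArray([[3.0, 4.0]], dims=['cells', 'genes'],
                  coords={'cells': ['cell_b'], 'genes': ['gene_y', 'gene_z']})
combined = xr.concat([m1, m2], 'cells')
print(float(combined.sel(cells='cell_a', genes='gene_z')))  # nan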
Example 4: convert_lons_lats_ncep
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def convert_lons_lats_ncep(ds, xs, ys):
if not isinstance(xs, slice):
first, second, last = np.asarray(xs)[[0,1,-1]]
xs = slice(first - 0.1*(second - first), last + 0.1*(second - first))
if not isinstance(ys, slice):
first, second, last = np.asarray(ys)[[0,1,-1]]
ys = slice(first - 0.1*(second - first), last + 0.1*(second - first))
ds = ds.sel(lat_0=ys)
# Lons should go from -180. to +180.
if len(ds.coords['lon_0'].sel(lon_0=slice(xs.start + 360., xs.stop + 360.))):
ds = xr.concat([ds.sel(lon_0=slice(xs.start + 360., xs.stop + 360.)),
ds.sel(lon_0=xs)],
dim="lon_0")
ds = ds.assign_coords(lon_0=np.where(ds.coords['lon_0'].values <= 180,
ds.coords['lon_0'].values,
ds.coords['lon_0'].values - 360.))
else:
ds = ds.sel(lon_0=xs)
ds = ds.rename({'lon_0': 'x', 'lat_0': 'y'})
ds = ds.assign_coords(lon=ds.coords['x'], lat=ds.coords['y'])
return ds
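The longitude-wrapping trick above is easier to see on a toy grid. This sketch (variable name and grid spacing are invented) selects a band that straddles the 0/360 seam of a 0..360-degree grid, concatenates the two pieces, and relabels the coordinate to the -180..180 convention:

import numpy as np
import xarray as xr

# Toy field on a 0..360-degree longitude grid, as in NCEP files.
ds = xr.Dataset({'t2m': ('lon_0', np.arange(8.0))},
                coords={'lon_0': np.arange(0.0, 360.0, 45.0)})
# Request longitudes -100..50: take the 260..360 band plus the 0..50 band.
wrapped = xr.concat([ds.sel(lon_0=slice(260.0, 360.0)),
                     ds.sel(lon_0=slice(0.0, 50.0))], dim='lon_0')
wrapped = wrapped.assign_coords(lon_0=np.where(wrapped.lon_0.values <= 180,
                                               wrapped.lon_0.values,
                                               wrapped.lon_0.values - 360.0))
print(wrapped.lon_0.values)  # [-90. -45.   0.  45.]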
Example 5: _open_files
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def _open_files(self, files):
import xarray as xr
das = [xr.open_rasterio(f, chunks=self.chunks, **self._kwargs)
for f in files]
out = xr.concat(das, dim=self.dim)
coords = {}
if self.pattern:
coords = {
k: xr.concat(
[xr.DataArray(
np.full(das[i].sizes.get(self.dim, 1), v),
dims=self.dim
) for i, v in enumerate(values)], dim=self.dim)
for k, values in reverse_formats(self.pattern, files).items()
}
return out.assign_coords(**coords).chunk(self.chunks)
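The coordinate-attachment part of _open_files can be illustrated without rasterio. This sketch (array sizes and file dates are invented) concatenates two in-memory arrays along a new 'band' dimension and attaches a per-file coordinate built the same way as the pattern-derived coords above:

import numpy as np
import xarray as xr

das = [xr.DataArray(np.random.rand(2, 2), dims=['y', 'x']) for _ in range(2)]
out = xr.concat(das, dim='band')                       # new 'band' dim, length 2
dates = ['2020-01-01', '2020-01-02']                   # e.g. parsed from file names
date_coord = xr.concat([xr.DataArray(np.full(das[i].sizes.get('band', 1), v), dims='band')
                        for i, v in enumerate(dates)], dim='band')
out = out.assign_coords(date=date_coord)
print(out.coords['date'].values)  # ['2020-01-01' '2020-01-02']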
Example 6: test_extract_months
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def test_extract_months():
time = xr.DataArray(pd.date_range(start='2001-02-18', end='2002-07-12',
freq='1D'), dims=[TIME_STR])
months = 'mam' # March-April-May
desired = xr.concat([
xr.DataArray(pd.date_range(start='2001-03-01', end='2001-05-31',
freq='1D'), dims=[TIME_STR]),
xr.DataArray(pd.date_range(start='2002-03-01', end='2002-05-31',
freq='1D'), dims=[TIME_STR])
], dim=TIME_STR)
actual = extract_months(time, months)
xr.testing.assert_identical(actual, desired)
Example 7: concat_data
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def concat_data(self, data, *args, **kwargs):
"""Concats data1 and data2 for xarray or pandas as needed
Parameters
----------
data : pandas or xarray
Data to be appended to data already within the Instrument object
    Returns
    -------
    pandas.DataFrame or xarray.Dataset
        The concatenated data (this method returns the result rather than
        modifying Instrument.data in place).
Notes
-----
For pandas, sort=False is passed along to the underlying
pandas.concat method. If sort is supplied as a keyword, the
user provided value is used instead.
For xarray, dim='time' is passed along to xarray.concat
except if the user includes a value for dim as a
keyword argument.
"""
if self.pandas_format:
if 'sort' in kwargs:
sort = kwargs['sort']
_ = kwargs.pop('sort')
else:
sort = False
return pds.concat(data, sort=sort, *args, **kwargs)
else:
if 'dim' in kwargs:
dim = kwargs['dim']
_ = kwargs.pop('dim')
else:
dim = 'time'
return xr.concat(data, dim=dim, *args, **kwargs)
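For the xarray branch of concat_data, the call reduces to xr.concat along 'time'; a small self-contained sketch with invented instrument counts:

import numpy as np
import pandas as pd
import xarray as xr

d1 = xr.Dataset({'counts': ('time', np.arange(3))},
                coords={'time': pd.date_range('2020-01-01', periods=3, freq='D')})
d2 = xr.Dataset({'counts': ('time', np.arange(3, 6))},
                coords={'time': pd.date_range('2020-01-04', periods=3, freq='D')})
combined = xr.concat([d1, d2], dim='time')
print(combined.time.size)  # 6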
Example 8: _reshape
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def _reshape(da, window_width):
"""
Helper function for `fit` that splits the year and day
dimensions of the time-coordinate and bookends the years
e.g. (Dec15:31 + whole year + Jan1:15) if window_width is 31 days.
Parameters
----------
da : xr.DataArray, shape (n_samples, )
Samples
window_width : int
The size of the rolling window.
Returns
-------
    da_rsh : xr.DataArray, shape (day: 364 + n_bookend_days, year: n_years)
        Reshaped xr.DataArray
"""
assert da.ndim == 1
if "time" not in da.coords and "index" in da.coords:
da = da.rename({"index": "time"})
assert "time" in da.coords
def split(g):
return g.rename({"time": "day"}).assign_coords(day=g.time.dt.dayofyear.values)
da_split = da.groupby("time.year").map(split)
early_jans = da_split.isel(day=slice(None, window_width // 2))
late_decs = da_split.isel(day=slice(-window_width // 2, None))
da_rsh = xr.concat([late_decs, da_split, early_jans], dim="day")
return da_rsh
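The bookending step at the end of _reshape is easiest to see in isolation. This simplified sketch (toy values, and a plain half-window split rather than the exact negative-index slicing above) pads one year of daily data with copies of its own late-December and early-January days:

import numpy as np
import xarray as xr

year = xr.DataArray(np.arange(1, 366), dims='day', coords={'day': np.arange(1, 366)})
half = 31 // 2  # half of a 31-day window
bookended = xr.concat([year.isel(day=slice(-half, None)),   # late December
                       year,
                       year.isel(day=slice(None, half))],   # early January
                      dim='day')
print(bookended.sizes['day'])  # 365 + 2 * 15 = 395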
Example 9: _finalise_arr
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def _finalise_arr(self, arr, N):
if isinstance(arr, list):
logger.debug("Concatenating {N:d} DataArrays...".format(N=N))
if self.concat_coor is None:
return utils.concat_each_time_coordinate(*arr)
else:
return xarray.concat(arr, dim=self.concat_coor)
logger.debug("Done!")
else:
return self._correct_overallocation(arr, N)
Example 10: test_multidimensional_error
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def test_multidimensional_error():
gdf = gpd.read_file(os.path.join(TEST_INPUT_DATA_DIR, "soil_data_flat.geojson"))
vxd = vectorxarray.from_geodataframe(gdf)
vxd2 = vxd.copy()
vxd.coords["time"] = parse("20170516T000000")
vxd2.coords["time"] = parse("20170517T000000")
merged_vxd = xarray.concat([vxd, vxd2], dim="time")
with pytest.raises(ValueError):
merged_vxd.vector.plot(column="sandtotal_r")
Example 11: _bootstrap_dim
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def _bootstrap_dim(control, nlead_years, dim, dim_label):
"""
Add a `len(dim_label)` dimension `dim` to uninitialized control by random
resampling.
"""
c_start = 0
c_end = control['time'].size
leads = np.arange(1, 1 + nlead_years)
def isel_years(control, year_s, length=nlead_years):
new = control.isel(time=slice(year_s, year_s + length))
new = new.rename({'time': 'lead'})
new['lead'] = leads
return new
def create_pseudo_members(control):
startlist = np.random.randint(c_start, c_end - nlead_years - 1, len(dim_label))
return xr.concat([isel_years(control, start) for start in startlist], dim)
control_uninitialized = create_pseudo_members(control)
control_uninitialized[dim] = dim_label
return control_uninitialized
# TODO: refactoring needed. proposed steps:
# first calculate all EOFs. save those. then calc compute_relative_entropy
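A quick way to exercise _bootstrap_dim is with a toy control run; the 30-year series and the three pseudo-member labels below are invented:

import numpy as np
import xarray as xr

control = xr.DataArray(np.random.rand(30), dims='time',
                       coords={'time': np.arange(1900, 1930)})
boot = _bootstrap_dim(control, nlead_years=5, dim='member', dim_label=np.arange(3))
print(boot.dims)  # ('member', 'lead'): 3 resampled pseudo-members, 5 leads each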
Example 12: _same_verifs_alignment
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def _same_verifs_alignment(init_lead_matrix, valid_inits, all_verifs, leads, n, freq):
"""Returns initializations and verification dates, maintaining a common verification
window at all leads.
See ``return_inits_and_verif_dates`` for descriptions of expected variables.
"""
common_set_of_verifs = [
i for i in all_verifs if (i == init_lead_matrix).any('time').all('lead')
]
if not common_set_of_verifs:
raise CoordinateError(
'A common set of verification dates cannot be found for the '
'initializations and verification data supplied. Change `alignment` to '
"'same_inits' or 'maximize'."
)
# Force to CFTimeIndex for consistency with `same_inits`
verif_dates = xr.concat(common_set_of_verifs, 'time').to_index()
inits_that_verify_with_verif_dates = init_lead_matrix.isin(verif_dates)
inits = {
lead: valid_inits.where(
inits_that_verify_with_verif_dates.sel(lead=lead), drop=True
)
for lead in leads
}
verif_dates = {lead: verif_dates for lead in leads}
return inits, verif_dates
Example 13: _construct_init_lead_matrix
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def _construct_init_lead_matrix(forecast, n, freq, leads):
"""Returns xr.DataArray of "real time" (init + lead) over all inits and leads.
Arguments:
forecast (``xarray object``): Prediction ensemble with ``init`` dim renamed to
``time`` and containing ``lead`` dim.
n (tuple of ints): Number of units to shift for ``leads``. ``value`` for
``CFTimeIndex.shift(value, str)``.
freq (str): Pandas frequency alias. ``str`` for
``CFTimeIndex.shift(value, str)``.
leads (list, array, xr.DataArray of ints): Leads to return offset for.
Returns:
init_lead_matrix (``xr.DataArray``): DataArray with x=inits and y=lead with
values corresponding to "real time", or ``init + lead`` over all inits and
leads.
"""
# Note that `init` is renamed to `time` in compute functions.
init_lead_matrix = xr.concat(
[
xr.DataArray(
shift_cftime_index(forecast, 'time', n, freq),
dims=['time'],
coords=[forecast['time']],
)
            for n in n  # note: the loop variable shadows the tuple of shift offsets, also named n
],
'lead',
)
init_lead_matrix['lead'] = leads
return init_lead_matrix
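A library-free sketch of the same construction, using pandas' DatetimeIndex.shift in place of climpred's shift_cftime_index helper (the initialization dates and leads are invented):

import pandas as pd
import xarray as xr

inits = pd.date_range('2000-01-01', periods=4, freq='YS')  # yearly initializations
leads = [1, 2, 3]
init_lead_matrix = xr.concat(
    [xr.DataArray(inits.shift(n, freq='YS'), dims=['time'], coords=[inits])
     for n in leads],
    'lead',
)
init_lead_matrix['lead'] = leads
print(dict(init_lead_matrix.sizes))  # {'lead': 3, 'time': 4}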
Example 14: decorrelation_time
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def decorrelation_time(da, r=20, dim='time'):
"""Calculate the decorrelaton time of a time series.
.. math::
\\tau_{d} = 1 + 2 * \\sum_{k=1}^{r}(\\alpha_{k})^{k}
Args:
da (xarray object): Time series.
r (optional int): Number of iterations to run the above formula.
dim (optional str): Time dimension for xarray object.
Returns:
Decorrelation time of time series.
Reference:
* Storch, H. v, and Francis W. Zwiers. Statistical Analysis in Climate
Research. Cambridge ; New York: Cambridge University Press, 1999.,
p.373
"""
one = xr.ones_like(da.isel({dim: 0}))
one = one.where(da.isel({dim: 0}).notnull())
return one + 2 * xr.concat(
[autocorr(da, dim=dim, lag=i) ** i for i in range(1, r)], 'it'
).sum('it')
# --------------------------------------------#
# Diagnostic Potential Predictability (DPP)
# Functions related to DPP from Boer et al.
# --------------------------------------------#
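To get a feel for the decorrelation_time formula without climpred's autocorr helper, here is a rough substitute that estimates the lagged autocorrelation with xr.corr on shifted copies (white-noise input invented; purely illustrative, not the library's implementation):

import numpy as np
import xarray as xr

rng = np.random.default_rng(0)
series = xr.DataArray(rng.standard_normal(500), dims='time')

def lag_autocorr(da, lag, dim='time'):
    # Pearson correlation between the series and a lag-shifted copy of itself.
    return xr.corr(da, da.shift({dim: lag}), dim=dim)

tau_d = 1 + 2 * xr.concat([lag_autocorr(series, k) ** k for k in range(1, 20)],
                          'it').sum('it')
print(float(tau_d))  # close to 1 for uncorrelated (white-noise) data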
Example 15: test_m2e
# Required import: import xarray [as alias]
# Or: from xarray import concat [as alias]
def test_m2e(PM_da_initialized_1d):
"""Test many-to-ensemble-mean (m2e) comparison basic functionality.
Clean comparison: Remove one member from ensemble to use as reference.
Take the remaining members as forecasts."""
ds = PM_da_initialized_1d
aforecast, areference = __m2e.function(ds, metric=metric)
reference_list = []
forecast_list = []
for m in ds.member.values:
forecast = _drop_members(ds, removed_member=[m]).mean('member')
reference = ds.sel(member=m).squeeze()
forecast, reference = xr.broadcast(forecast, reference)
forecast_list.append(forecast)
reference_list.append(reference)
reference = xr.concat(reference_list, 'member')
forecast = xr.concat(forecast_list, 'member')
forecast['member'] = np.arange(forecast.member.size)
reference['member'] = np.arange(reference.member.size)
eforecast, ereference = forecast, reference
# very weak testing on shape
assert eforecast.size == aforecast.size
assert ereference.size == areference.size
assert_equal(eforecast, aforecast)
assert_equal(ereference, areference)