This article collects typical usage examples of the xarray.merge method in Python. If you are wondering how exactly xarray.merge works, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples of the xarray module that provides this method.
A total of 15 code examples of xarray.merge are shown below, sorted by popularity by default.
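Before the project excerpts, here is a minimal, self-contained sketch of what xarray.merge does; the toy datasets and variable names are illustrative and not taken from any of the projects quoted below.

import xarray as xr

ds1 = xr.Dataset({'temperature': ('x', [11.2, 12.5])}, coords={'x': [0, 1]})
ds2 = xr.Dataset({'humidity': ('x', [0.4, 0.6])}, coords={'x': [0, 1]})

# merge combines the variables of both Datasets, aligning them on the shared
# 'x' coordinate; conflicting values raise xr.MergeError unless compat is relaxed.
merged = xr.merge([ds1, ds2])
print(merged.data_vars)  # temperature and humidity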
Example 1: read
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def read(self, filename, fields=None, **kwargs):
    scene = Scene(
        reader=self.satpy_reader,
        filenames=[filename.path]
    )
    # If the user has not passed any fields, load all available datasets by default.
    if fields is None:
        fields = scene.available_dataset_ids()
    # Load all selected fields
    scene.load(fields, **kwargs)
    if isinstance(fields[0], str):
        data_arrays = {field: scene.get(field) for field in fields}
    else:
        data_arrays = {field.name: scene.get(field) for field in fields}
    for name, array in data_arrays.items():
        array.name = name
    dataset = xr.merge(data_arrays.values())
    return dataset
Example 2: merge_datasets
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def merge_datasets(datasets, **kwargs):
    merged = []
    for ds in datasets:
        ds.attrs.pop('history', None)
        # Try to merge ds into an existing group whose attributes all match.
        for i, o in enumerate(merged):
            if all(o.attrs[k] == ds.attrs[k] for k in o.attrs):
                try:
                    o = xr.merge([o, ds], **kwargs)
                    o.attrs.update(ds.attrs)
                    merged[i] = o
                    break
                except Exception:
                    pass
        else:
            # No break happened: ds did not merge into any existing group.
            merged.append(ds)
    return merged
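A hypothetical call to merge_datasets as defined above, just to illustrate the grouping behaviour; the toy datasets and the 'model' attribute are made up for this sketch.

import xarray as xr

a = xr.Dataset({'u': ('t', [1.0, 2.0])}, attrs={'model': 'A'})
b = xr.Dataset({'v': ('t', [3.0, 4.0])}, attrs={'model': 'A'})
c = xr.Dataset({'u': ('t', [9.0, 9.0])}, attrs={'model': 'B'})

groups = merge_datasets([a, b, c])
# a and b share identical attributes and are merged into one Dataset,
# while c starts a second group, so len(groups) == 2.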
Example 3: extend_map
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def extend_map(mapx, mapy, on):
    if not hasattr(on, '__iter__'):
        on = [on]
    try:
        inter = mapx[on].reset_index().merge(mapy[on].reset_index(), on=on)
    except KeyError:
        inter = pd.DataFrame()
    extended = pd.DataFrame()
    ext_sessions = list(set(mapx.sessions).union(set(mapy.sessions)))
    for inter_idx, inter_row in inter.iterrows():
        sx = mapx.loc[inter_row.loc['index_x'], mapx.sessions]
        sy = mapy.loc[inter_row.loc['index_y'], mapy.sessions]
        extrow = pd.concat([sx, sy]).drop_duplicates()
        if len(extrow) <= len(ext_sessions):
            extended = extended.append(extrow, ignore_index=True)
    extended.sessions = ext_sessions
    return extended
Example 4: compute_correlations
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def compute_correlations(temp_comp, along, across):
    segments = temp_comp.attrs['segments']
    corr_list = []
    for cur_anm in temp_comp['animal'].values:
        for comb in itt.combinations_with_replacement(segments, 2):
            dat_A = temp_comp.sel(animal=cur_anm, session_id=comb[0][0])
            dat_A = dat_A.where(
                dat_A['segment_id'] == comb[0][1],
                drop=True).to_array().drop('segment_id').squeeze(
                    'variable', drop=True)
            dat_B = temp_comp.sel(animal=cur_anm, session_id=comb[1][0])
            dat_B = dat_B.where(
                dat_B['segment_id'] == comb[1][1],
                drop=True).to_array().drop('segment_id').squeeze(
                    'variable', drop=True)
            if dat_A.size > 0 and dat_B.size > 0:
                print("computing correlation of {} with {} for animal {}".
                      format(comb[0], comb[1], cur_anm))
                cur_corr = corr2_coeff_xr(dat_A, dat_B, along, across)
                cur_corr.coords['session_id_A'] = comb[0][0]
                cur_corr.coords['session_id_B'] = comb[1][0]
                cur_corr.coords['segment_id_A'] = comb[0][1]
                cur_corr.coords['segment_id_B'] = comb[1][1]
                corr_list.append(cur_corr)  # collect each correlation for the final merge
    print("merging")
    return xr.merge(corr_list)
Example 5: __init__
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def __init__(self, varr, match_dict, framerate=30):
    if isinstance(varr, list):
        varr = xr.merge(varr)
    self.varr = varr
    self.varr_hv = hv.Dataset(varr)
    vh = self.varr_hv.range('height')
    vw = self.varr_hv.range('width')
    frange = self.varr_hv.range('frame')
    self._h = int(vh[1] - vh[0] + 1)
    self._w = int(vw[1] - vw[0] + 1)
    self.match = match_dict
    self.framerate = framerate
    CStream = Stream.define(
        'CStream', f=param.Integer(default=int(frange[0]), bounds=frange))
    self.stream = CStream()
    self.widgets = self._widgets()
Example 6: open_minian
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def open_minian(dpath, fname='minian', backend='netcdf', chunks=None, post_process=None):
    if backend == 'netcdf':
        fname = fname + '.nc'
        mpath = pjoin(dpath, fname)
        # Peek at the dimensions first, then reopen lazily with dask chunks.
        with xr.open_dataset(mpath) as ds:
            dims = ds.dims
        if chunks == 'auto':
            chunks = dict([(d, 'auto') for d in dims])
        ds = xr.open_dataset(mpath, chunks=chunks)
        if post_process:
            ds = post_process(ds, mpath)
        return ds
    elif backend == 'zarr':
        mpath = pjoin(dpath, fname)
        # Each sub-directory holds one zarr store; merge them into a single dataset.
        dslist = [xr.open_zarr(pjoin(mpath, d))
                  for d in listdir(mpath) if isdir(pjoin(mpath, d))]
        ds = xr.merge(dslist)
        if chunks == 'auto':
            chunks = dict([(d, 'auto') for d in ds.dims])
        if post_process:
            ds = post_process(ds, mpath)
        return ds.chunk(chunks)
    else:
        raise NotImplementedError("backend {} not supported".format(backend))
Example 7: union
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def union(dsets: List[xr.Dataset], options: Dict[str, Any] = None) -> xr.Dataset:
    """
    Merge a list of datasets into a single dataset.

    Parameters
    ----------
    dsets : List[xr.Dataset]
        A list of xarray.Dataset(s) to merge.
    options : Dict, optional
        Additional keyword arguments passed through to
        :py:func:`~xarray.merge()`, by default None

    Returns
    -------
    xr.Dataset
        xarray Dataset
    """
    options = options or {}
    try:
        return xr.merge(dsets, **options)
    except Exception as exc:
        logger.error('Failed to merge datasets.')
        raise exc
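A hedged usage sketch for the union() helper above; the toy datasets and the compat option are illustrative, not part of the original source.

import xarray as xr

ds_a = xr.Dataset({'tas': ('time', [280.1, 281.3])}, coords={'time': [0, 1]})
ds_b = xr.Dataset({'pr': ('time', [1.2, 0.7])}, coords={'time': [0, 1]})

# options is forwarded to xarray.merge as keyword arguments.
combined = union([ds_a, ds_b], options={'compat': 'no_conflicts'})
# combined now holds both 'tas' and 'pr' on the shared 'time' coordinate.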
Example 8: add_xarray_groups
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def add_xarray_groups(ds, **kwargs):
    """Add an xarray.Dataset as a subgroup to another xarray.Dataset

    Args:
        ds: The root xarray.Dataset.
        **kwargs: Keyword arguments: the key is the name of the group and the
            value must be an xarray.Dataset.

    Returns:
        `ds` with the added subgroups
    """
    datasets = [ds]
    for group_name, group in kwargs.items():
        # Prefix each variable name with the group name:
        group = group.rename(
            {
                var_name: "/".join([group_name, var_name])
                for var_name in group.variables
            },
        )
        # Add the group name also to the dimensions:
        group = group.rename({
            dim: "/".join([group_name, dim])
            for dim in group.dims
            if dim not in group.coords
        })
        datasets.append(group)
    return xarray.merge(datasets)
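A minimal sketch of calling add_xarray_groups as defined above; the dataset contents and the group name 'calibration' are invented for illustration.

import xarray

root = xarray.Dataset({'temperature': ('time', [270.0, 271.5])})
extra = xarray.Dataset({'noise': ('time', [0.1, 0.2])})

combined = add_xarray_groups(root, calibration=extra)
# combined contains 'temperature' plus 'calibration/noise', whose dimension
# has been renamed to 'calibration/time'.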
Example 9: rinexobs2
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def rinexobs2(fn: Path,
              use: Sequence[str] = None,
              tlim: Tuple[datetime, datetime] = None,
              useindicators: bool = False,
              meas: Sequence[str] = None,
              verbose: bool = False,
              *,
              fast: bool = True,
              interval: Union[float, int, timedelta] = None) -> xarray.Dataset:
    if isinstance(use, str):
        use = [use]
    if use is None or not use[0].strip():
        use = ('C', 'E', 'G', 'J', 'R', 'S')

    obs = xarray.Dataset({}, coords={'time': [], 'sv': []})
    attrs: Dict[str, Any] = {}
    for u in use:
        o = rinexsystem2(fn, system=u, tlim=tlim,
                         useindicators=useindicators, meas=meas,
                         verbose=verbose,
                         fast=fast, interval=interval)
        if len(o.variables) > 0:
            attrs = o.attrs
            obs = xarray.merge((obs, o))

    obs.attrs = attrs
    return obs
Example 10: compute_dataset
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def compute_dataset(ds, period='1W', incl_stdev=False):
    if incl_stdev:
        resample_obj = ds.resample(time=period)
        ds_mean = resample_obj.mean(dim='time')
        ds_std = resample_obj.std(dim='time').rename(
            name_dict={name: f"{name}_stdev" for name in ds.data_vars})
        ds_merged = xr.merge([ds_mean, ds_std])
        ds_merged.attrs.update(ds.attrs)
        return ds_merged
    else:
        return ds.resample(time=period).mean(dim='time')
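A hypothetical call to compute_dataset above; the 'sst' variable and the synthetic time series are assumptions made for this sketch.

import numpy as np
import pandas as pd
import xarray as xr

times = pd.date_range('2020-01-01', periods=28, freq='D')
ds = xr.Dataset({'sst': ('time', np.random.rand(28))}, coords={'time': times})

weekly = compute_dataset(ds, period='1W', incl_stdev=True)
# weekly contains 'sst' (weekly mean) and 'sst_stdev' (weekly standard deviation),
# merged into one Dataset with the original attributes preserved.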
Example 11: resample_and_merge_cubes
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def resample_and_merge_cubes(cubes: List[xr.Dataset],
                             cube_config: CubeConfig) -> xr.Dataset:
    with observe_progress('Resampling cube(s)', len(cubes) + 1) as progress:
        resampled_cubes = []
        for cube in cubes:
            resampled_cube = resample_cube(cube, cube_config)
            resampled_cubes.append(resampled_cube)
            progress.worked(1)
        merged_cube = xr.merge(resampled_cubes) if len(resampled_cubes) > 1 else resampled_cubes[0]
        progress.worked(1)
        return merged_cube
Example 12: calc_pdfs
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def calc_pdfs(ds, i, shr_mem, prog_mem, coords, len_coord_list):
    """
    Calculates the probability density functions of the radiation for each month
    of the given dataset and saves them to a new dataset.

    Parameters
    ----------
    ds : xarray Dataset
        with a data variable 'SWGDN' containing time-series data at coordinates coords
    i : int
        index where in shr_mem the result is to be saved
    shr_mem : shared list
        shared memory where all the calculated xk, pk values are stored
    prog_mem : list
        list indicating the overall progress of the computation; the first value ([0])
        is the total number of coordinate tuples to compute
    coords : tuple
        (lat, lon) representing the location of the time series in ds
    len_coord_list : int
        length of coord_list, used for the progress bar
    """
    ds_out = xr.Dataset()
    for mo in range(1, 13):
        ds_mo = ds.sel(month=mo)
        da_mo = ds_mo['SWGDN'].values
        fig = plt.figure()
        ax = fig.add_subplot()
        # Take the (xk, pk) line data from the distribution plot and normalise pk.
        xk, pk = sns_distplot(da_mo, ax=ax).get_lines()[0].get_data()
        pk = pk / sum(pk)
        ds_out_mo = pd.DataFrame({'xk': xk, 'pk': pk,
                                  'lat': coords[0], 'lon': coords[1],
                                  'month': mo, 'bins': range(0, len(pk))})
        ds_out_mo = ds_out_mo.set_index(['lat', 'lon', 'month', 'bins'])
        ds_out_xk_pk = ds_out_mo.to_xarray().copy()
        ds_out = xr.merge([ds_out, ds_out_xk_pk])
        plt.close()
    shr_mem[i] = ds_out
    prog_mem.append(1)
    progress_bar(len(prog_mem), len_coord_list)
Example 13: ds_2d_to_1d
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def ds_2d_to_1d(ds):
    ds_temp = ds.reset_coords()
    ds_1d = xr.merge([ds_temp['lon'][0, :], ds_temp['lat'][:, 0]])
    ds_1d.coords['lon'] = ds_1d['lon']
    ds_1d.coords['lat'] = ds_1d['lat']
    return ds_1d
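A possible way to exercise ds_2d_to_1d above, assuming a rectilinear grid stored with 2-D lat/lon coordinates; the grid, dimension names, and 'sst' variable are invented for this sketch.

import numpy as np
import xarray as xr

lat2d, lon2d = np.meshgrid(np.arange(40.0, 44.0), np.arange(0.0, 3.0), indexing='ij')
ds = xr.Dataset(
    {'sst': (('j', 'i'), np.random.rand(4, 3))},
    coords={'lat': (('j', 'i'), lat2d), 'lon': (('j', 'i'), lon2d)},
)

ds_1d = ds_2d_to_1d(ds)
# ds_1d carries 'lon' along dimension 'i' and 'lat' along dimension 'j',
# promoted to 1-D coordinates.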
Example 14: __init__
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def __init__(self, varr, marr=None, framerate=30):
    if isinstance(varr, list):
        varr = xr.merge(varr)
    self.varr = hv.Dataset(varr)
    if marr is not None:
        self.marr = hv.Dataset(marr)
    else:
        self.marr = None
    self.framerate = framerate
    CStream = Stream.define(
        'CStream',
        f=param.Integer(default=0, bounds=self.varr.range('frame')))
    self.stream = CStream()
    self.widgets = self._widgets()
Example 15: _im_overlay
# Required module: import xarray [as alias]
# Or alternatively: from xarray import merge [as alias]
def _im_overlay(self, f, A, C, im_hsv, contour=None):
    f = int(f)
    AdC = A.dot(C.sel(frame=f))
    im_hue = im_hsv.sel(cspace='H').rename('H').drop('cspace')
    im_sat = (im_hsv.sel(cspace='S')).rename('S').drop('cspace')
    im_val = (im_hsv.sel(cspace='V') * AdC * 4).clip(
        0, 1).rename('V').drop('cspace')
    ds = xr.merge([im_hue, im_sat, im_val])
    im = hv.HSV(ds, kdims=['width', 'height'])
    # if contour is None:
    #     contour = hv.operation.contours(im)
    return im