This article collects typical usage examples of the xarray.open_mfdataset method in Python. If you are wondering what xarray.open_mfdataset does, how to call it, or what working examples look like, the curated code samples below may help. You can also explore further usage examples from the xarray module itself.
Below are 15 code examples of the xarray.open_mfdataset method, sorted by popularity by default.
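Before the examples, here is a minimal sketch of the basic call pattern, assuming a set of NetCDF files that share coordinates (the file pattern below is hypothetical):

import xarray as xr

# Combine every matching file into a single Dataset, aligning on shared coordinates.
ds = xr.open_mfdataset('data/temperature_*.nc', combine='by_coords')
print(ds)   # inspect dimensions, coordinates and variables
ds.close()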
Example 1: to_netcdf
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def to_netcdf(inpaths, outpath, cdm, engine):
    import cf2cdm
    import xarray as xr

    # NOTE: no-op if there are no input paths
    if len(inpaths) == 0:
        return
    if not outpath:
        outpath = os.path.splitext(inpaths[0])[0] + '.nc'
    ds = xr.open_mfdataset(inpaths, engine=engine, combine='by_coords')
    if cdm:
        coord_model = getattr(cf2cdm, cdm)
        ds = cf2cdm.translate_coords(ds, coord_model=coord_model)
    ds.to_netcdf(outpath)
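A hedged usage sketch for the function above, assuming a single GRIB input read with the cfgrib engine and translated to the CDS coordinate model (the file name is hypothetical, and os must already be imported at module level):

# Hypothetical call: convert one GRIB file to NetCDF next to the input
# (outpath is empty, so '.nc' replaces the original extension),
# translating coordinates via cf2cdm.CDS.
to_netcdf(['era5.grib'], '', cdm='CDS', engine='cfgrib')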
Example 2: _get_grid_files
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def _get_grid_files(self):
    """Get the files holding grid data for an aospy object."""
    grid_file_paths = self.grid_file_paths
    datasets = []
    if isinstance(grid_file_paths, str):
        grid_file_paths = [grid_file_paths]
    for path in grid_file_paths:
        try:
            ds = xr.open_dataset(path, decode_times=False)
        except (TypeError, AttributeError):
            ds = xr.open_mfdataset(path, decode_times=False,
                                   combine='by_coords').load()
        except (RuntimeError, OSError) as e:
            msg = str(e) + ': {}'.format(path)
            raise RuntimeError(msg)
        datasets.append(ds)
    return tuple(datasets)
Example 3: _open_ds_from_store
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def _open_ds_from_store(fname, store_mod=None, store_cls=None, **kwargs):
    """Open a dataset and return it"""
    if isinstance(fname, xr.Dataset):
        return fname
    if not isstring(fname):
        try:  # test iterable
            fname[0]
        except TypeError:
            pass
        else:
            if store_mod is not None and store_cls is not None:
                if isstring(store_mod):
                    store_mod = repeat(store_mod)
                if isstring(store_cls):
                    store_cls = repeat(store_cls)
                fname = [_open_store(sm, sc, f)
                         for sm, sc, f in zip(store_mod, store_cls, fname)]
            kwargs['engine'] = None
            kwargs['lock'] = False
            return open_mfdataset(fname, **kwargs)
    if store_mod is not None and store_cls is not None:
        fname = _open_store(store_mod, store_cls, fname)
    return open_dataset(fname, **kwargs)
Example 4: open
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def open(self, exact_dates=True, concat_dim='time', **dataset_kwargs):
    """
    Open an xarray multi-file Dataset for the processed files with dates set using set_dates(), retrieve(), or
    write(). Once opened, this Dataset is accessible by self.Dataset.

    :param exact_dates: bool: if True, set the Dataset to have the exact dates of this instance; otherwise,
        keep all of the monthly dates in the opened files
    :param concat_dim: passed to xarray.open_mfdataset()
    :param dataset_kwargs: kwargs passed to xarray.open_mfdataset()
    """
    nc_file_dir = '%s/processed' % self._root_directory
    if not self.dataset_dates:
        raise ValueError("use set_dates() to specify times of data to load")
    dates_index = pd.DatetimeIndex(self.dataset_dates).sort_values()
    months = dates_index.to_period('M')
    unique_months = months.unique()
    nc_files = ['%s/%s%s.nc' % (nc_file_dir, self._file_id, d.strftime('%Y%m')) for d in unique_months]
    self.Dataset = xr.open_mfdataset(nc_files, concat_dim=concat_dim, **dataset_kwargs)
    if exact_dates:
        self.Dataset = self.Dataset.sel(time=self.dataset_dates)
    self.dataset_variables = list(self.Dataset.variables.keys())
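The same pattern can be reproduced standalone: derive the unique months covering the requested dates, open one file per month, then trim to the exact timestamps. A minimal sketch with hypothetical file names:

import pandas as pd
import xarray as xr

dates = pd.date_range('1979-01-01', '1979-02-28', freq='6H')
months = pd.DatetimeIndex(dates).to_period('M').unique()
files = ['processed/era5_%s.nc' % m.strftime('%Y%m') for m in months]   # hypothetical monthly files
ds = xr.open_mfdataset(files, concat_dim='time', combine='nested')
ds = ds.sel(time=dates)   # keep only the exact requested timestamps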
Example 5: prepare_meta_sarah
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def prepare_meta_sarah(xs, ys, year, month, template_sis, template_sid, module, resolution=resolution):
    fns = [next(glob.iglob(t.format(year=year, month=month)))
           for t in (template_sis, template_sid)]

    with xr.open_mfdataset(fns, compat='identical') as ds:
        ds = _rename_and_clean_coords(ds)
        ds = ds.coords.to_dataset()

        t = pd.Timestamp(year=year, month=month, day=1)
        ds['time'] = pd.date_range(t, t + pd.DateOffset(months=1),
                                   freq='1h', closed='left')

        if resolution is not None:
            def p(s):
                s += 0.1 * resolution
                return s - (s % resolution)
            xs = np.arange(p(xs.start), p(xs.stop) + 1.1*resolution, resolution)
            ys = np.arange(p(ys.start), p(ys.stop) - 0.1*resolution, -resolution)
            ds = ds.sel(x=xs, y=ys, method='nearest')
        else:
            ds = ds.sel(x=as_slice(xs), y=as_slice(ys))

        return ds.load()
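The nested helper p() snaps a coordinate bound down to the regular grid defined by resolution, after a small positive shift that guards against floating-point jitter. A worked sketch with hypothetical values:

resolution = 0.05

def p(s):
    s += 0.1 * resolution          # nudge up by a tenth of a cell
    return s - (s % resolution)    # then round down to the nearest grid point

print(p(4.013))   # roughly 4.00
print(p(4.049))   # roughly 4.05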
Example 6: _open_dataset
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def _open_dataset(self):
    import xarray as xr
    url = self.urlpath
    kwargs = self.xarray_kwargs

    if "*" in url or isinstance(url, list):
        _open_dataset = xr.open_mfdataset
        if self.pattern:
            kwargs.update(preprocess=self._add_path_to_ds)
        if self.combine is not None:
            if 'combine' in kwargs:
                raise Exception("Setting 'combine' argument twice in the catalog is invalid")
            kwargs.update(combine=self.combine)
        if self.concat_dim is not None:
            if 'concat_dim' in kwargs:
                raise Exception("Setting 'concat_dim' argument twice in the catalog is invalid")
            kwargs.update(concat_dim=self.concat_dim)
    else:
        _open_dataset = xr.open_dataset
        url = fsspec.open_local(url, **self.storage_options)

    self._ds = _open_dataset(url, chunks=self.chunks, **kwargs)
Example 7: open_glm_time_series
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def open_glm_time_series(filenames, chunks=None):
    """Convenience function for combining individual 1-min GLM gridded imagery
    files into a single xarray.Dataset with a time dimension.

    Creates an index on the time dimension.

    The time dimension will be in the order in which the files are listed
    due to the behavior of combine='nested' in open_mfdataset.

    Adjusts the time_coverage_start and time_coverage_end metadata.
    """
    # Need to fix time_coverage_start and _end in concat dataset
    starts = [t for t in gen_file_times(filenames)]
    ends = [t for t in gen_file_times(filenames, time_attr='time_coverage_end')]

    d = xr.open_mfdataset(filenames, concat_dim='time', chunks=chunks, combine='nested')
    d['time'] = starts
    d = d.set_index({'time': 'time'})
    d = d.set_coords('time')
    d.attrs['time_coverage_start'] = pd.Timestamp(min(starts)).isoformat()
    d.attrs['time_coverage_end'] = pd.Timestamp(max(ends)).isoformat()

    return d
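As the docstring notes, combine='nested' keeps the concatenation in the order the files are listed, whereas combine='by_coords' reorders by coordinate values. A minimal sketch with hypothetical, pre-sorted file names:

import xarray as xr

files = ['glm_grid_000.nc', 'glm_grid_001.nc', 'glm_grid_002.nc']   # hypothetical 1-min files
ds = xr.open_mfdataset(files, concat_dim='time', combine='nested')  # time follows the list order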
Example 8: _preprocess_and_rename_grid_attrs
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def _preprocess_and_rename_grid_attrs(func, grid_attrs=None, **kwargs):
    """Call a custom preprocessing method first then rename grid attrs.

    This wrapper is needed to generate a single function to pass to the
    ``preprocess`` argument of xr.open_mfdataset. It makes sure that the
    user-specified preprocess function is called on the loaded Dataset before
    aospy's is applied. An example for why this might be needed is output from
    the WRF model; one needs to add a CF-compliant units attribute to the time
    coordinate of all input files, because it is not present by default.

    Parameters
    ----------
    func : function
        An arbitrary function to call before calling
        ``grid_attrs_to_aospy_names`` in ``_load_data_from_disk``. Must take
        an xr.Dataset as an argument as well as ``**kwargs``.
    grid_attrs : dict (optional)
        Overriding dictionary of grid attributes mapping aospy internal
        names to names of grid attributes used in a particular model.

    Returns
    -------
    function
        A function that calls the provided function ``func`` on the Dataset
        before calling ``grid_attrs_to_aospy_names``; this is meant to be
        passed as a ``preprocess`` argument to ``xr.open_mfdataset``.
    """
    def func_wrapper(ds):
        return grid_attrs_to_aospy_names(func(ds, **kwargs), grid_attrs)
    return func_wrapper
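For context, a preprocess callable handed to xr.open_mfdataset is applied to each file's Dataset before concatenation. A hedged sketch, assuming hypothetical WRF-style output files whose time coordinate lacks a units attribute:

import xarray as xr

def add_time_units(ds):
    # Hypothetical fix-up: attach a CF-compliant units attribute to the time coordinate.
    ds['Time'].attrs.setdefault('units', 'hours since 2000-01-01 00:00:00')
    return ds

ds = xr.open_mfdataset('wrfout_d01_*.nc', preprocess=add_time_units,
                       combine='nested', concat_dim='Time')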
Example 9: open_mfdataset_glob
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def open_mfdataset_glob(self):
    """
    Use xarray.open_mfdataset to read multiple netcdf files with a glob
    pattern.
    """
    pattern = os.path.join(self.data_dir, "*PropertiesRhineMeuse30min.nc")
    xarray.open_mfdataset(pattern)
Example 10: open_mfdataset_list
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def open_mfdataset_list(self):
    """
    Use xarray.open_mfdataset to read multiple netcdf files from a list.
    """
    file_names = [os.path.join(self.data_dir, f)
                  for f in ('soilPropertiesRhineMeuse30min.nc',
                            'topoPropertiesRhineMeuse30min.nc')]
    xarray.open_mfdataset(file_names)
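The two benchmarks above differ only in how the paths are supplied; a glob pattern and an explicit (sorted) list are interchangeable. A minimal sketch with hypothetical paths:

import glob
import xarray as xr

ds_from_glob = xr.open_mfdataset('data/*RhineMeuse30min.nc', combine='by_coords')
ds_from_list = xr.open_mfdataset(sorted(glob.glob('data/*RhineMeuse30min.nc')),
                                 combine='by_coords')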
Example 11: track_start_volume
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def track_start_volume(self, gdirs):
    self.cfg_init()
    path = os.path.join(cfg.PATHS['working_dir'], 'run_output_tstar*.nc')
    with xr.open_mfdataset(path, combine='by_coords') as ds:
        return float(ds.volume.sum(dim='rgi_id').isel(time=0)) * 1e-9
Example 12: track_tstar_run_final_volume
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def track_tstar_run_final_volume(self, gdirs):
    self.cfg_init()
    path = os.path.join(cfg.PATHS['working_dir'], 'run_output_tstar*.nc')
    with xr.open_mfdataset(path, combine='by_coords') as ds:
        return float(ds.volume.sum(dim='rgi_id').isel(time=-1)) * 1e-9
Example 13: track_1990_run_final_volume
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def track_1990_run_final_volume(self, gdirs):
    self.cfg_init()
    path = os.path.join(cfg.PATHS['working_dir'], 'run_output_pd*.nc')
    with xr.open_mfdataset(path, combine='by_coords') as ds:
        return float(ds.volume.sum(dim='rgi_id').isel(time=-1)) * 1e-9
Example 14: track_avg_temp_full_period
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def track_avg_temp_full_period(self, gdirs):
    self.cfg_init()
    path = os.path.join(cfg.PATHS['working_dir'], 'climate_input*.nc')
    with xr.open_mfdataset(path, combine='by_coords') as ds:
        return float(ds.temp.mean())
Example 15: track_avg_prcp_full_period
# Required module: import xarray [as alias]
# Or: from xarray import open_mfdataset [as alias]
def track_avg_prcp_full_period(self, gdirs):
    self.cfg_init()
    path = os.path.join(cfg.PATHS['working_dir'], 'climate_input*.nc')
    with xr.open_mfdataset(path, combine='by_coords') as ds:
        return float(ds.prcp.mean())