本文整理匯總了Python中xarray.Dataset方法的典型用法代碼示例。如果您正苦於以下問題:Python xarray.Dataset方法的具體用法?Python xarray.Dataset怎麽用?Python xarray.Dataset使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類xarray
的用法示例。
在下文中一共展示了xarray.Dataset方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: _save_files
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def _save_files(self, data, dtype_out_time):
    """Save the data to netcdf files in direc_out.

    Parameters
    ----------
    data : xr.DataArray or xr.Dataset
        The computed data to be written to disk.
    dtype_out_time : str
        Key into ``self.path_out`` selecting the output path.  If it
        contains ``'reg'``, the new data is merged into any existing
        regional-output file at that path instead of replacing it.
    """
    path = self.path_out[dtype_out_time]
    # exist_ok avoids a race between an existence check and creation.
    os.makedirs(self.dir_out, exist_ok=True)
    if 'reg' in dtype_out_time:
        try:
            with xr.open_dataset(path) as existing:
                # Load into memory and close the underlying file handle
                # so that `to_netcdf` below can safely overwrite the
                # very same path.
                reg_data = existing.load()
        except (EOFError, RuntimeError, IOError):
            # No usable existing file: start from an empty Dataset.
            reg_data = xr.Dataset()
        reg_data.update(data)
        data_out = reg_data
    else:
        data_out = data
    if isinstance(data_out, xr.DataArray):
        # to_netcdf needs a Dataset; wrap the array under this calc's name.
        data_out = xr.Dataset({self.name: data_out})
    data_out.to_netcdf(path, engine='netcdf4')
示例2: set_grid_attrs_as_coords
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def set_grid_attrs_as_coords(ds):
    """Set available grid attributes as coordinates in a given Dataset.

    Grid attributes are assumed to have their internal aospy names.  Grid
    attributes are set as coordinates, such that they are carried by all
    selected DataArrays with overlapping index dimensions.

    Parameters
    ----------
    ds : Dataset
        Input data

    Returns
    -------
    Dataset
        Dataset with grid attributes set as coordinates
    """
    # Only promote attributes actually present in the Dataset, whether
    # they currently live as coordinates or as data variables.
    present = set(ds.coords) | set(ds.data_vars)
    grid_attrs_in_ds = set(GRID_ATTRS) & present
    return ds.set_coords(grid_attrs_in_ds)
示例3: rmse
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def rmse(x, y, dim):
    """
    Compute Root Mean Squared Error.

    Thin wrapper delegating to the underlying ``xs.rmse`` implementation.

    Parameters
    ----------
    x : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
        Mix of labeled and/or unlabeled arrays to which to apply the function.
    y : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
        Mix of labeled and/or unlabeled arrays to which to apply the function.
    dim : str
        The dimension to apply the RMSE along.

    Returns
    -------
    Root Mean Squared Error
        Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
        numpy.ndarray, the first type on that list to appear on an input.
    """
    return xs.rmse(x, y, dim)
示例4: mae
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def mae(x, y, dim):
    """
    Compute Mean Absolute Error.

    Thin wrapper delegating to the underlying ``xs.mae`` implementation.

    Parameters
    ----------
    x : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
        Mix of labeled and/or unlabeled arrays to which to apply the function.
    y : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
        Mix of labeled and/or unlabeled arrays to which to apply the function.
    dim : str
        The dimension to apply the MAE along.

    Returns
    -------
    Mean Absolute Error
        Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
        numpy.ndarray, the first type on that list to appear on an input.
    """
    return xs.mae(x, y, dim)
示例5: get_xarray_group
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def get_xarray_group(dataset, group):
    """Get pseudo group from xarray.Dataset

    Args:
        dataset: A xarray.Dataset object with pseudo groups.
        group: The name of the group (can also be a subgroup).

    Returns:
        A xarray.Dataset with the pseudo group.

    Raises:
        KeyError: If no variable name starts with the group prefix.
    """
    # Pseudo groups are encoded as a "group/" prefix on variable names.
    prefix = group if group.endswith("/") else group + "/"
    members = [name for name in dataset.variables
               if name.startswith(prefix)]
    if not members:
        raise KeyError(f"The group {prefix} was not found!")
    return dataset[members]
示例6: check_collocation_data
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def check_collocation_data(dataset):
    """Check whether the dataset fulfills the standard of collocated data

    Args:
        dataset: A xarray.Dataset object

    Raises:
        A InvalidCollocationData Error if the dataset did not pass the test.
    """
    # Both bookkeeping variables must be present for collocated data.
    for required in ("Collocations/pairs", "Collocations/group"):
        if required not in dataset.variables:
            raise InvalidCollocationData(
                f"Could not find the field '{required}'!"
            )
示例7: test_undo_xarray_floatification
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def test_undo_xarray_floatification(self):
    """Check undo_xarray_floatification returns a copy in which variables
    carrying an integer dtype in their encoding are converted back, while
    plain floats and datetimes are left untouched."""
    ds = xarray.Dataset(
        {"a": (["x"], numpy.array([1, 2, 3], dtype="f4")),
         "b": (["x"], numpy.array([2.0, 3.0, 4.0])),
         "c": (["x"], numpy.array(["2010-01-01", "2010-01-02",
                                   "2010-01-03"], dtype="M8"))})
    # "a" declares an integer target dtype, so it should be converted back.
    ds["a"].encoding = {"dtype": numpy.dtype("i4"),
                        "_FillValue": 1234}
    # c should NOT be converted because it's a time
    ds["c"].encoding = {"dtype": numpy.dtype("i8"),
                        "_FillValue": 12345}
    ds2 = utils.undo_xarray_floatification(ds)
    assert ds is not ds2  # has to be a copy
    assert ds["a"].encoding == ds2["a"].encoding
    assert numpy.allclose(ds["a"], ds2["a"])
    assert ds2["a"].dtype == ds2["a"].encoding["dtype"]
    assert (ds2["c"] == ds["c"]).all()
    assert ds2["c"].dtype == ds["c"].dtype
    assert ds2["b"].dtype == ds["b"].dtype
示例8: test_collocate_collapse_expand
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def test_collocate_collapse_expand(self):
    """Test whether collocating, collapsing and expanding work

    Smoke test: self-collocate a small ten-point dataset, then collapse
    and expand the result.  Only checks that no exception is raised.
    """
    collocator = Collocator()
    # One point per year 2000-2009, with lat/lon both 0..9 along time.
    test = xr.Dataset({
        "time": ("time", np.arange("2000", "2010", dtype="M8[Y]")),
        "lat": ("time", np.arange(10)),
        "lon": ("time", np.arange(10)),
    })
    collocations = collocator.collocate(
        test, test, max_interval="30 days",
        max_distance="150 miles"
    )
    # Results intentionally unused; this only verifies the calls complete.
    collapsed = collapse(collocations)
    expanded = expand(collocations)
示例9: test_times
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def test_times(self):
    """Test if times are read correctly

    Round-trips a dataset whose time variable carries a CF "units"
    encoding through NetCDF4 write/read and checks the values survive.
    """
    fh = NetCDF4()
    with tempfile.TemporaryDirectory() as tdir:
        tfile = os.path.join(tdir, "testfile.nc")
        before = xr.Dataset(
            {"a":
             xr.DataArray(
                 np.array(
                     ["2019-02-14T09:00:00", "2019-02-14T09:00:01"],
                     dtype="M8[ns]"))})
        # Encoded at 0.1 s resolution relative to the first timestamp;
        # the one-second spacing must survive the round trip.
        before["a"].encoding = {
            "units": "seconds since 2019-02-14 09:00:00",
            "scale_factor": 0.1}
        fh.write(before, tfile)
        after = fh.read(tfile)
        assert np.array_equal(before["a"], after["a"])
示例10: test_scalefactor
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def test_scalefactor(self):
    """Test if scale factors written/read correctly

    Round-trips float data stored as scaled int16 through NetCDF4
    write/read and checks the values survive (within float tolerance).
    """
    fh = NetCDF4()
    with tempfile.TemporaryDirectory() as tdir:
        tfile = os.path.join(tdir, "testfile.nc")
        before = xr.Dataset(
            {"a":
             xr.DataArray(
                 np.array([0.1, 0.2]))})
        # On disk: int16 with scale 0.1, so 0.1 -> 1 and 0.2 -> 2.
        before["a"].encoding = {
            "scale_factor": 0.1,
            "_FillValue": 42,
            "dtype": "int16"}
        fh.write(before, tfile)
        after = fh.read(tfile)
        assert np.allclose(before["a"], after["a"])
示例11: __init__
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def __init__(self, **kwargs):
    """Initialise a Dataset object.

    All keyword arguments will be translated into attributes.
    Does not take positional arguments.

    Note that if you create a dataset with a name that already exists,
    the existing object is returned, but __init__ is still called
    (Python does this, see
    https://docs.python.org/3.7/reference/datamodel.html#object.__new__).
    """
    self.mandatory_fields = set()
    # Every keyword becomes an attribute of this instance.
    for name, value in kwargs.items():
        setattr(self, name, value)
    self.setlocal()
    if self.my_pseudo_fields is None:
        self.my_pseudo_fields = collections.OrderedDict()
示例12: _apply_limits_and_filters
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def _apply_limits_and_filters(self, cont, limits, simple_filters):
    """Apply limits and simple filters to a data container.

    xarray Datasets support only filters (limits raise
    NotImplementedError); plain ndarrays get limits and filters.
    """
    if isinstance(cont, xarray.Dataset):
        if len(limits) > 0:
            raise NotImplementedError(
                "limits not implemented on xarray datasets")
        before = cont[self.time_field].size
        for filt in simple_filters:
            cont = filt(cont)
        logger.debug("Filters reduced number from "
            "{:d} to {:d}".format(before, cont[self.time_field].size))
        return cont
    before = cont.size
    cont = tpmath.array.limit_ndarray(cont, limits)
    for filt in simple_filters:
        cont = filt(cont)
    if cont.size < before:
        logger.debug("Applying limitations, reducing "
            "{:d} to {:d}".format(before, cont.size))
    return cont
示例13: _add_cont_to_arr
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def _add_cont_to_arr(self, arr, N, cont):
    """Changes arr in-situ, does not return"""
    if isinstance(cont, xarray.Dataset):
        # For xarray we would need to track each time coordinate
        # individually (they are not guaranteed to advance regularly
        # relative to N), which is good enough for approximate
        # pre-allocation but not for actual storage — see the original
        # design notes: each time coordinate must be checked for where
        # it ends.
        raise NotImplementedError("This is not used for xarrays. "
            "But see comment in source-code for some thoughts.")
    # Copy cont into the pre-allocated array starting at offset N.
    arr[N:(N + cont.size)] = cont
示例14: get_additional_field
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def get_additional_field(self, M, fld):
    """Get additional field.

    Get field from other dataset, original objects, or otherwise.
    To be implemented by subclass implementations; exact fields depend
    on the subclass.

    Arguments:
        M (ndarray): ndarray with existing data.  A (masked) array with
            a dtype such as returned from `self.read <Dataset.read>`.
        fld (str): Additional field to read from original data.

    Returns:
        ndarray with fields of M + fld.

    Raises:
        NotImplementedError: always, in this base class.
    """
    raise NotImplementedError("Must be implemented by child-class")
示例15: find_granules_sorted
# 需要導入模塊: import xarray [as 別名]
# 或者: from xarray import Dataset [as 別名]
def find_granules_sorted(self, dt_start=None, dt_end=None,
                         include_last_before=False, **extra):
    """Yield all granules, sorted by times.

    For documentation, see :func:`~Dataset.find_granules`.
    """
    granules = list(self.find_granules(dt_start, dt_end,
                                       include_last_before, **extra))
    # One full pass through find_granules caches all granule time info,
    # so sorting by get_times_for_granule needs no extra hints from the
    # subdir re-matching machinery.
    if extra.get("return_time", False):
        # (time, granule) tuples sort naturally by time.
        sort_key = None
    else:
        sort_key = self.get_times_for_granule
    yield from sorted(granules, key=sort_key)