This article collects typical usage examples of the dask.array.stack method in Python. If you are wondering what dask.array.stack does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the containing module, dask.array.
The following 15 code examples of array.stack are drawn from open source projects and are sorted by popularity.
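Before the project examples, here is a minimal, self-contained sketch (ours, not from any project below) of what dask.array.stack does: it joins same-shaped arrays along a new axis, lazily.

import dask.array as da
import numpy as np

# two 2x2 blocks become one lazy (2, 2, 2) array with a new leading axis
a = da.from_array(np.arange(4).reshape(2, 2), chunks=2)
b = da.from_array(np.arange(4, 8).reshape(2, 2), chunks=2)
stacked = da.stack([a, b], axis=0)
assert stacked.shape == (2, 2, 2)
print(stacked.compute())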
Example 1: StackColumns
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def StackColumns(*cols):
    """
    Stack the input dask arrays vertically, column by column.

    This uses :func:`dask.array.vstack`.

    Parameters
    ----------
    *cols : :class:`dask.array.Array`
        the dask arrays to stack vertically together

    Returns
    -------
    :class:`dask.array.Array` :
        the dask array where columns correspond to the input arrays

    Raises
    ------
    TypeError
        If the input columns are not dask arrays
    """
    cols = da.broadcast_arrays(*cols)
    return da.vstack(cols).T
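A hypothetical usage sketch of StackColumns (the arrays x and y are illustrative, not from the source project):

import dask.array as da
import numpy as np

x = da.from_array(np.arange(5), chunks=5)        # first column
y = da.from_array(np.arange(5) * 2.0, chunks=5)  # second column
cols = StackColumns(x, y)
assert cols.shape == (5, 2)   # rows are points, columns are the inputs
print(cols.compute())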
Example 2: delayed_dask_stack
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def delayed_dask_stack():
    """A 4D (20, 10, 10, 10) delayed dask array that simulates disk I/O."""
    # we will return a dict with a 'calls' variable that tracks call count
    output = {'calls': 0}

    # create a delayed version of a function that simply generates np.arrays
    # but also counts when it has been called
    @dask.delayed
    def get_array():
        nonlocal output
        output['calls'] += 1
        return np.random.rand(10, 10, 10)

    # then make a mock "timelapse" of 3D stacks
    # see https://napari.org/tutorials/applications/dask.html for details
    _list = [get_array() for _ in range(20)]
    output['stack'] = da.stack(
        [da.from_delayed(i, shape=(10, 10, 10), dtype=float) for i in _list]
    )
    assert output['stack'].shape == (20, 10, 10, 10)
    return output
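A sketch of exercising this fixture directly, assuming import dask, import numpy as np, and import dask.array as da are in scope. Building the stack triggers no computation; dask culls unused tasks, so computing one frame should run exactly one delayed call.

stack_info = delayed_dask_stack()
assert stack_info['calls'] == 0           # graph built, nothing computed yet
frame = stack_info['stack'][0].compute()  # materialize a single timepoint
assert stack_info['calls'] == 1           # only that block's delayed ran
assert frame.shape == (10, 10, 10)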
Example 3: test_prevent_dask_cache
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def test_prevent_dask_cache(delayed_dask_stack):
    """Test that pre-emptively setting the cache to zero keeps it off."""
    # the del is not required; it just shows that the prior state of the
    # cache does not matter: calling resize_dask_cache(0) keeps it disabled
    del utils.dask_cache
    utils.resize_dask_cache(0)
    v = viewer.ViewerModel()
    dask_stack = delayed_dask_stack['stack']
    # adding a new stack will not increase the cache size
    v.add_image(dask_stack, multiscale=False, contrast_limits=(0, 1))
    assert utils.dask_cache.cache.available_bytes == 0
    # and the cache will not be populated
    for i in range(3):
        v.dims.set_point(0, i)
    assert len(utils.dask_cache.cache.heap.heap) == 0
Example 4: _call_ll2cr
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def _call_ll2cr(self, lons, lats, target_geo_def, swath_usage=0):
    """Wrap ll2cr() for handling dask delayed calls better."""
    new_src = SwathDefinition(lons, lats)
    swath_points_in_grid, cols, rows = ll2cr(new_src, target_geo_def)
    # FIXME: How do we check swath usage/coverage if we only do this
    #        per-block?
    # # Determine if enough of the input swath was used
    # grid_name = getattr(self.target_geo_def, "name", "N/A")
    # fraction_in = swath_points_in_grid / float(lons.size)
    # swath_used = fraction_in > swath_usage
    # if not swath_used:
    #     LOG.info("Data does not fit in grid %s because only %f%% of "
    #              "the swath is used" %
    #              (grid_name, fraction_in * 100))
    #     raise RuntimeError("Data does not fit in grid %s" % (grid_name,))
    # else:
    #     LOG.debug("Data fits in grid %s and uses %f%% of the swath",
    #               grid_name, fraction_in * 100)
    return np.stack([cols, rows], axis=0)
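The return pattern in isolation, with made-up stand-ins for the ll2cr outputs: np.stack joins the column and row index arrays along a new leading axis so one block can carry both.

import numpy as np

cols = np.arange(6.0).reshape(2, 3)       # stand-in column coordinates
rows = np.arange(6.0).reshape(2, 3) + 10  # stand-in row coordinates
both = np.stack([cols, rows], axis=0)
assert both.shape == (2, 2, 3)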
Example 5: _call_fornav
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def _call_fornav(self, cols, rows, target_geo_def, data,
                 grid_coverage=0, **kwargs):
    """Wrap fornav() to run as a dask delayed."""
    num_valid_points, res = fornav(cols, rows, target_geo_def,
                                   data, **kwargs)
    if isinstance(data, tuple):
        # convert 'res' from tuple of arrays to one array
        res = np.stack(res)
        num_valid_points = sum(num_valid_points)
    grid_covered_ratio = num_valid_points / float(res.size)
    grid_covered = grid_covered_ratio > grid_coverage
    if not grid_covered:
        msg = "EWA resampling only found %f%% of the grid covered " \
              "(need %f%%)" % (grid_covered_ratio * 100,
                               grid_coverage * 100)
        raise RuntimeError(msg)
    LOG.debug("EWA resampling found %f%% of the grid covered" %
              (grid_covered_ratio * 100))
    return res
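The tuple-to-array branch in isolation, with stand-in per-band outputs: same-shaped arrays in a tuple become one (n_bands, rows, cols) array.

import numpy as np

res = (np.zeros((4, 5)), np.ones((4, 5)))  # stand-in per-band results
res = np.stack(res)
assert res.shape == (2, 4, 5)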
Example 6: classify
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def classify(texts):
    batch_x_text = [clearstring(t) for t in texts]
    batch_x = str_idx(batch_x_text, dict_sentiment['dictionary'], 100)
    output_sentiment = sess_sentiment.run(
        logits_sentiment, feed_dict={x_sentiment: batch_x}
    )
    labels = [sentiment_label[l] for l in np.argmax(output_sentiment, 1)]
    return da.stack(labels, axis=0)
Example 7: _get_schema
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def _get_schema(self):
    from fsspec import open_files
    import dask.array as da
    if self._arr is None:
        path = self._get_cache(self.path)[0]
        files = open_files(path, 'rb', compression=None,
                           **self.storage)
        if self.shape is None:
            arr = NumpyAccess(files[0])
            self.shape = arr.shape
            self.dtype = arr.dtype
            arrs = [arr] + [NumpyAccess(f, self.shape, self.dtype,
                                        offset=arr.offset)
                            for f in files[1:]]
        else:
            arrs = [NumpyAccess(f, self.shape, self.dtype)
                    for f in files]
        self.chunks = (self._chunks, ) + (-1, ) * (len(self.shape) - 1)
        self._arrs = [da.from_array(arr, self.chunks) for arr in arrs]

        if len(self._arrs) > 1:
            self._arr = da.stack(self._arrs)
        else:
            self._arr = self._arrs[0]
        self.chunks = self._arr.chunks
    return Schema(dtype=str(self.dtype), shape=self.shape,
                  extra_metadata=self.metadata,
                  npartitions=self._arr.npartitions,
                  chunks=self.chunks)
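A sketch of the stacking branch above with in-memory stand-ins for NumpyAccess: several same-shaped per-file arrays become one dask array with a new leading "file" axis.

import dask.array as da
import numpy as np

# each stand-in plays the role of one file's array
parts = [da.from_array(np.full((4, 3), i), chunks=(2, -1)) for i in range(3)]
arr = da.stack(parts)
assert arr.shape == (3, 4, 3)
print(arr.chunks)  # ((1, 1, 1), (2, 2), (3,))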
Example 8: test_dask_optimized_slicing
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def test_dask_optimized_slicing(delayed_dask_stack, monkeypatch):
    """Test that dask_configure reduces compute with dask stacks."""
    # add dask stack to the viewer, making sure to pass multiscale and clims
    v = viewer.ViewerModel()
    dask_stack = delayed_dask_stack['stack']
    v.add_image(dask_stack, multiscale=False, contrast_limits=(0, 1))
    assert delayed_dask_stack['calls'] == 1  # the first stack will be loaded
    # changing the Z plane should never incur calls
    # since the stack has already been loaded (& it is chunked as a 3D array)
    for i in range(3):
        v.dims.set_point(1, i)
    assert delayed_dask_stack['calls'] == 1  # still just the first call
    # changing the timepoint will, of course, incur some compute calls
    v.dims.set_point(0, 1)
    assert delayed_dask_stack['calls'] == 2
    v.dims.set_point(0, 2)
    assert delayed_dask_stack['calls'] == 3
    # but going back to previous timepoints should not, since they are cached
    v.dims.set_point(0, 1)
    v.dims.set_point(0, 0)
    assert delayed_dask_stack['calls'] == 3
    v.dims.set_point(0, 3)
    assert delayed_dask_stack['calls'] == 4
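For reference, a standalone sketch of the opportunistic caching these napari tests rely on, using dask's own Cache wrapper (this assumes the optional cachey package is installed):

from dask.cache import Cache

cache = Cache(1e9)  # hold up to ~1 GB of intermediate results
cache.register()    # subsequent .compute() calls populate the cache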
Example 9: lonlat2xyz
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def lonlat2xyz(lons, lats):
    """Convert geographic coordinates to cartesian 3D coordinates."""
    R = 6370997.0
    x_coords = R * da.cos(da.deg2rad(lats)) * da.cos(da.deg2rad(lons))
    y_coords = R * da.cos(da.deg2rad(lats)) * da.sin(da.deg2rad(lons))
    z_coords = R * da.sin(da.deg2rad(lats))
    return da.stack(
        (x_coords.ravel(), y_coords.ravel(), z_coords.ravel()), axis=-1)
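A hypothetical call of the function above on a tiny chunked grid; four lon/lat points ravel into a (4, 3) array of cartesian coordinates.

import dask.array as da
import numpy as np

lons = da.from_array(np.array([[0.0, 90.0], [180.0, -90.0]]), chunks=(1, 2))
lats = da.from_array(np.array([[0.0, 45.0], [-45.0, 0.0]]), chunks=(1, 2))
xyz = lonlat2xyz(lons, lats)
assert xyz.shape == (4, 3)
print(xyz.compute())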
Example 10: lonlat2xyz
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def lonlat2xyz(lons, lats):
    """Convert geographic coordinates to cartesian 3D coordinates."""
    R = 6370997.0
    x_coords = R * np.cos(np.deg2rad(lats)) * np.cos(np.deg2rad(lons))
    y_coords = R * np.cos(np.deg2rad(lats)) * np.sin(np.deg2rad(lons))
    z_coords = R * np.sin(np.deg2rad(lats))
    stack = np.stack if isinstance(lons, np.ndarray) else da.stack
    return stack(
        (x_coords.ravel(), y_coords.ravel(), z_coords.ravel()), axis=-1)
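The dispatch line above, extracted into a hypothetical helper: pick np.stack for eager inputs and da.stack for lazy ones, so a single code path serves both.

import dask.array as da
import numpy as np

def stack_like(template, pieces, axis=0):
    # hypothetical helper: choose the stacker matching the template's type
    stack = np.stack if isinstance(template, np.ndarray) else da.stack
    return stack(pieces, axis=axis)

print(type(stack_like(np.zeros(2), [np.zeros(2), np.ones(2)])))  # np.ndarray
print(type(stack_like(da.zeros(2), [da.zeros(2), da.ones(2)])))  # dask Array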
Example 11: test_uniform_comprehensions
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def test_uniform_comprehensions():
    da_func = lambda arr: da_ndf.uniform_filter(arr, 1, origin=0)  # noqa: E731
    np.random.seed(0)
    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    l2s = [da_func(d[i]) for i in range(len(d))]
    l2c = [da_func(d[i])[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
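Examples 11 through 15 all repeat one pattern: apply an operation slice by slice, reassemble lazily with da.stack or da.concatenate, and compare against the NumPy result. Here is a standalone sketch of that pattern without dask_image, using a trivial per-slice operation:

import dask.array as da
import numpy as np
from dask.array.utils import assert_eq

a = np.random.random((3, 12, 14))
d = da.from_array(a, chunks=(3, 6, 7))
l2s = [d[i] * 2 for i in range(len(d))]  # per-slice lazy results
assert_eq(np.stack([2 * a[i] for i in range(3)]), da.stack(l2s))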
Example 12: test_order_comprehensions
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def test_order_comprehensions(da_func, kwargs):
    np.random.seed(0)
    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    l2s = [da_func(d[i], **kwargs) for i in range(len(d))]
    l2c = [da_func(d[i], **kwargs)[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
Example 13: test_edge_comprehensions
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def test_edge_comprehensions(da_func):
    np.random.seed(0)
    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    l2s = [da_func(d[i]) for i in range(len(d))]
    l2c = [da_func(d[i])[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
Example 14: test_generic_filter_comprehensions
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def test_generic_filter_comprehensions(da_func):
    da_wfunc = lambda arr: da_func(arr, lambda x: x, 1)  # noqa: E731
    np.random.seed(0)
    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    l2s = [da_wfunc(d[i]) for i in range(len(d))]
    l2c = [da_wfunc(d[i])[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
Example 15: test_convolutions_comprehensions
# Required import: from dask import array [as alias]
# Or: from dask.array import stack [as alias]
def test_convolutions_comprehensions(da_func):
    np.random.seed(0)
    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    weights = np.ones((1, 1))
    l2s = [da_func(d[i], weights) for i in range(len(d))]
    l2c = [da_func(d[i], weights)[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))