This article collects typical usage examples of Python's dask.array.concatenate method. If you are wondering what dask.array.concatenate does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the dask.array module.
The following shows 15 code examples of array.concatenate, drawn from open-source projects and sorted by popularity.
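Before the project examples, here is a minimal self-contained sketch of the method itself (the arrays are invented for illustration): dask.array.concatenate joins a sequence of dask arrays along an existing axis and stays lazy until compute() is called.
import numpy as np
import dask.array as da

x = da.from_array(np.arange(6).reshape(2, 3), chunks=(1, 3))
y = da.from_array(np.arange(6, 12).reshape(2, 3), chunks=(1, 3))
z = da.concatenate([x, y], axis=0)  # lazy result of shape (4, 3)
print(z.compute())  # materializes the joined array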
Example 1: _concatenate_chunks
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def _concatenate_chunks(chunks):
    """Concatenate chunks to full output array."""
    # Form the full array
    col, res = [], []
    prev_y = 0
    for y, x in sorted(chunks):
        if len(chunks[(y, x)]) > 1:
            # Multiple pieces cover this position: merge them
            # pixel-wise, ignoring NaNs
            chunk = da.nanmax(da.stack(chunks[(y, x)], axis=-1), axis=-1)
        else:
            chunk = chunks[(y, x)][0]
        if y == prev_y:
            col.append(chunk)
            continue
        # y changed: close the finished column and start a new one
        res.append(da.concatenate(col, axis=1))
        col = [chunk]
        prev_y = y
    res.append(da.concatenate(col, axis=1))
    res = da.concatenate(res, axis=2).squeeze()
    return res
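A hedged driver for the helper above (the (y, x) key layout and chunk shapes are inferred from the function body, not documented by the project): values are lists of 3-D dask arrays; pieces sharing a y key are joined along axis 1, the groups along axis 2, and overlapping pieces are merged with nanmax.
import dask.array as da

piece = da.ones((1, 2, 2), chunks=(1, 2, 2))
chunks = {(0, 0): [piece], (0, 1): [piece],
          (1, 0): [piece], (1, 1): [piece, piece]}  # two overlapping pieces at (1, 1)
full = _concatenate_chunks(chunks)
print(full.shape)  # (4, 4) once the singleton axis is squeezed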
Example 2: find_concat_dim
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def find_concat_dim(da, possible_concat_dims):
    """Look for available dimensions in the DataArray and pick the one
    from a list of candidates.

    PARAMETERS
    ----------
    da : xarray.DataArray
        xmitgcm llc data array
    possible_concat_dims : list
        list of potential dims

    RETURNS
    -------
    out : str
        dimension on which to concatenate
    """
    out = None
    for d in possible_concat_dims:
        if d in da.dims:
            out = d
    return out
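A quick sanity check of this helper with an invented DataArray (dimension names chosen to mimic llc grids):
import numpy as np
import xarray as xr

arr = xr.DataArray(np.zeros((2, 3)), dims=['j', 'i'])
print(find_concat_dim(arr, ['i', 'i_g']))  # 'i'
print(find_concat_dim(arr, ['k', 'k_l']))  # None: no candidate matches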
Example 3: handle_crash
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def handle_crash(varr, vpath, ssname, vlist, varr_list, frame_dict):
    seg1_list = list(filter(lambda v: re.search('seg1', v), vlist))
    seg2_list = list(filter(lambda v: re.search('seg2', v), vlist))
    if seg1_list and seg2_list:
        tframe = frame_dict[ssname]
        # join the chunks of each segment along the frame (time) axis
        varr1 = darr.concatenate(
            list(compress(varr_list, seg1_list)),
            axis=0)
        varr2 = darr.concatenate(
            list(compress(varr_list, seg2_list)),
            axis=0)
        fm1, fm2 = varr1.shape[0], varr2.shape[0]
        fm_crds = varr.coords['frame']
        fm_crds1 = fm_crds.sel(frame=slice(None, fm1 - 1)).values
        fm_crds2 = fm_crds.sel(frame=slice(fm1, None)).values
        # shift the second segment's frame coordinates so they end at tframe
        fm_crds2 = fm_crds2 + (tframe - fm_crds2.max())
        fm_crds_new = np.concatenate([fm_crds1, fm_crds2], axis=0)
        return varr.assign_coords(frame=fm_crds_new)
    else:
        return varr
Example 4: test_blockwise_shufflesplit
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def test_blockwise_shufflesplit():
    # X and dX are module-level test data (110 samples) defined outside
    # this excerpt
    splitter = dask_ml.model_selection.ShuffleSplit(random_state=0)
    assert splitter.get_n_splits() == 10
    gen = splitter.split(dX)

    train_idx, test_idx = next(gen)
    assert isinstance(train_idx, da.Array)
    assert isinstance(test_idx, da.Array)

    assert train_idx.shape == (99,)  # 90% of 110
    assert test_idx.shape == (11,)

    assert train_idx.chunks == ((45, 45, 9),)
    assert test_idx.chunks == ((5, 5, 1),)

    counts = pd.value_counts(train_idx.compute())
    assert counts.max() == 1

    N = len(X)
    np.testing.assert_array_equal(
        np.unique(da.concatenate([train_idx, test_idx])), np.arange(N)
    )
Example 5: _slice_padded
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def _slice_padded(self, _bounds):
    # how far each requested edge falls outside the image
    # (left, top, right, bottom)
    pads = (max(-_bounds[0], 0), max(-_bounds[1], 0),
            max(_bounds[2] - self.shape[2], 0), max(_bounds[3] - self.shape[1], 0))
    # the requested bounds clipped to the image extent
    bounds = (max(_bounds[0], 0),
              max(_bounds[1], 0),
              max(min(_bounds[2], self.shape[2]), 0),
              max(min(_bounds[3], self.shape[1]), 0))
    result = self[:, bounds[1]:bounds[3], bounds[0]:bounds[2]]
    if pads[0] > 0:
        # pad the left edge with zeros
        dims = (result.shape[0], result.shape[1], pads[0])
        result = da.concatenate([da.zeros(dims, chunks=dims, dtype=result.dtype),
                                 result], axis=2)
    if pads[2] > 0:
        # pad the right edge with zeros
        dims = (result.shape[0], result.shape[1], pads[2])
        result = da.concatenate([result,
                                 da.zeros(dims, chunks=dims, dtype=result.dtype)], axis=2)
    if pads[1] > 0:
        # pad the top edge with zeros
        dims = (result.shape[0], pads[1], result.shape[2])
        result = da.concatenate([da.zeros(dims, chunks=dims, dtype=result.dtype),
                                 result], axis=1)
    if pads[3] > 0:
        # pad the bottom edge with zeros
        dims = (result.shape[0], pads[3], result.shape[2])
        result = da.concatenate([result,
                                 da.zeros(dims, chunks=dims, dtype=result.dtype)], axis=1)
    return (result, _bounds[0], _bounds[1])
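The padding pattern above reduces to a short standalone sketch: build a zeros block whose shape matches the array on every axis except the one being padded, then concatenate it on the appropriate side (shapes here are assumptions for illustration).
import dask.array as da

img = da.ones((3, 4, 5), chunks=(3, 4, 5))  # (bands, y, x)
pad_left = 2
zeros = da.zeros((img.shape[0], img.shape[1], pad_left),
                 chunks=(img.shape[0], img.shape[1], pad_left),
                 dtype=img.dtype)
padded = da.concatenate([zeros, img], axis=2)
print(padded.shape)  # (3, 4, 7)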
Example 6: read_bed
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def read_bed(filepath, nrows, ncols):
    from dask.array import concatenate, from_delayed
    from dask.delayed import delayed
    from numpy import float64  # needed for the from_delayed dtype below

    chunk_size = 1024

    row_start = 0
    col_xs = []
    while row_start < nrows:
        row_end = min(row_start + chunk_size, nrows)
        col_start = 0
        row_xs = []
        while col_start < ncols:
            col_end = min(col_start + chunk_size, ncols)
            # read one chunk lazily
            x = delayed(_read_bed_chunk)(
                filepath, nrows, ncols, row_start, row_end, col_start, col_end
            )
            shape = (row_end - row_start, col_end - col_start)
            row_xs += [from_delayed(x, shape, float64)]
            col_start = col_end
        # join the chunks of this row band column-wise
        col_xs += [concatenate(row_xs, axis=1)]
        row_start = row_end
    # stack the row bands
    X = concatenate(col_xs, axis=0)
    return X
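The same two-level tiling works with any chunk loader. Below is a self-contained sketch with a dummy in-memory reader standing in for _read_bed_chunk (the reader and the sizes are assumptions):
import numpy as np
from dask.array import concatenate, from_delayed
from dask.delayed import delayed

def load_chunk(r0, r1, c0, c1):
    # stand-in for a real on-disk reader such as _read_bed_chunk
    return np.full((r1 - r0, c1 - c0), r0 + c0, dtype=np.float64)

nrows, ncols, chunk = 5, 7, 3
row_bands = []
for r0 in range(0, nrows, chunk):
    r1 = min(r0 + chunk, nrows)
    blocks = []
    for c0 in range(0, ncols, chunk):
        c1 = min(c0 + chunk, ncols)
        x = delayed(load_chunk)(r0, r1, c0, c1)
        blocks.append(from_delayed(x, (r1 - r0, c1 - c0), np.float64))
    row_bands.append(concatenate(blocks, axis=1))  # join blocks column-wise
X = concatenate(row_bands, axis=0)                 # stack the row bands
print(X.shape)  # (5, 7)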
Example 7: get_border_lonlats
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def get_border_lonlats(geo_def):
    """Get the border x- and y-coordinates."""
    if geo_def.proj_dict['proj'] == 'geos':
        lon_b, lat_b = get_geostationary_bounding_box(geo_def, 3600)
    else:
        lons, lats = geo_def.get_boundary_lonlats()
        lon_b = np.concatenate((lons.side1, lons.side2, lons.side3, lons.side4))
        lat_b = np.concatenate((lats.side1, lats.side2, lats.side3, lats.side4))

    return lon_b, lat_b
Example 8: find_concat_dim_facet
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def find_concat_dim_facet(da, facet, extra_metadata):
    """In llc grids, find along which horizontal dimension to concatenate
    a facet, between i, i_g and j, j_g. If the order of the facet is F,
    concat along i or i_g. If order is C, concat along j or j_g. Also
    return the horizontal dim not to concatenate.

    PARAMETERS
    ----------
    da : xarray.DataArray
        xmitgcm llc data array
    facet : int
        facet number
    extra_metadata : dict
        dict of extra_metadata from get_extra_metadata

    RETURNS
    -------
    concat_dim, non_concat_dim : str, str
        names of the dimensions for concatenation or not
    """
    order = extra_metadata['facet_orders'][facet]
    if order == 'C':
        possible_concat_dims = ['j', 'j_g']
    elif order == 'F':
        possible_concat_dims = ['i', 'i_g']
    concat_dim = find_concat_dim(da, possible_concat_dims)

    # we also need the other horizontal dimension for vector indexing
    all_dims = list(da.dims)
    # discard face
    all_dims.remove('face')
    # remove the concat_dim to find the horizontal non-concat dimension
    all_dims.remove(concat_dim)
    non_concat_dim = all_dims[0]
    return concat_dim, non_concat_dim
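Reusing find_concat_dim from Example 2, a small check of this helper (the metadata dict is invented; real ones come from get_extra_metadata):
import numpy as np
import xarray as xr

meta = {'facet_orders': ['C', 'F']}  # hypothetical facet ordering
facet_da = xr.DataArray(np.zeros((2, 3, 4)), dims=['face', 'j', 'i'])
print(find_concat_dim_facet(facet_da, 0, meta))  # ('j', 'i'): order 'C' concatenates along j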
Example 9: llc_facets_3d_spatial_to_compact
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def llc_facets_3d_spatial_to_compact(facets, dimname, extra_metadata):
    """Write in compact form a list of 3d facets.

    PARAMETERS
    ----------
    facets : dict
        dict of xarray.dataarrays for the facets
    dimname : str
        name of the vertical dimension
    extra_metadata : dict
        extra_metadata from get_extra_metadata

    RETURNS
    -------
    flatdata : numpy.array
        all the data in vector form
    """
    nz = len(facets['facet0'][dimname])
    nfacets = len(facets)
    flatdata = np.array([])

    for kz in range(nz):
        # rebuild the dict of 2d facets for this vertical level
        tmpdict = {}
        for kfacet in range(nfacets):
            this_facet = facets['facet' + str(kfacet)]
            if this_facet is not None:
                tmpdict['facet' + str(kfacet)] = this_facet.isel(k=kz)
            else:
                tmpdict['facet' + str(kfacet)] = None
        # concatenate all 2d arrays
        compact2d = llc_facets_2d_to_compact(tmpdict, extra_metadata)
        flatdata = np.concatenate([flatdata, compact2d])

    return flatdata
Example 10: test_uniform_comprehensions
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def test_uniform_comprehensions():
    # aliases in this excerpt: da_ndf = dask_image.ndfilters,
    # dau = dask.array.utils (imports omitted)
    da_func = lambda arr: da_ndf.uniform_filter(arr, 1, origin=0)  # noqa: E731

    np.random.seed(0)

    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    l2s = [da_func(d[i]) for i in range(len(d))]
    l2c = [da_func(d[i])[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
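A note on the pattern in this and the following tests: indexing a 2-D slice with [None] restores a leading length-1 axis, so concatenating the expanded slices along axis 0 (the default) is equivalent to stacking the unexpanded ones. A minimal demonstration with invented data:
import numpy as np
import dask.array as da

d = da.ones((3, 4, 5), chunks=(1, 4, 5))
slices = [d[i] for i in range(len(d))]          # each slice is (4, 5)
expanded = [d[i][None] for i in range(len(d))]  # each is (1, 4, 5)
a = da.stack(slices)          # (3, 4, 5)
b = da.concatenate(expanded)  # (3, 4, 5)
np.testing.assert_array_equal(a.compute(), b.compute())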
Example 11: test_order_comprehensions
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def test_order_comprehensions(da_func, kwargs):
    # da_func and kwargs are supplied by a pytest parametrization
    # omitted from this excerpt
    np.random.seed(0)

    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    l2s = [da_func(d[i], **kwargs) for i in range(len(d))]
    l2c = [da_func(d[i], **kwargs)[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
Example 12: test_edge_comprehensions
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def test_edge_comprehensions(da_func):
    np.random.seed(0)

    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    l2s = [da_func(d[i]) for i in range(len(d))]
    l2c = [da_func(d[i])[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
Example 13: test_generic_filter_comprehensions
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def test_generic_filter_comprehensions(da_func):
    da_wfunc = lambda arr: da_func(arr, lambda x: x, 1)  # noqa: E731

    np.random.seed(0)

    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    l2s = [da_wfunc(d[i]) for i in range(len(d))]
    l2c = [da_wfunc(d[i])[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
Example 14: test_convolutions_comprehensions
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def test_convolutions_comprehensions(da_func):
    np.random.seed(0)

    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    weights = np.ones((1, 1))

    l2s = [da_func(d[i], weights) for i in range(len(d))]
    l2c = [da_func(d[i], weights)[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
Example 15: test_laplace_comprehensions
# Required import: from dask import array [as alias]
# Or: from dask.array import concatenate [as alias]
def test_laplace_comprehensions():
    np.random.seed(0)

    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    l2s = [da_ndf.laplace(d[i]) for i in range(len(d))]
    l2c = [da_ndf.laplace(d[i])[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))