本文整理匯總了Python中dask.array.vstack方法的典型用法代碼示例。如果您正苦於以下問題:Python array.vstack方法的具體用法?Python array.vstack怎麽用?Python array.vstack使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊dask.array
的用法示例。
在下文中一共展示了array.vstack方法的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: StackColumns
# 需要導入模塊: from dask import array [as 別名]
# 或者: from dask.array import vstack [as 別名]
def StackColumns(*cols):
    """
    Stack the input dask arrays vertically, column by column.

    This uses :func:`dask.array.vstack`.

    Parameters
    ----------
    *cols : :class:`dask.array.Array`
        the dask arrays to stack vertically together

    Returns
    -------
    :class:`dask.array.Array` :
        the dask array where columns correspond to the input arrays

    Raises
    ------
    TypeError
        If the input columns are not dask arrays
    """
    # Enforce the contract advertised in the docstring: the documented
    # TypeError was previously never raised for non-dask input.
    if not all(isinstance(col, da.Array) for col in cols):
        raise TypeError("input columns to StackColumns must be dask arrays")

    # Broadcast to a common shape so the inputs line up, then stack them
    # as rows and transpose so each input becomes a column of the result.
    cols = da.broadcast_arrays(*cols)
    return da.vstack(cols).T
示例2: get_lonlats
# 需要導入模塊: from dask import array [as 別名]
# 或者: from dask.array import vstack [as 別名]
def get_lonlats(self, nprocs=None, data_slice=None, cache=False, dtype=None, chunks=None):
    """Return lon and lat arrays of the area.

    Longitude/latitude arrays are fetched from every sub-definition in
    ``self.defs`` (restricted to the requested row window) and stacked
    vertically — lazily with dask when *chunks* is given, eagerly with
    numpy otherwise.
    """
    if chunks is None:
        stack = np.vstack
    else:
        from dask.array import vstack as stack

    # Fall back to the full area when no (row, col) slice pair is given.
    try:
        row_slice, col_slice = data_slice
    except TypeError:
        row_slice = slice(0, self.height)
        col_slice = slice(0, self.width)

    lon_parts = []
    lat_parts = []
    offset = 0
    for definition in self.defs:
        # Translate the requested row window into this definition's rows.
        start = max(row_slice.start - offset, 0)
        stop = min(max(row_slice.stop - offset, 0), definition.height)
        sub_row_slice = slice(start, stop, row_slice.step)
        lons, lats = definition.get_lonlats(nprocs=nprocs,
                                            data_slice=(sub_row_slice, col_slice),
                                            cache=cache, dtype=dtype,
                                            chunks=chunks)
        lon_parts.append(lons)
        lat_parts.append(lats)
        offset += lons.shape[0]

    self.lons = stack(lon_parts)
    self.lats = stack(lat_parts)
    return self.lons, self.lats
示例3: __call__
# 需要導入模塊: from dask import array [as 別名]
# 或者: from dask.array import vstack [as 別名]
def __call__(self, projectables, *args, **kwargs):
    """Generate the composite."""
    from trollimage.image import rgb2ycbcr, ycbcr2rgb

    projectables = self.match_data_arrays(projectables)
    # First projectable supplies the replacement luminance, scaled to
    # [0, 1] and clipped at 1.0.
    luminance = projectables[0].copy()
    luminance /= 100.
    luminance = da.where(luminance > 1., 1., luminance)
    # Enhanced RGB version of the composite to be sharpened.
    rgb_img = enhance2dataset(projectables[1])
    # This all will be eventually replaced with trollimage convert() method
    # ycbcr_img = rgb_img.convert('YCbCr')
    # ycbcr_img.data[0, :, :] = luminance
    # rgb_img = ycbcr_img.convert('RGB')
    # Swap the composite's Y channel for the computed luminance, keeping
    # the original chrominance, then go back to RGB.
    _, cb_chan, cr_chan = rgb2ycbcr(rgb_img.data[0, :, :],
                                    rgb_img.data[1, :, :],
                                    rgb_img.data[2, :, :])
    red, green, blue = ycbcr2rgb(luminance, cb_chan, cr_chan)
    rows, cols = red.shape
    channels = [da.reshape(chan, (1, rows, cols))
                for chan in (red, green, blue)]
    rgb_img.data = da.vstack(channels)
    return super(LuminanceSharpeningCompositor, self).__call__(rgb_img, *args, **kwargs)
示例4: pad_hrv_data
# 需要導入模塊: from dask import array [as 別名]
# 或者: from dask.array import vstack [as 別名]
def pad_hrv_data(self, res):
    """Add empty pixels around the HRV."""
    logger.debug('Padding HRV data to full disk')
    nlines = int(self.mda['number_of_lines'])
    segment_number = self.mda['segment_sequence_number']
    current_first_line = (segment_number
                          - self.mda['planned_start_segment_number']) * nlines
    bounds = self.epilogue['ImageProductionStats']['ActualL15CoverageHRV']

    # Number of lines of this segment that belong to the lower window,
    # clamped to [0, nlines].
    south_lines = bounds['LowerNorthLineActual'] - current_first_line - 1
    south_lines = min(max(south_lines, 0), nlines)

    padded = []
    if south_lines > 0:
        # Part of the lower window falls inside this segment.
        padded.append(pad_data(res[:south_lines, :].data,
                               (south_lines, 11136),
                               bounds['LowerEastColumnActual'],
                               bounds['LowerWestColumnActual']))
    if south_lines < nlines:
        # Part of the upper window falls inside this segment.
        padded.append(pad_data(res[south_lines:, :].data,
                               (nlines - south_lines, 11136),
                               bounds['UpperEastColumnActual'],
                               bounds['UpperWestColumnActual']))
    return xr.DataArray(da.vstack(padded), dims=('y', 'x'))
示例5: fit
# 需要導入模塊: from dask import array [as 別名]
# 或者: from dask.array import vstack [as 別名]
def fit(
    self,
    X: Union[ArrayLike, DataFrameType],
    y: Optional[Union[ArrayLike, SeriesType]] = None,
) -> "RobustScaler":
    """Compute the per-column median and quantile range used for scaling.

    Parameters
    ----------
    X : array-like or dask DataFrame
        Data whose per-column center (median) and scale (quantile
        range) are computed.
    y : ignored
        Present only for scikit-learn API compatibility.

    Returns
    -------
    RobustScaler
        The fitted scaler (``self``).

    Raises
    ------
    ValueError
        If ``quantile_range`` is not ordered within ``[0, 100]``.
    """
    q_min, q_max = self.quantile_range
    if not 0 <= q_min <= q_max <= 100:
        raise ValueError("Invalid quantile range: %s" % str(self.quantile_range))

    if isinstance(X, dd.DataFrame):
        n_columns = len(X.columns)
        partition_lengths = X.map_partitions(len).compute()
        # np.find_common_type was deprecated in NumPy 1.25 and removed in
        # NumPy 2.0; np.result_type is the documented replacement for
        # finding a common dtype across the frame's columns.
        dtype = np.result_type(*X.dtypes)
        blocks = X.to_delayed()
        # Rebuild the frame as one dask array so percentiles can be taken
        # column by column below.
        X = da.vstack(
            [
                da.from_delayed(
                    block.values, shape=(length, n_columns), dtype=dtype
                )
                for block, length in zip(blocks, partition_lengths)
            ]
        )

    quantiles: Any = [da.percentile(col, [q_min, 50.0, q_max]) for col in X.T]
    quantiles = da.vstack(quantiles).compute()
    self.center_: List[float] = quantiles[:, 1]
    self.scale_: List[float] = quantiles[:, 2] - quantiles[:, 0]
    # Columns with zero spread would otherwise divide by zero at transform.
    self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
    self.n_features_in_ = X.shape[1]
    return self
示例6: _dense_fit
# 需要導入模塊: from dask import array [as 別名]
# 或者: from dask.array import vstack [as 別名]
def _dense_fit(
    self, X: Union[ArrayLike, DataFrameType], random_state: int
) -> Union[ArrayLike, DataFrameType]:
    """Compute per-column quantiles of *X* at ``self.references_``.

    The stacked quantile table is stored on ``self.quantiles_``.
    NOTE(review): despite the annotation, no value is returned — confirm
    against callers.
    """
    percentile_refs = self.references_ * 100
    per_column = [da.percentile(column, percentile_refs) for column in X.T]
    (self.quantiles_,) = compute(da.vstack(per_column).T)
示例7: _transform
# 需要導入模塊: from dask import array [as 別名]
# 或者: from dask.array import vstack [as 別名]
def _transform(
    self, X: Union[ArrayLike, DataFrameType], inverse: bool = False
) -> Union[ArrayLike, DataFrameType]:
    """Apply (or, when *inverse* is true, invert) the per-column transform."""
    X = X.copy()  # work on a copy; _transform_col may write through views
    columns = []
    for idx in range(X.shape[1]):
        columns.append(
            self._transform_col(X[:, idx], self.quantiles_[:, idx], inverse)
        )
    return da.vstack(columns, allow_unknown_chunksizes=True).T