This page collects typical usage examples of the Python method numpy.ma.empty. If you are unsure what ma.empty does or how to use it, the curated code samples below may help; they also illustrate the numpy.ma module the method belongs to.
The following 15 code examples of ma.empty are shown, sorted by popularity by default.
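Before the project-specific examples, a minimal self-contained sketch of what ma.empty itself does may be useful (standard NumPy behavior, not taken from any project below):

import numpy as np
import numpy.ma as ma

# ma.empty works like np.empty but returns a MaskedArray:
# the data buffer is uninitialized and nothing is masked yet.
a = ma.empty((2, 3), dtype=float)
print(type(a))    # <class 'numpy.ma.core.MaskedArray'>
print(a.shape)    # (2, 3)
print(a.mask)     # False, i.e. ma.nomask

# typical pattern in the examples below: allocate first, then fill and mask
a[0] = [1.0, 2.0, 3.0]
a[1] = ma.masked  # mask the whole second row
print(a.count())  # 3 unmasked values remain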
Example 1: test_prepare_array_maskedarrays
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def test_prepare_array_maskedarrays():
    """Convert masked array data into a proper array."""
    # input is ma.masked_array
    data = ma.empty((1, 1, 1))
    # output ndarray
    output = prepare_array(data, masked=False)
    assert isinstance(output, np.ndarray)
    assert not isinstance(output, ma.masked_array)
    assert output.shape == (1, 1, 1)
    # output masked array
    output = prepare_array(data)
    assert isinstance(output, ma.masked_array)
    assert output.shape == (1, 1, 1)
    # input is ma.masked_array with full mask
    data = ma.masked_array(data=np.ones((1, 1, 1)), mask=np.ones((1, 1, 1)))
    # output ndarray
    output = prepare_array(data, masked=False)
    assert isinstance(output, np.ndarray)
    assert not isinstance(output, ma.masked_array)
    assert output.shape == (1, 1, 1)
    # output masked array
    output = prepare_array(data)
    assert isinstance(output, ma.masked_array)
    assert output.shape == (1, 1, 1)
Example 2: comp_gradient
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def comp_gradient(blob):  # compare g within sub blob, a component of intra_blob
    dert__ = ma.empty(shape=blob.dert__.shape, dtype=int)  # initialize dert__
    g__ = ma.array(blob.dert__[:, :, 3], mask=~blob.map)  # apply mask = ~map
    dy__ = g__[2:, 1:-1] - g__[:-2, 1:-1]  # vertical comp between rows -> dy; (1:-1): first and last columns are discarded
    dx__ = g__[1:-1, 2:] - g__[1:-1, :-2]  # lateral comp between columns -> dx; (1:-1): first and last rows are discarded
    gg__ = np.hypot(dy__, dx__) - ave  # deviation of gradient
    # pack all derts into dert__
    dert__[:, :, 0] = g__
    dert__[1:-1, 1:-1, 1] = dy__  # first and last rows and columns are discarded
    dert__[1:-1, 1:-1, 2] = dx__
    dert__[1:-1, 1:-1, 3] = gg__
    blob.new_dert__[0] = dert__  # pack dert__ into blob
    return 1  # comp rng
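The slicing in comp_gradient is a masked central difference. As a hedged illustration, the same comparison on a plain 2D masked array (blob, map and the dert__ packing omitted; ave is an assumed constant standing in for the project-specific average):

import numpy as np
import numpy.ma as ma

ave = 10  # assumed constant for this sketch
g__ = ma.array(np.random.randint(0, 255, (5, 5)))  # stand-in for blob.dert__[:, :, 3]

dy__ = g__[2:, 1:-1] - g__[:-2, 1:-1]  # vertical difference: row y+1 minus row y-1
dx__ = g__[1:-1, 2:] - g__[1:-1, :-2]  # lateral difference: column x+1 minus column x-1
gg__ = np.hypot(dy__, dx__) - ave      # gradient deviation over the interior

# the 1-pixel border is discarded, so results fit the [1:-1, 1:-1] interior
assert gg__.shape == (g__.shape[0] - 2, g__.shape[1] - 2)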
Example 3: comp_angle
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def comp_angle(blob):  # compute and compare angles, a component of intra_blob
    # compute angles:
    dy__ = ma.array(blob.dert__[:, :, 1], mask=~blob.map)
    dx__ = ma.array(blob.dert__[:, :, 2], mask=~blob.map)
    a__ = np.arctan2(dy__, dx__) * angle_coef + 128
    # compare angles:
    dert__ = ma.empty(shape=blob.dert__.shape, dtype=int)  # initialize dert__
    day__ = correct_da(a__[2:, 1:-1] - a__[:-2, 1:-1])  # vertical comp between rows -> day; (1:-1): first and last columns are discarded
    dax__ = correct_da(a__[1:-1, 2:] - a__[1:-1, :-2])  # lateral comp between columns -> dax; (1:-1): first and last rows are discarded
    ga__ = np.hypot(day__, dax__) - ave  # deviation of gradient
    # pack all derts into dert__
    dert__[:, :, 0] = a__
    dert__[1:-1, 1:-1, 1] = day__  # first and last rows and columns are discarded
    dert__[1:-1, 1:-1, 2] = dax__
    dert__[1:-1, 1:-1, 3] = ga__
    blob.new_dert__[0] = dert__  # pack dert__ into blob
    return 1  # comp rng
# ---------- comp_angle() end --------------------------------------------------------------------------------------
Example 4: argstoarray
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def argstoarray(*args):
    """
    Constructs a 2D array from a group of sequences.

    Sequences are filled with missing values to match the length of the longest
    sequence.

    Parameters
    ----------
    args : sequences
        Group of sequences.

    Returns
    -------
    argstoarray : MaskedArray
        A ( `m` x `n` ) masked array, where `m` is the number of arguments and
        `n` the length of the longest argument.

    Notes
    -----
    `numpy.ma.row_stack` has identical behavior, but is called with a sequence
    of sequences.

    """
    if len(args) == 1 and not isinstance(args[0], ndarray):
        output = ma.asarray(args[0])
        if output.ndim != 2:
            raise ValueError("The input should be 2D")
    else:
        n = len(args)
        m = max([len(k) for k in args])
        output = ma.array(np.empty((n,m), dtype=float), mask=True)
        for (k,v) in enumerate(args):
            output[k,:len(v)] = v

    output[np.logical_not(np.isfinite(output._data))] = masked
    return output
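This definition matches scipy.stats.mstats.argstoarray, so a quick usage sketch can call the scipy version directly; shorter sequences are padded with masked values:

from scipy.stats.mstats import argstoarray

out = argstoarray([1, 2, 3], [4, 5], [6])
print(out.shape)       # (3, 3): three sequences, the longest has three elements
print(out)
# [[1.0 2.0 3.0]
#  [4.0 5.0 --]
#  [6.0 -- --]]
print(out.mask.sum())  # 3 padded positions are masked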
Example 5: argstoarray
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def argstoarray(*args):
    """
    Constructs a 2D array from a group of sequences.

    Sequences are filled with missing values to match the length of the longest
    sequence.

    Parameters
    ----------
    args : sequences
        Group of sequences.

    Returns
    -------
    argstoarray : MaskedArray
        A ( `m` x `n` ) masked array, where `m` is the number of arguments and
        `n` the length of the longest argument.

    Notes
    -----
    numpy.ma.row_stack has identical behavior, but is called with a sequence of
    sequences.

    """
    if len(args) == 1 and not isinstance(args[0], ndarray):
        output = ma.asarray(args[0])
        if output.ndim != 2:
            raise ValueError("The input should be 2D")
    else:
        n = len(args)
        m = max([len(k) for k in args])
        output = ma.array(np.empty((n,m), dtype=float), mask=True)
        for (k,v) in enumerate(args):
            output[k,:len(v)] = v

    output[np.logical_not(np.isfinite(output._data))] = masked
    return output
#####--------------------------------------------------------------------------
#---- --- Ranking ---
#####--------------------------------------------------------------------------
Example 6: merge_structured_arrays
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def merge_structured_arrays(*arrays):
    """merge a set of structured numpy arrays

    Parameters
    ----------
    *arrays
        a set of masked structured arrays. all arrays must have the same number
        of rows.

    Returns
    -------
    x : MaskedArray
        the merged array
    """
    # concatenate all of the sub-arrays fields
    dtype = sum((array.dtype.descr for array in arrays), [])
    # empty array with all fields
    x = ma.empty(len(arrays[0]), dtype=dtype)
    x = SerializableMaskedArray(x)
    # assign each field
    for array in arrays:
        for name in array.dtype.names:
            x[name] = array[name]
    return x
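A hedged usage sketch of the merge above. SerializableMaskedArray is project-specific, so this sketch keeps a plain ma.MaskedArray instead; the dtype-merging and field-copy logic is the same:

import numpy as np
import numpy.ma as ma

a = ma.array(np.zeros(3, dtype=[("x", float)]))
b = ma.array(np.zeros(3, dtype=[("y", int)]))

dtype = sum((arr.dtype.descr for arr in (a, b)), [])  # merged field descriptions
merged = ma.empty(len(a), dtype=dtype)                # one output row per input row, all fields present
for arr in (a, b):
    for name in arr.dtype.names:
        merged[name] = arr[name]

print(merged.dtype.names)  # ('x', 'y')
print(merged.shape)        # (3,)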
Example 7: test_prepare_array_iterables
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def test_prepare_array_iterables():
    """Convert iterable data into a proper array."""
    # input is iterable
    # iterable contains arrays
    data = [np.zeros((1, 1))]
    # output ndarray
    output = prepare_array(data, masked=False)
    assert isinstance(output, np.ndarray)
    assert not isinstance(output, ma.masked_array)
    assert output.shape == (1, 1, 1)
    # output masked array
    output = prepare_array(data)
    assert isinstance(output, ma.masked_array)
    assert output.shape == (1, 1, 1)
    # iterable contains masked arrays
    data = [ma.empty((1, 1))]
    output = prepare_array(data, masked=False)
    assert isinstance(output, np.ndarray)
    assert not isinstance(output, ma.masked_array)
    assert output.shape == (1, 1, 1)
    # output masked array
    output = prepare_array(data)
    assert isinstance(output, ma.masked_array)
    assert output.shape == (1, 1, 1)
    # iterable contains masked arrays with full mask
    data = [ma.masked_array(data=np.ones((1, 1)), mask=np.ones((1, 1)))]
    output = prepare_array(data, masked=False)
    assert isinstance(output, np.ndarray)
    assert not isinstance(output, ma.masked_array)
    assert output.shape == (1, 1, 1)
    # output masked array
    output = prepare_array(data)
    assert isinstance(output, ma.masked_array)
    assert output.shape == (1, 1, 1)
Example 8: __getitem__
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def __getitem__(self, item):
    if isinstance(item, str):
        return self.columns[item]
    elif isinstance(item, (int, np.integer)):
        return self.Row(self, item)
    elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
        return self.Row(self, item.item())
    elif self._is_list_or_tuple_of_str(item):
        out = self.__class__([self[x] for x in item],
                             copy_indices=self._copy_indices)
        out._groups = groups.TableGroups(out, indices=self.groups._indices,
                                         keys=self.groups._keys)
        out.meta = self.meta.copy()  # Shallow copy for meta
        return out
    elif ((isinstance(item, np.ndarray) and item.size == 0) or
          (isinstance(item, (tuple, list)) and not item)):
        # If item is an empty array/list/tuple then return the table with no rows
        return self._new_from_slice([])
    elif (isinstance(item, slice) or
          isinstance(item, np.ndarray) or
          isinstance(item, list) or
          isinstance(item, tuple) and all(isinstance(x, np.ndarray)
                                          for x in item)):
        # here for the many ways to give a slice; a tuple of ndarray
        # is produced by np.where, as in t[np.where(t['a'] > 2)]
        # For all, a new table is constructed with slice of all columns
        return self._new_from_slice(item)
    else:
        raise ValueError('Illegal type {} for table item access'
                         .format(type(item)))
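This __getitem__ appears to come from an astropy-style Table class. Assuming astropy is available, a brief sketch of the access modes it dispatches on:

import numpy as np
from astropy.table import Table  # assumption: the method above belongs to such a Table

t = Table({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})

col = t["a"]                     # str -> a single column
row = t[0]                       # int -> a Row object
sub = t[["a", "b"]]              # list of column names -> new Table with those columns
sliced = t[1:]                   # slice -> new Table with a subset of rows
where = t[np.where(t["a"] > 1)]  # tuple of ndarray, as produced by np.where -> row selection
empty = t[[]]                    # empty list -> Table with no rows
print(len(sliced), len(where), len(empty))  # 2 2 0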
Example 9: rankdata
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def rankdata(data, axis=None, use_missing=False):
    """Returns the rank (also known as order statistics) of each data point
    along the given axis.

    If some values are tied, their rank is averaged.
    If some values are masked, their rank is set to 0 if use_missing is False,
    or set to the average rank of the unmasked values if use_missing is True.

    Parameters
    ----------
    data : sequence
        Input data. The data is transformed to a masked array
    axis : {None,int}, optional
        Axis along which to perform the ranking.
        If None, the array is first flattened. An exception is raised if
        the axis is specified for arrays with a dimension larger than 2
    use_missing : bool, optional
        Whether the masked values have a rank of 0 (False) or equal to the
        average rank of the unmasked values (True).

    """
    def _rank1d(data, use_missing=False):
        n = data.count()
        rk = np.empty(data.size, dtype=float)
        idx = data.argsort()
        rk[idx[:n]] = np.arange(1,n+1)
        if use_missing:
            rk[idx[n:]] = (n+1)/2.
        else:
            rk[idx[n:]] = 0
        repeats = find_repeats(data.copy())
        for r in repeats[0]:
            condition = (data == r).filled(False)
            rk[condition] = rk[condition].mean()
        return rk

    data = ma.array(data, copy=False)
    if axis is None:
        if data.ndim > 1:
            return _rank1d(data.ravel(), use_missing).reshape(data.shape)
        else:
            return _rank1d(data, use_missing)
    else:
        return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray)
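The code above matches scipy.stats.mstats.rankdata, so a short usage sketch can call the scipy version for self-containment; ties are averaged and masked values get rank 0 unless use_missing=True:

import numpy.ma as ma
from scipy.stats.mstats import rankdata

x = ma.array([40, 10, 10, 30, 999], mask=[0, 0, 0, 0, 1])  # last value is masked

print(rankdata(x))                    # [4.  1.5 1.5 3.  0. ]  masked value ranked 0
print(rankdata(x, use_missing=True))  # [4.  1.5 1.5 3.  2.5]  masked value gets the mean rank (n+1)/2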
Example 10: plotting_positions
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def plotting_positions(data, alpha=0.4, beta=0.4):
    """
    Returns plotting positions (or empirical percentile points) for the data.

    Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
        - i is the rank order statistics
        - n is the number of unmasked values along the given axis
        - `alpha` and `beta` are two parameters.

    Typical values for `alpha` and `beta` are:
        - (0,1)    : ``p(k) = k/n``, linear interpolation of cdf (R, type 4)
        - (.5,.5)  : ``p(k) = (k-1/2.)/n``, piecewise linear function (R, type 5)
        - (0,0)    : ``p(k) = k/(n+1)``, Weibull (R type 6)
        - (1,1)    : ``p(k) = (k-1)/(n-1)``, in this case,
          ``p(k) = mode[F(x[k])]``. That's R default (R type 7)
        - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then
          ``p(k) ~ median[F(x[k])]``. The resulting quantile estimates are
          approximately median-unbiased regardless of the distribution of x.
          (R type 8)
        - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom. The resulting quantile
          estimates are approximately unbiased if x is normally distributed
          (R type 9)
        - (.4,.4)  : approximately quantile unbiased (Cunnane)
        - (.35,.35): APL, used with PWM
        - (.3175, .3175): used in scipy.stats.probplot

    Parameters
    ----------
    data : array_like
        Input data, as a sequence or array of dimension at most 2.
    alpha : float, optional
        Plotting positions parameter. Default is 0.4.
    beta : float, optional
        Plotting positions parameter. Default is 0.4.

    Returns
    -------
    positions : MaskedArray
        The calculated plotting positions.

    """
    data = ma.array(data, copy=False).reshape(1,-1)
    n = data.count()
    plpos = np.empty(data.size, dtype=float)
    plpos[n:] = 0
    plpos[data.argsort(axis=None)[:n]] = ((np.arange(1, n+1) - alpha) /
                                          (n + 1.0 - alpha - beta))
    return ma.array(plpos, mask=data._mask)
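This matches scipy.stats.mstats.plotting_positions, so a worked example of the (i - alpha) / (n + 1 - alpha - beta) formula with the default Cunnane parameters alpha = beta = 0.4 can call the scipy version directly:

from scipy.stats.mstats import plotting_positions

pp = plotting_positions([3.0, 1.0, 4.0, 1.5])  # ranks i = 3, 1, 4, 2 with n = 4
print(pp)
# (i - 0.4) / (4 + 1 - 0.8) for each value, reported in the original order:
# [0.61904762 0.14285714 0.85714286 0.38095238]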
Example 11: rankdata
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def rankdata(data, axis=None, use_missing=False):
    """Returns the rank (also known as order statistics) of each data point
    along the given axis.

    If some values are tied, their rank is averaged.
    If some values are masked, their rank is set to 0 if use_missing is False,
    or set to the average rank of the unmasked values if use_missing is True.

    Parameters
    ----------
    data : sequence
        Input data. The data is transformed to a masked array
    axis : {None,int}, optional
        Axis along which to perform the ranking.
        If None, the array is first flattened. An exception is raised if
        the axis is specified for arrays with a dimension larger than 2
    use_missing : {boolean}, optional
        Whether the masked values have a rank of 0 (False) or equal to the
        average rank of the unmasked values (True).

    """
    #
    def _rank1d(data, use_missing=False):
        n = data.count()
        rk = np.empty(data.size, dtype=float)
        idx = data.argsort()
        rk[idx[:n]] = np.arange(1,n+1)
        #
        if use_missing:
            rk[idx[n:]] = (n+1)/2.
        else:
            rk[idx[n:]] = 0
        #
        repeats = find_repeats(data.copy())
        for r in repeats[0]:
            condition = (data == r).filled(False)
            rk[condition] = rk[condition].mean()
        return rk
    #
    data = ma.array(data, copy=False)
    if axis is None:
        if data.ndim > 1:
            return _rank1d(data.ravel(), use_missing).reshape(data.shape)
        else:
            return _rank1d(data, use_missing)
    else:
        return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray)
#####--------------------------------------------------------------------------
#---- --- Central tendency ---
#####--------------------------------------------------------------------------
Example 12: plotting_positions
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def plotting_positions(data, alpha=0.4, beta=0.4):
    """
    Returns plotting positions (or empirical percentile points) for the data.

    Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
        - i is the rank order statistics
        - n is the number of unmasked values along the given axis
        - `alpha` and `beta` are two parameters.

    Typical values for `alpha` and `beta` are:
        - (0,1)    : ``p(k) = k/n``, linear interpolation of cdf (R, type 4)
        - (.5,.5)  : ``p(k) = (k-1/2.)/n``, piecewise linear function (R, type 5)
        - (0,0)    : ``p(k) = k/(n+1)``, Weibull (R type 6)
        - (1,1)    : ``p(k) = (k-1)/(n-1)``, in this case,
          ``p(k) = mode[F(x[k])]``. That's R default (R type 7)
        - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then
          ``p(k) ~ median[F(x[k])]``. The resulting quantile estimates are
          approximately median-unbiased regardless of the distribution of x.
          (R type 8)
        - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom. The resulting quantile
          estimates are approximately unbiased if x is normally distributed
          (R type 9)
        - (.4,.4)  : approximately quantile unbiased (Cunnane)
        - (.35,.35): APL, used with PWM
        - (.3175, .3175): used in scipy.stats.probplot

    Parameters
    ----------
    data : array_like
        Input data, as a sequence or array of dimension at most 2.
    alpha : float, optional
        Plotting positions parameter. Default is 0.4.
    beta : float, optional
        Plotting positions parameter. Default is 0.4.

    Returns
    -------
    positions : MaskedArray
        The calculated plotting positions.

    """
    data = ma.array(data, copy=False).reshape(1,-1)
    n = data.count()
    plpos = np.empty(data.size, dtype=float)
    plpos[n:] = 0
    plpos[data.argsort()[:n]] = ((np.arange(1, n+1) - alpha) /
                                 (n + 1.0 - alpha - beta))
    return ma.array(plpos, mask=data._mask)
Example 13: plotting_positions
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def plotting_positions(data, alpha=0.4, beta=0.4):
    """
    Returns plotting positions (or empirical percentile points) for the data.

    Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
        - i is the rank order statistics
        - n is the number of unmasked values along the given axis
        - `alpha` and `beta` are two parameters.

    Typical values for `alpha` and `beta` are:
        - (0,1)    : ``p(k) = k/n``, linear interpolation of cdf (R, type 4)
        - (.5,.5)  : ``p(k) = (k-1/2.)/n``, piecewise linear function (R, type 5)
        - (0,0)    : ``p(k) = k/(n+1)``, Weibull (R type 6)
        - (1,1)    : ``p(k) = (k-1)/(n-1)``, in this case,
          ``p(k) = mode[F(x[k])]``. That's R default (R type 7)
        - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then
          ``p(k) ~ median[F(x[k])]``. The resulting quantile estimates are
          approximately median-unbiased regardless of the distribution of x.
          (R type 8)
        - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom. The resulting quantile
          estimates are approximately unbiased if x is normally distributed
          (R type 9)
        - (.4,.4)  : approximately quantile unbiased (Cunnane)
        - (.35,.35): APL, used with PWM
        - (.3175, .3175): used in scipy.stats.probplot

    Parameters
    ----------
    data : array_like
        Input data, as a sequence or array of dimension at most 2.
    alpha : float, optional
        Plotting positions parameter. Default is 0.4.
    beta : float, optional
        Plotting positions parameter. Default is 0.4.

    Returns
    -------
    positions : MaskedArray
        The calculated plotting positions.

    """
    data = ma.array(data, copy=False).reshape(1,-1)
    n = data.count()
    plpos = np.empty(data.size, dtype=float)
    plpos[n:] = 0
    plpos[data.argsort()[:n]] = ((np.arange(1, n+1) - alpha) /
                                 (n + 1.0 - alpha - beta))
    return ma.array(plpos, mask=data._mask)
Example 14: test_reproject_geometry
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def test_reproject_geometry(landpoly):
    """Reproject geometry."""
    with fiona.open(landpoly, "r") as src:
        for feature in src:
            # WGS84 to Spherical Mercator
            out_geom = reproject_geometry(
                shape(feature["geometry"]), CRS(src.crs),
                CRS().from_epsg(3857))
            assert out_geom.is_valid
            # WGS84 to LAEA
            out_geom = reproject_geometry(
                shape(feature["geometry"]), CRS(src.crs),
                CRS().from_epsg(3035))
            assert out_geom.is_valid
            # WGS84 to WGS84
            out_geom = reproject_geometry(
                shape(feature["geometry"]), CRS(src.crs),
                CRS().from_epsg(4326))
            assert out_geom.is_valid
    # WGS84 bounds to Spherical Mercator
    big_box = box(-180, -90, 180, 90)
    reproject_geometry(big_box, CRS().from_epsg(4326), CRS().from_epsg(3857))
    # WGS84 bounds to Spherical Mercator raising clip error
    with pytest.raises(RuntimeError):
        reproject_geometry(
            big_box, CRS().from_epsg(4326), CRS().from_epsg(3857),
            error_on_clip=True
        )
    outside_box = box(-180, 87, 180, 90)
    assert reproject_geometry(
        outside_box, CRS().from_epsg(4326), CRS().from_epsg(3857),
    ).is_valid
    # empty geometry
    assert reproject_geometry(
        Polygon(), CRS().from_epsg(4326), CRS().from_epsg(3857)).is_empty
    assert reproject_geometry(
        Polygon(), CRS().from_epsg(4326), CRS().from_epsg(4326)).is_empty
    # CRS parameter
    big_box = box(-180, -90, 180, 90)
    assert reproject_geometry(
        big_box, 4326, 3857) == reproject_geometry(
        big_box, "4326", "3857")
    with pytest.raises(TypeError):
        reproject_geometry(big_box, 1.0, 1.0)
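As a minimal usage sketch of reproject_geometry outside the test: the import paths below are assumptions (they mirror how the test module above uses the helpers, but the exact module layout depends on the mapchete version):

from shapely.geometry import box
from rasterio.crs import CRS
from mapchete.io.vector import reproject_geometry  # assumed import path

geom = box(16.0, 48.0, 16.5, 48.5)  # a small box in WGS84
mercator = reproject_geometry(geom, CRS.from_epsg(4326), CRS.from_epsg(3857))
print(mercator.is_valid, mercator.bounds)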
Example 15: comp_range
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import empty [as alias]
def comp_range(blob):  # compare rng-distant pixels within blob: a component of intra_blob
    rng = blob.rng + 1
    p__ = ma.array(blob.dert__[:, :, 0], mask=~blob.map)  # apply mask = ~map
    dy__ = ma.array(blob.dert__[:, :, 1], mask=~blob.map)
    dx__ = ma.array(blob.dert__[:, :, 2], mask=~blob.map)
    dert__ = ma.empty(shape=(height, width, 4), dtype=int)  # initialize new dert__
    comp_rng = rng * 2
    # vertical comp:
    d__ = p__[comp_rng:, rng:-rng] - p__[:-comp_rng, rng:-rng]  # bilateral comparison between p at (x, y + rng) and p at (x, y - rng)
    dy__[rng:-rng, rng:-rng] += d__  # bilateral accumulation on dy (x, y)
    # horizontal comp:
    d__ = p__[rng:-rng, comp_rng:] - p__[rng:-rng, :-comp_rng]  # bilateral comparison between p at (x + rng, y) and p at (x - rng, y)
    dx__[rng:-rng, rng:-rng] += d__  # bilateral accumulation on dx (x, y)
    # diagonal comparison:
    for xd in range(1, rng):
        yd = rng - xd  # half y and x distance between comparands
        bi_xd = xd * 2
        bi_yd = comp_rng - bi_xd  # y and x distance between comparands
        hyp = hypot(bi_yd, bi_xd)
        y_coef = bi_yd / hyp  # to decompose d into dy
        x_coef = bi_xd / hyp  # to decompose d into dx
        # top-left and bottom-right quadrants:
        d__ = p__[bi_yd:, bi_xd:] - p__[:-bi_yd, :-bi_xd]  # comparison between p (x - xd, y - yd) and p (x + xd, y + yd)
        # decompose d to dy, dx:
        temp_dy__ = d__ * y_coef  # buffer for dy accumulation
        temp_dx__ = d__ * x_coef  # buffer for dx accumulation
        # accumulate dy, dx:
        dy__[yd:-yd, xd:-xd] += temp_dy__.astype(int)  # bilateral accumulation on dy (x, y)
        dx__[yd:-yd, xd:-xd] += temp_dx__.astype(int)  # bilateral accumulation on dx (x, y)
        # top-right and bottom-left quadrants:
        d__ = p__[bi_yd:, :-bi_xd] - p__[:-bi_yd, bi_xd:]  # comparison between p (x + xd, y - yd) and p (x - xd, y + yd)
        # decompose d to dy, dx:
        temp_dy__ = d__ * y_coef  # buffer for dy accumulation
        temp_dx__ = -(d__ * x_coef)  # buffer for dx accumulation, sign inverted with comp direction
        # accumulate dy, dx:
        dy__[yd:-yd, xd:-xd] += temp_dy__.astype(int)  # bilateral accumulation on dy (x, y)
        dx__[yd:-yd, xd:-xd] += temp_dx__.astype(int)  # bilateral accumulation on dx (x, y)
    g__ = np.hypot(dy__, dx__) - ave * blob.ncomp  # compute g__
    # pack all derts into dert__
    dert__[:, :, 0] = p__
    dert__[:, :, 1] = dy__
    dert__[:, :, 2] = dx__
    dert__[:, :, 3] = g__
    blob.new_dert__[0] = dert__  # pack dert__ into blob
    return rng
# ---------- comp_range() end ---------------------------------------------------------------------------------------