This article collects typical code examples of Python's numpy.ma.concatenate method. If you are wondering what exactly ma.concatenate does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples from the containing module, numpy.ma.
The following presents 11 code examples of the ma.concatenate method, ordered by popularity by default.
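Before the examples, here is a minimal, self-contained sketch of what ma.concatenate does: it joins masked arrays along an existing axis and combines their masks, which is the property every example below relies on. The array values are made up purely for illustration.

import numpy.ma as ma

a = ma.array([1, 2, 3], mask=[0, 1, 0])
b = ma.array([4, 5], mask=[0, 1])

joined = ma.concatenate((a, b))
print(joined)        # [1 -- 3 4 --]
print(joined.mask)   # [False  True False False  True]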
Example 1: transform_non_affine
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import concatenate [as alias]
def transform_non_affine(self, points):
if self._x.is_affine and self._y.is_affine:
return points
x = self._x
y = self._y
if x == y and x.input_dims == 2:
return x.transform_non_affine(points)
if x.input_dims == 2:
x_points = x.transform_non_affine(points)[:, 0:1]
else:
x_points = x.transform_non_affine(points[:, 0])
x_points = x_points.reshape((len(x_points), 1))
if y.input_dims == 2:
y_points = y.transform_non_affine(points)[:, 1:]
else:
y_points = y.transform_non_affine(points[:, 1])
y_points = y_points.reshape((len(y_points), 1))
if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray):
return ma.concatenate((x_points, y_points), 1)
else:
return np.concatenate((x_points, y_points), 1)
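Example 1 falls back to ma.concatenate whenever either per-axis transform produced a MaskedArray, so points flagged as invalid stay flagged after the two coordinate columns are blended. A minimal sketch of that final column-stacking step, with made-up coordinate data:

import numpy as np
import numpy.ma as ma

x_points = ma.array([[0.0], [1.0], [2.0]], mask=[[False], [True], [False]])  # one invalid x
y_points = np.array([[5.0], [6.0], [7.0]])                                   # plain y column

xy = ma.concatenate((x_points, y_points), 1)
print(xy.shape)       # (3, 2)
print(xy.mask[1, 0])  # True -- the invalid coordinate is still masked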
Example 2: read
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import concatenate [as alias]
def read(self, indexes=None, **kwargs):
"""
Read reprojected & resampled input data.
Parameters
----------
indexes : integer or list
band number or list of band numbers
Returns
-------
data : array
"""
band_indexes = self._get_band_indexes(indexes)
arr = self.process.get_raw_output(self.tile)
return (
arr[band_indexes[0] - 1]
if len(band_indexes) == 1
else ma.concatenate([ma.expand_dims(arr[i - 1], 0) for i in band_indexes])
)
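Example 2 builds a (bands, rows, cols) stack by prepending a length-1 axis to each selected 2-D band and concatenating along axis 0 (the default). The same pattern in isolation, with synthetic band data standing in for the process output; the 1-based band numbers follow the snippet above:

import numpy as np
import numpy.ma as ma

arr = ma.masked_equal(np.arange(48).reshape(3, 4, 4), 0)  # pretend raw output: 3 bands of 4x4, 0 = nodata
band_indexes = [1, 3]                                      # 1-based band numbers, as in read()

stacked = ma.concatenate([ma.expand_dims(arr[i - 1], 0) for i in band_indexes])
print(stacked.shape)   # (2, 4, 4)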
Example 3: concatenate
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import concatenate [as alias]
def concatenate(self,value,axis=0):
""" Concatentate UncertContainer value to self.
Assumes that if dimensions of self and value do not match, to
add a np.newaxis along axis of value
"""
if isinstance(value,UncertContainer):
if value.vals.ndim == self.vals.ndim:
vals = value.vals
dmin = value.dmin
dmax = value.dmax
wt = value.wt
uncert = value.uncert
mask = value.mask
elif (value.vals.ndim + 1) == self.vals.ndim:
vals = ma.expand_dims(value.vals,axis)
dmin = ma.expand_dims(value.dmin,axis)
dmax = ma.expand_dims(value.dmax,axis)
wt = ma.expand_dims(value.wt,axis)
uncert = ma.expand_dims(value.uncert,axis)
mask = np.expand_dims(value.mask,axis)
else:
raise ValueError('Could not properly match dimensionality')
self.vals = ma.concatenate((self.vals,vals),axis=axis)
self.dmin = ma.concatenate((self.dmin,dmin),axis=axis)
self.dmax = ma.concatenate((self.dmax,dmax),axis=axis)
self.wt = ma.concatenate((self.wt,wt),axis=axis)
self.uncert = ma.concatenate((self.uncert,uncert),axis=axis)
self.mask = np.concatenate((self.mask,mask),axis=axis)
else:
raise ValueError('Can only concatenate with an UncertContainer object')
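Example 3 promotes a lower-dimensional value with ma.expand_dims before concatenating, so a single new record can be appended to an existing stack. The same idea stripped of the UncertContainer class, with made-up values:

import numpy as np
import numpy.ma as ma

stack = ma.array(np.zeros((2, 3)))                     # existing 2-D data
new_row = ma.array([1.0, 2.0, 3.0], mask=[0, 1, 0])    # incoming 1-D record, one value missing

# (new_row.ndim + 1) == stack.ndim, so add the missing axis first
new_row = ma.expand_dims(new_row, 0)
stack = ma.concatenate((stack, new_row), axis=0)
print(stack.shape, stack.mask[2, 1])   # (3, 3) True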
Example 4: segment_hits
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import concatenate [as alias]
def segment_hits(cx, cy, x, y, radius):
"""
Determine if any line segments are within radius of a
point. Returns the list of line segments that are within that
radius.
"""
# Process single points specially
if len(x) < 2:
res, = np.nonzero((cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2)
return res
# We need to lop the last element off a lot.
xr, yr = x[:-1], y[:-1]
# Only look at line segments whose nearest point to C on the line
# lies within the segment.
dx, dy = x[1:] - xr, y[1:] - yr
Lnorm_sq = dx ** 2 + dy ** 2 # Possibly want to eliminate Lnorm==0
u = ((cx - xr) * dx + (cy - yr) * dy) / Lnorm_sq
candidates = (u >= 0) & (u <= 1)
#if any(candidates): print "candidates",xr[candidates]
# Note that there is a little area near one side of each point
# which will be near neither segment, and another which will
# be near both, depending on the angle of the lines. The
# following radius test eliminates these ambiguities.
point_hits = (cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2
#if any(point_hits): print "points",xr[candidates]
candidates = candidates & ~(point_hits[:-1] | point_hits[1:])
# For those candidates which remain, determine how far they lie away
# from the line.
px, py = xr + u * dx, yr + u * dy
line_hits = (cx - px) ** 2 + (cy - py) ** 2 <= radius ** 2
#if any(line_hits): print "lines",xr[candidates]
line_hits = line_hits & candidates
points, = point_hits.ravel().nonzero()
lines, = line_hits.ravel().nonzero()
#print points,lines
return np.concatenate((points, lines))
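A quick standalone check of segment_hits as defined above, using a made-up polyline: a cursor hovering over the middle of the second segment should report index 1 and nothing else.

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.0, 0.0, 0.0, 0.0])

# Cursor at (1.5, 0.1): within radius 0.2 of segment 1 (x=1..2), but not near any vertex.
print(segment_hits(1.5, 0.1, x, y, 0.2))   # [1]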
Example 5: find_duplicates
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import concatenate [as alias]
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
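Example 5 mirrors numpy.lib.recfunctions.find_duplicates, so the same lookup can be reproduced through the public API; the call below reuses the data from the docstring, and the exact printed representation depends on the numpy version.

import numpy as np
from numpy.lib import recfunctions as rfn

ndtype = [('a', int)]
a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
                mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)

dups, idx = rfn.find_duplicates(a, ignoremask=True, return_index=True)
print(dups['a'], idx)   # the duplicated values and their positions in `a`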
Example 6: mannwhitneyu
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import concatenate [as alias]
def mannwhitneyu(x,y, use_continuity=True):
"""
Computes the Mann-Whitney statistic
Missing values in `x` and/or `y` are discarded.
Parameters
----------
x : sequence
Input
y : sequence
Input
use_continuity : {True, False}, optional
Whether a continuity correction (1/2.) should be taken into account.
Returns
-------
statistic : float
The Mann-Whitney statistics
pvalue : float
Approximate p-value assuming a normal distribution.
"""
x = ma.asarray(x).compressed().view(ndarray)
y = ma.asarray(y).compressed().view(ndarray)
ranks = rankdata(np.concatenate([x,y]))
(nx, ny) = (len(x), len(y))
nt = nx + ny
U = ranks[:nx].sum() - nx*(nx+1)/2.
U = max(U, nx*ny - U)
u = nx*ny - U
mu = (nx*ny)/2.
sigsq = (nt**3 - nt)/12.
ties = count_tied_groups(ranks)
sigsq -= sum(v*(k**3-k) for (k,v) in iteritems(ties))/12.
sigsq *= nx*ny/float(nt*(nt-1))
if use_continuity:
z = (U - 1/2. - mu) / ma.sqrt(sigsq)
else:
z = (U - mu) / ma.sqrt(sigsq)
prob = special.erfc(abs(z)/np.sqrt(2))
return MannwhitneyuResult(u, prob)
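This snippet follows scipy.stats.mstats.mannwhitneyu: masked entries are dropped before ranking, and the reported statistic is the smaller U. A quick call through the public scipy API, with made-up samples:

import numpy.ma as ma
from scipy.stats import mstats

x = ma.array([1.0, 2.5, 3.1, 4.0], mask=[0, 0, 1, 0])   # one missing observation
y = ma.array([2.0, 3.5, 5.0, 6.2])

u, p = mstats.mannwhitneyu(x, y)
print(u, p)   # Mann-Whitney U and its normal-approximation p-value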
Example 7: ks_twosamp
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import concatenate [as alias]
def ks_twosamp(data1, data2, alternative="two-sided"):
"""
Computes the Kolmogorov-Smirnov test on two samples.
Missing values are discarded.
Parameters
----------
data1 : array_like
First data set
data2 : array_like
Second data set
alternative : {'two-sided', 'less', 'greater'}, optional
Indicates the alternative hypothesis. Default is 'two-sided'.
Returns
-------
d : float
Value of the Kolmogorov Smirnov test
p : float
Corresponding p-value.
"""
(data1, data2) = (ma.asarray(data1), ma.asarray(data2))
(n1, n2) = (data1.count(), data2.count())
n = (n1*n2/float(n1+n2))
mix = ma.concatenate((data1.compressed(), data2.compressed()))
mixsort = mix.argsort(kind='mergesort')
csum = np.where(mixsort < n1, 1./n1, -1./n2).cumsum()
# Check for ties
if len(np.unique(mix)) < (n1+n2):
csum = csum[np.r_[np.diff(mix[mixsort]).nonzero()[0],-1]]
alternative = str(alternative).lower()[0]
if alternative == 't':
d = ma.abs(csum).max()
prob = special.kolmogorov(np.sqrt(n)*d)
elif alternative == 'l':
d = -csum.min()
prob = np.exp(-2*n*d**2)
elif alternative == 'g':
d = csum.max()
prob = np.exp(-2*n*d**2)
else:
raise ValueError("Invalid value for the alternative hypothesis: "
"should be in 'two-sided', 'less' or 'greater'")
return (d, prob)
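This follows scipy.stats.mstats.ks_twosamp: masked observations simply do not contribute to either empirical CDF. A short call through the public API, again with made-up data:

import numpy.ma as ma
from scipy.stats import mstats

data1 = ma.array([0.1, 0.4, 0.7, 1.2], mask=[0, 0, 0, 1])
data2 = ma.array([0.3, 0.8, 1.5, 2.0])

d, p = mstats.ks_twosamp(data1, data2, alternative='two-sided')
print(d, p)   # KS statistic and p-value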
Example 8: recache
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import concatenate [as alias]
def recache(self, always=False):
if always or self._invalidx:
xconv = self.convert_xunits(self._xorig)
if ma.isMaskedArray(self._xorig):
x = ma.asarray(xconv, np.float_)
else:
x = np.asarray(xconv, np.float_)
x = x.ravel()
else:
x = self._x
if always or self._invalidy:
yconv = self.convert_yunits(self._yorig)
if ma.isMaskedArray(self._yorig):
y = ma.asarray(yconv, np.float_)
else:
y = np.asarray(yconv, np.float_)
y = y.ravel()
else:
y = self._y
if len(x) == 1 and len(y) > 1:
x = x * np.ones(y.shape, np.float_)
if len(y) == 1 and len(x) > 1:
y = y * np.ones(x.shape, np.float_)
if len(x) != len(y):
raise RuntimeError('xdata and ydata must be the same length')
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if ma.isMaskedArray(x) or ma.isMaskedArray(y):
self._xy = ma.concatenate((x, y), 1)
else:
self._xy = np.concatenate((x, y), 1)
self._x = self._xy[:, 0] # just a view
self._y = self._xy[:, 1] # just a view
self._subslice = False
if (self.axes and len(x) > 100 and self._is_sorted(x) and
self.axes.name == 'rectilinear' and
self.axes.get_xscale() == 'linear' and
self._markevery is None):
self._subslice = True
if hasattr(self, '_path'):
interpolation_steps = self._path._interpolation_steps
else:
interpolation_steps = 1
self._path = Path(self._xy, None, interpolation_steps)
self._transformed_path = None
self._invalidx = False
self._invalidy = False
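The core of Example 8 is turning two 1-D coordinate arrays into an (N, 2) vertex array while preserving any mask, which is what later lets invalid points be skipped when the path is drawn. The same reshape-and-stack step in isolation, with made-up coordinates containing one NaN:

import numpy as np
import numpy.ma as ma

x = ma.masked_invalid([0.0, 1.0, np.nan, 3.0])
y = np.arange(4.0)

x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if ma.isMaskedArray(x) or ma.isMaskedArray(y):
    xy = ma.concatenate((x, y), 1)
else:
    xy = np.concatenate((x, y), 1)
print(xy.shape, xy.mask[2, 0])   # (4, 2) True -- the NaN vertex stays masked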
Example 9: mannwhitneyu
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import concatenate [as alias]
def mannwhitneyu(x,y, use_continuity=True):
"""
Computes the Mann-Whitney statistic
Missing values in `x` and/or `y` are discarded.
Parameters
----------
x : sequence
Input
y : sequence
Input
use_continuity : {True, False}, optional
Whether a continuity correction (1/2.) should be taken into account.
Returns
-------
u : float
The Mann-Whitney statistics
prob : float
Approximate p-value assuming a normal distribution.
"""
x = ma.asarray(x).compressed().view(ndarray)
y = ma.asarray(y).compressed().view(ndarray)
ranks = rankdata(np.concatenate([x,y]))
(nx, ny) = (len(x), len(y))
nt = nx + ny
U = ranks[:nx].sum() - nx*(nx+1)/2.
U = max(U, nx*ny - U)
u = nx*ny - U
#
mu = (nx*ny)/2.
sigsq = (nt**3 - nt)/12.
ties = count_tied_groups(ranks)
sigsq -= sum(v*(k**3-k) for (k,v) in iteritems(ties))/12.
sigsq *= nx*ny/float(nt*(nt-1))
#
if use_continuity:
z = (U - 1/2. - mu) / ma.sqrt(sigsq)
else:
z = (U - mu) / ma.sqrt(sigsq)
prob = special.erfc(abs(z)/np.sqrt(2))
return (u, prob)
Example 10: recache
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import concatenate [as alias]
def recache(self, always=False):
if always or self._invalidx:
xconv = self.convert_xunits(self._xorig)
if ma.isMaskedArray(self._xorig):
x = ma.asarray(xconv, np.float_)
else:
x = np.asarray(xconv, np.float_)
x = x.ravel()
else:
x = self._x
if always or self._invalidy:
yconv = self.convert_yunits(self._yorig)
if ma.isMaskedArray(self._yorig):
y = ma.asarray(yconv, np.float_)
else:
y = np.asarray(yconv, np.float_)
y = y.ravel()
else:
y = self._y
if len(x) == 1 and len(y) > 1:
x = x * np.ones(y.shape, np.float_)
if len(y) == 1 and len(x) > 1:
y = y * np.ones(x.shape, np.float_)
if len(x) != len(y):
raise RuntimeError('xdata and ydata must be the same length')
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if ma.isMaskedArray(x) or ma.isMaskedArray(y):
self._xy = ma.concatenate((x, y), 1)
else:
self._xy = np.concatenate((x, y), 1)
self._x = self._xy[:, 0] # just a view
self._y = self._xy[:, 1] # just a view
self._subslice = False
if (self.axes and len(x) > 100 and self._is_sorted(x) and
self.axes.name == 'rectilinear' and
self.axes.get_xscale() == 'linear' and
self._markevery is None and
self.get_clip_on() is True):
self._subslice = True
if hasattr(self, '_path'):
interpolation_steps = self._path._interpolation_steps
else:
interpolation_steps = 1
self._path = Path(self._xy, None, interpolation_steps)
self._transformed_path = None
self._invalidx = False
self._invalidy = False
Example 11: mannwhitneyu
# Required import: from numpy import ma [as alias]
# Or: from numpy.ma import concatenate [as alias]
def mannwhitneyu(x,y, use_continuity=True):
"""
Computes the Mann-Whitney statistic
Missing values in `x` and/or `y` are discarded.
Parameters
----------
x : sequence
Input
y : sequence
Input
use_continuity : {True, False}, optional
Whether a continuity correction (1/2.) should be taken into account.
Returns
-------
statistic : float
The Mann-Whitney statistics
pvalue : float
Approximate p-value assuming a normal distribution.
"""
x = ma.asarray(x).compressed().view(ndarray)
y = ma.asarray(y).compressed().view(ndarray)
ranks = rankdata(np.concatenate([x,y]))
(nx, ny) = (len(x), len(y))
nt = nx + ny
U = ranks[:nx].sum() - nx*(nx+1)/2.
U = max(U, nx*ny - U)
u = nx*ny - U
mu = (nx*ny)/2.
sigsq = (nt**3 - nt)/12.
ties = count_tied_groups(ranks)
sigsq -= sum(v*(k**3-k) for (k,v) in iteritems(ties))/12.
sigsq *= nx*ny/float(nt*(nt-1))
if use_continuity:
z = (U - 1/2. - mu) / ma.sqrt(sigsq)
else:
z = (U - mu) / ma.sqrt(sigsq)
prob = special.erfc(abs(z)/np.sqrt(2))
return MannwhitneyuResult(u, prob)