This article collects typical usage examples of the numpy.putmask function in Python. If you have been wondering what numpy.putmask does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
Fifteen code examples of putmask are shown, sorted by popularity by default.
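Before the collected examples, here is a minimal, self-contained sketch of what np.putmask does: it modifies an array in place, writing replacement values into the positions where a boolean mask is True (the replacement values repeat if they are shorter than the array). The array name below is purely illustrative.

import numpy as np

a = np.arange(6, dtype=float)    # array([0., 1., 2., 3., 4., 5.])
np.putmask(a, a > 3, -1.0)       # in place: positions where a > 3 become -1
print(a)                         # [ 0.  1.  2.  3. -1. -1.]
# the same effect can be had with boolean indexing: a[a > 3] = -1.0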
Example 1: nankurt
def nankurt(values, axis=None, skipna=True):
    mask = isnull(values)
    if not is_floating_dtype(values):
        values = values.astype('f8')

    count = _get_counts(mask, axis)

    if skipna:
        values = values.copy()
        np.putmask(values, mask, 0)

    A = values.sum(axis) / count
    B = (values ** 2).sum(axis) / count - A ** 2
    C = (values ** 3).sum(axis) / count - A ** 3 - 3 * A * B
    D = (values ** 4).sum(axis) / count - A ** 4 - 6 * B * A * A - 4 * C * A

    B = _zero_out_fperr(B)
    D = _zero_out_fperr(D)

    if not isinstance(B, np.ndarray):
        # if B is a scalar, check these corner cases first before doing division
        if count < 4:
            return np.nan
        if B == 0:
            return 0

    result = (((count * count - 1.) * D / (B * B) - 3 * ((count - 1.) ** 2)) /
              ((count - 2.) * (count - 3.)))

    if isinstance(result, np.ndarray):
        result = np.where(B == 0, 0, result)
        result[count < 4] = np.nan

    return result
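The skipna branch is the core of the pattern: NaN positions are zeroed in place with np.putmask so the power sums skip them, and the separately computed count of valid entries serves as the divisor. A minimal standalone sketch of the same idea for a NaN-aware mean, using plain NumPy in place of the pandas helpers isnull and _get_counts:

import numpy as np

values = np.array([1.0, np.nan, 3.0, 5.0])
mask = np.isnan(values)       # stand-in for isnull(values)
count = (~mask).sum()         # stand-in for _get_counts(mask, axis)

vals = values.copy()
np.putmask(vals, mask, 0)     # zero out NaNs so the sum ignores them
mean = vals.sum() / count     # 3.0, the mean of the valid entries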
Example 2: nanskew
def nanskew(values, axis=None, skipna=True):
    if not isinstance(values.dtype.type, np.floating):
        values = values.astype('f8')

    mask = isnull(values)
    count = _get_counts(mask, axis)

    if skipna:
        values = values.copy()
        np.putmask(values, mask, 0)

    A = values.sum(axis) / count
    B = (values ** 2).sum(axis) / count - A ** 2
    C = (values ** 3).sum(axis) / count - A ** 3 - 3 * A * B

    # floating point error
    B = _zero_out_fperr(B)
    C = _zero_out_fperr(C)

    result = ((np.sqrt((count ** 2 - count)) * C) /
              ((count - 2) * np.sqrt(B) ** 3))

    if isinstance(result, np.ndarray):
        result = np.where(B == 0, 0, result)
        result[count < 3] = np.nan
        return result
    else:
        result = 0 if B == 0 else result
        if count < 3:
            return np.nan
        return result
Example 3: fit_fn
def fit_fn(distr, xvals, alpha, thresh):
    """
    The fitted function normalized to 1 above threshold

    To normalize to a given total count multiply by the count.

    Parameters
    ----------
    xvals : sequence of floats
        Values where the function is to be evaluated
    alpha : float
        The fitted parameter
    thresh : float
        Threshold value applied to fitted values

    Returns
    -------
    fit : array of floats
        Fitted function at the requested xvals
    """
    xvals = numpy.array(xvals)
    fit = fitfn_dict[distr](xvals, alpha, thresh)
    # set fitted values below threshold to 0
    numpy.putmask(fit, xvals < thresh, 0.)
    return fit
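fitfn_dict is defined elsewhere in the source module. The snippet below sketches the same below-threshold zeroing with a made-up power-law entry, purely to illustrate the putmask call; the dictionary and its normalization are illustrative assumptions, not the module's actual definitions.

import numpy as np

# hypothetical fitting function: a power law normalized to 1 above thresh
fitfn_dict = {'powerlaw': lambda x, alpha, thresh:
              (alpha - 1) / thresh * (x / thresh) ** (-alpha)}

xvals = np.linspace(0.5, 10.0, 20)
fit = fitfn_dict['powerlaw'](xvals, 2.5, 1.0)
np.putmask(fit, xvals < 1.0, 0.)   # zero the fitted values below the threshold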
Example 4: _reindex_index
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
                   limit=None):
    if level is not None:
        raise Exception('Reindex by level not supported for sparse')

    if self.index.equals(index):
        if copy:
            return self.copy()
        else:
            return self

    if len(self.index) == 0:
        return SparseDataFrame(index=index, columns=self.columns)

    indexer = self.index.get_indexer(index, method, limit=limit)
    indexer = com._ensure_platform_int(indexer)
    mask = indexer == -1
    need_mask = mask.any()

    new_series = {}
    for col, series in self.iteritems():
        values = series.values
        new = values.take(indexer)
        if need_mask:
            np.putmask(new, mask, fill_value)
        new_series[col] = new

    return SparseDataFrame(new_series, index=index, columns=self.columns,
                           default_fill_value=self.default_fill_value)
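The putmask call here implements the usual reindexing idiom: take() with an indexer in which missing labels are marked -1 (which would otherwise silently pick the last element), then overwrite exactly those positions with the fill value. A minimal NumPy-only sketch of that idiom:

import numpy as np

values = np.array([10.0, 20.0, 30.0])
indexer = np.array([2, 0, -1, 1])        # -1 marks labels absent from the old index
new = values.take(indexer)               # -1 wraps around and picks values[-1]
np.putmask(new, indexer == -1, np.nan)   # repair those slots with the fill value
# new -> array([30., 10., nan, 20.])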
Example 5: cummin
def cummin(self, axis=None, skipna=True):
    """
    Return DataFrame of cumulative min over requested axis.

    Parameters
    ----------
    axis : {0, 1}
        0 for row-wise, 1 for column-wise
    skipna : boolean, default True
        Exclude NA/null values. If an entire row/column is NA, the result
        will be NA

    Returns
    -------
    y : DataFrame
    """
    if axis is None:
        axis = self._default_stat_axis
    else:
        axis = self._get_axis_number(axis)

    y = self.values.copy()
    if not issubclass(y.dtype.type, np.integer):
        mask = np.isnan(self.values)

        if skipna:
            np.putmask(y, mask, np.inf)

        result = np.minimum.accumulate(y, axis)

        if skipna:
            np.putmask(result, mask, np.nan)
    else:
        result = np.minimum.accumulate(y, axis)

    return self._wrap_array(result, self.axes, copy=False)
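The trick used for skipna is worth calling out: NaN would poison np.minimum.accumulate, so NaN slots are first overwritten with +inf (which can never win a minimum), the accumulation is run, and the original NaN positions are restored afterwards. A plain-NumPy sketch of the same two-step putmask pattern:

import numpy as np

y = np.array([3.0, np.nan, 2.0, 5.0, np.nan, 1.0])
mask = np.isnan(y)

np.putmask(y, mask, np.inf)        # inf never becomes the running minimum
result = np.minimum.accumulate(y)
np.putmask(result, mask, np.nan)   # put NaN back where the input was NaN
# result -> [ 3., nan,  2.,  2., nan,  1.]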
Example 6: factorize
def factorize(values, sort=False, order=None, na_sentinel=-1):
    """
    Encode input values as an enumerated type or categorical variable

    Parameters
    ----------
    values : sequence
    sort :
    order :

    Returns
    -------
    """
    hash_klass, values = _get_hash_table_and_cast(values)

    uniques = []
    table = hash_klass(len(values))
    labels, counts = table.get_labels(values, uniques, 0, na_sentinel)

    uniques = com._asarray_tuplesafe(uniques)
    if sort and len(counts) > 0:
        sorter = uniques.argsort()
        reverse_indexer = np.empty(len(sorter), dtype=np.int32)
        reverse_indexer.put(sorter, np.arange(len(sorter)))

        mask = labels < 0
        labels = reverse_indexer.take(labels)
        np.putmask(labels, mask, -1)

        uniques = uniques.take(sorter)
        counts = counts.take(sorter)

    return labels, uniques, counts
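After sorting the uniques, the labels must be re-expressed in terms of the new ordering; reverse_indexer.take(labels) does that, but it would also map the NA sentinel (-1) onto a real position, so putmask restores -1 wherever the original label was negative. A small sketch of just that relabeling step, with toy labels and uniques in place of the hash-table machinery:

import numpy as np

uniques = np.array(['b', 'c', 'a'])     # first-appearance order
labels = np.array([0, 1, -1, 2, 0])     # -1 is the NA sentinel

sorter = uniques.argsort()              # [2, 0, 1] -> 'a', 'b', 'c'
reverse_indexer = np.empty(len(sorter), dtype=np.int32)
reverse_indexer.put(sorter, np.arange(len(sorter)))

mask = labels < 0
labels = reverse_indexer.take(labels)   # -1 wrongly picks the last slot
np.putmask(labels, mask, -1)            # restore the NA sentinel
# labels -> [1, 2, -1, 0, 1]; uniques.take(sorter) -> ['a', 'b', 'c']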
Example 7: _map
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
    from pandas.core.series import Series

    if not len(arr):
        return np.ndarray(0, dtype=dtype)

    if isinstance(arr, Series):
        arr = arr.values
    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr, dtype=object)

    if na_mask:
        mask = isnull(arr)
        try:
            result = lib.map_infer_mask(arr, f, mask.view(np.uint8))
        except (TypeError, AttributeError):
            def g(x):
                try:
                    return f(x)
                except (TypeError, AttributeError):
                    return na_value
            return _map(g, arr, dtype=dtype)
        if na_value is not np.nan:
            np.putmask(result, mask, na_value)
        if result.dtype == object:
            result = lib.maybe_convert_objects(result)
        return result
    else:
        return lib.map_infer(arr, f)
Example 8: usefullness
def usefullness(data, targetClass, otherClass=None, **args):
    '''A feature score for discrete data

    optional arguments:
    threshold
    fraction
    '''
    if 'threshold' in args:
        threshold = args['threshold']
    else:
        threshold = 5
    if 'fraction' in args:
        fraction = args['fraction']
    else:
        fraction = 0.0

    Y, targetClassSize, otherClassSize, otherI, feature = parseArgs(
        data, targetClass, otherClass, **args)

    threshold = max(threshold, fraction * float(targetClassSize))

    s1 = featureCount(data, targetClass=targetClass, Y=Y, feature=feature)
    s2 = featureCount(data, I=otherI, Y=Y,
                      feature=feature) / float(otherClassSize)
    s2 = 1 - s2
    numpy.putmask(s2, numpy.less(s1, threshold), 0.0)

    return s2
Example 9: golub
def golub(data, targetClass, otherClass, **args):
    '''The Golub feature score:
    s = (mu1 - mu2) / sqrt(sigma1^2 + sigma2^2)
    '''
    if 'Y' in args:
        Y = args['Y']
        targetClassSize = numpy.sum(numpy.equal(Y, targetClass))
        otherClassSize = numpy.sum(numpy.equal(Y, otherClass))
    else:
        Y = None
        targetClassSize = data.labels.classSize[targetClass]
        otherClassSize = data.labels.classSize[otherClass]

    m1 = numpy.array(featureMean(data, targetClass, Y))
    m2 = numpy.array(featureMean(data, otherClass, Y))
    s1 = numpy.array(featureStd(data, targetClass, Y))
    s2 = numpy.array(featureStd(data, otherClass, Y))

    s = numpy.sqrt(s1 ** 2 + s2 ** 2)
    m = (m1 + m2) / 2.0
    # perfect features will have s[i] = 0, so need to take care of that:
    numpy.putmask(s, numpy.equal(s, 0), m)
    # features that are zero will still have s[i] = 0, so:
    numpy.putmask(s, numpy.equal(s, 0), 1)

    g = (m1 - m2) / s
    return g
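The two consecutive putmask calls guard the final division: where the pooled spread s is zero because the feature is constant but nonzero, s is replaced by the mean m; where the feature is identically zero (so m is zero too), s is set to 1 and the score becomes 0 instead of NaN. A standalone sketch of that guard, with toy per-feature statistics in place of featureMean/featureStd:

import numpy as np

m1 = np.array([2.0, 5.0, 0.0])
m2 = np.array([1.0, 5.0, 0.0])
s1 = np.array([0.6, 0.0, 0.0])
s2 = np.array([0.8, 0.0, 0.0])

s = np.sqrt(s1 ** 2 + s2 ** 2)
m = (m1 + m2) / 2.0
np.putmask(s, np.equal(s, 0), m)   # constant feature: fall back to the mean
np.putmask(s, np.equal(s, 0), 1)   # all-zero feature: avoid 0/0
g = (m1 - m2) / s                  # -> [1., 0., 0.], no NaN or inf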
Example 10: get_closure_phase
def get_closure_phase(infile='L401323_SB349_uv.dppp.MS',
                      triangle=['TS001', 'DE601HBA', 'DE605HBA']):
    a = inspect.stack()
    stacklevel = 0
    for k in range(len(a)):
        if (string.find(a[k][1], 'ipython console') > 0):
            stacklevel = k
    myf = sys._getframe(stacklevel).f_globals
    myf['__last_task'] = 'mytask'
    myf['taskname'] = 'mytask'
    tb = myf['tb']

    oroot = infile.split('uv')[0]
    for lfile in np.sort(glob.glob(oroot + '*ms')):
        os.system('ms2uvfits in=' + lfile + ' out=' + lfile.replace('ms', 'fits') + ' writesyscal=F')
        if lfile == infile:
            continue
        tb.open(lfile + '/ANTENNA')
        names = tb.getcol('NAME')
        trnum = []
        for itr in range(3):
            trnum.append(np.argwhere(names == triangle[itr])[0][0])
        tb.close()
        trnum.sort()

        tb.open(lfile)
        ant1 = tb.getcol('ANTENNA1')
        ant2 = tb.getcol('ANTENNA2')
        data = tb.getcol('DATA')
        ph12 = +np.angle(data[0, 0, (ant1 == trnum[0]) & (ant2 == trnum[1])])
        ph23 = +np.angle(data[0, 0, (ant1 == trnum[1]) & (ant2 == trnum[2])])
        ph31 = -np.angle(data[0, 0, (ant1 == trnum[0]) & (ant2 == trnum[2])])
        clph = ph12 + ph23 + ph31
        np.putmask(clph, clph > np.pi, clph - 2. * np.pi)
        np.putmask(clph, clph < -np.pi, clph + 2. * np.pi)
        # np.savetxt(lfile.replace('ms','txt'),np.unwrap(clph))
        np.savetxt(lfile.replace('ms', 'txt'), clph)
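The pair of putmask calls at the end folds the summed closure phase back into the principal range: values above pi have 2*pi subtracted, values below -pi have 2*pi added. The same wrapping in isolation, with invented phases (since each individual phase from np.angle already lies in [-pi, pi], one pass is enough):

import numpy as np

ph12 = np.array([ 2.0,  3.0, -1.0])
ph23 = np.array([ 2.0, -0.5, -2.5])
ph31 = np.array([-0.5,  1.5, -0.5])

clph = ph12 + ph23 + ph31                           # [3.5, 4.0, -4.0]
np.putmask(clph, clph > np.pi, clph - 2. * np.pi)
np.putmask(clph, clph < -np.pi, clph + 2. * np.pi)
# clph -> roughly [-2.78, -2.28, 2.28], all inside [-pi, pi]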
Example 11: _reindex_index
def _reindex_index(self, index, method, copy):
    if self.index.equals(index):
        if copy:
            return self.copy()
        else:
            return self

    if len(self.index) == 0:
        return SparseDataFrame(index=index, columns=self.columns)

    indexer = self.index.get_indexer(index, method)
    mask = indexer == -1
    need_mask = mask.any()

    new_series = {}
    for col, series in self.iteritems():
        values = series.values
        new = values.take(indexer)
        if need_mask:
            np.putmask(new, mask, nan)
        new_series[col] = new

    return SparseDataFrame(new_series, index=index, columns=self.columns,
                           default_fill_value=self.default_fill_value)
Example 12: returns
def returns(prices, method='simple', periods=1, fill_method='pad', limit=None,
            freq=None):
    """
    Compute the returns for the specified prices.
    method: [simple, compound, log]; compound and log are equivalent
    """
    if method not in ('simple', 'compound', 'log'):
        raise ValueError("Invalid method type. Valid values are "
                         "('simple', 'compound', 'log')")

    if method == 'simple':
        return prices.pct_change(periods=periods, fill_method=fill_method,
                                 limit=limit, freq=freq)
    else:
        if freq is not None:
            raise NotImplementedError("TODO: implement this logic if needed")

        if isinstance(prices, pd.Series):
            if fill_method is None:
                data = prices
            else:
                data = prices.fillna(method=fill_method, limit=limit)

            data = np.log(data / data.shift(periods=periods))
            mask = pd.isnull(prices.values)
            np.putmask(data.values, mask, np.nan)
            return data
        else:
            return pd.DataFrame(
                {name: returns(col, method, periods, fill_method, limit, freq)
                 for name, col in prices.iteritems()},
                columns=prices.columns,
                index=prices.index)
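Because the prices are forward-filled before taking logs, positions that were missing in the original series would otherwise come out as a spurious return of 0; the putmask call writes NaN back into exactly those positions. A small pandas sketch of that step, mirroring the function above on an invented price series:

import numpy as np
import pandas as pd

prices = pd.Series([100.0, np.nan, 110.0, 121.0])
data = prices.fillna(method='pad')                # [100, 100, 110, 121]
data = np.log(data / data.shift(1))

mask = pd.isnull(prices.values)
np.putmask(data.values, mask, np.nan)             # restore NaN where the price was missing
# data -> [NaN, NaN, log(1.1), log(1.1)]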
Example 13: map
def map(self, arg):
    """
    Map values of Series using input correspondence (which can be
    a dict, Series, or function).

    Parameters
    ----------
    arg : function, dict, or Series

    Returns
    -------
    y : Series
        same index as caller
    """
    if isinstance(arg, (dict, Series)):
        if isinstance(arg, dict):
            arg = Series(arg)

        indexer, mask = tseries.getMergeVec(self, arg.index.indexMap)
        newValues = arg.view(np.ndarray).take(indexer)
        np.putmask(newValues, -mask, np.nan)

        newSer = Series(newValues, index=self.index)
        return newSer
    else:
        return Series([arg(x) for x in self], index=self.index)
Example 14: makeGridDomain
def makeGridDomain(cLon, cLat, minLon, maxLon, minLat, maxLat,
                   margin=2, resolution=0.01):
    """
    Generate a grid of the distance and angle of a grid of points
    surrounding a storm centre given the location of the storm.
    The grid margin and grid size can be set in configuration files.
    xMargin, yMargin and gridSize are in degrees
    """
    if (type(cLon) == list or type(cLat) == list or
            type(cLon) == np.ndarray or type(cLat) == np.ndarray):
        raise TypeError("Input values must be scalar values")

    gridSize = int(resolution * 1000)
    minLon_ = int(1000 * (minLon)) - int(1000 * margin)
    maxLon_ = int(1000 * (maxLon)) + int(1000 * margin) + 1
    minLat_ = int(1000 * (minLat)) - int(1000 * margin)
    maxLat_ = int(1000 * (maxLat)) + int(1000 * margin) + 1

    xGrid = np.array(np.arange(minLon_, maxLon_, gridSize), dtype=int)
    yGrid = np.array(np.arange(minLat_, maxLat_, gridSize), dtype=int)

    R = gridLatLonDist(cLon, cLat, xGrid / 1000., yGrid / 1000.)
    np.putmask(R, R == 0, 1e-30)
    theta = np.pi / 2. - gridLatLonBear(cLon, cLat,
                                        xGrid / 1000., yGrid / 1000.)
    return R, theta
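The np.putmask(R, R == 0, 1e-30) line replaces the one exactly-zero distance (the grid cell at the storm centre itself) with a tiny positive number, so that later radial profiles dividing by R do not blow up. The guard in isolation, on an invented distance grid:

import numpy as np

R = np.array([[1.2, 0.8],
              [0.0, 0.5]])        # 0.0 is the grid cell at the storm centre
np.putmask(R, R == 0, 1e-30)      # tiny epsilon instead of zero
inv = 1.0 / R                     # finite everywhere, no divide-by-zero warning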
Example 15: remapRaster
def remapRaster(infile, out_file, lookup):
    '''remap raster values to those in lookup table'''
    inmap = gdal.Open(infile)
    rows = inmap.RasterYSize
    cols = inmap.RasterXSize
    map_arr = inmap.ReadAsArray()

    # remap values
    remap_dict = df.getDictfromCSV(lookup, '\t', 1, 0)
    remap_dict[0] = 2000        # ag
    remap_dict[255] = 32767     # nodata
    map_out = map_arr.astype(np.int16)
    print 'input map labels', np.unique(map_out)
    for r in remap_dict:
        print 'reclassifying', r, ': ', remap_dict[r]
        outval = int(remap_dict[r])
        temp = np.equal(map_out, int(r))
        np.putmask(map_out, temp, int(remap_dict[r]))
        temp = None
    print 'output map labels', np.unique(map_out)

    # output raster
    driver = inmap.GetDriver()
    outDs = driver.Create(out_file, cols, rows, 1, GDT_Int16)
    outDs.SetGeoTransform(inmap.GetGeoTransform())
    outDs.SetProjection(inmap.GetProjection())
    outband = outDs.GetRasterBand(1)
    outband.WriteArray(map_out, 0, 0)
    outband.SetNoDataValue(32767)
    outband.FlushCache()
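The remap loop is the classic lookup-table pattern: for each (old, new) pair, build a boolean mask of pixels equal to the old value and overwrite them in place with putmask. A GDAL-free sketch of just that loop on a toy label array (the dictionary values here are invented):

import numpy as np

remap_dict = {0: 2000, 1: 4100, 255: 32767}   # old label -> new label (illustrative)
map_out = np.array([[0, 1, 255],
                    [1, 0, 1]], dtype=np.int16)

for old, new in remap_dict.items():
    np.putmask(map_out, np.equal(map_out, old), new)
# map_out -> [[ 2000,  4100, 32767],
#             [ 4100,  2000,  4100]]

Note that remapping in place like this assumes no new code collides with an old code that is processed later in the loop; otherwise some pixels would be remapped twice.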