This article collects typical usage examples of the numpy.cumsum function in Python: what cumsum does, how to call it, and what it looks like in real code. If those are the questions you came with, the curated examples below should help.
Fifteen code examples of the cumsum function are shown below, sorted by popularity by default. The excerpts are lifted out of their original modules, so each relies on its project's own imports (most assume import numpy as np; some use import numpy or pylab) and on sibling helpers that are not reproduced here.
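Before diving into the excerpts, here is a minimal sketch of what np.cumsum itself returns (the arrays are illustrative):

import numpy as np

a = np.array([1, 2, 3, 4])
np.cumsum(a)               # array([ 1,  3,  6, 10]): running total of the elements
np.cumsum(a, dtype=float)  # accumulate as float, e.g. to avoid integer overflow

m = np.array([[1, 2], [3, 4]])
np.cumsum(m, axis=0)       # array([[1, 2], [4, 6]]): sums run down each column
np.cumsum(m, axis=1)       # array([[1, 3], [3, 7]]): sums run along each row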
Example 1: pos_neg_integral
def pos_neg_integral(scores):
    """Works only for 1D arrays at the moment, but can be easily extended."""
    scores = np.hstack([[0], scores])  # Padding.
    pos_scores, neg_scores = scores.copy(), scores.copy()
    idxs = scores >= 0
    pos_scores[~idxs], neg_scores[idxs] = 0, 0
    return np.cumsum(pos_scores), np.cumsum(neg_scores)
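A quick check of the function above (the scores array is made up): because of the zero padding, pos_int[j] - pos_int[i] is the total positive mass of scores[i:j], and likewise for the negative integral.

scores = np.array([1.0, -2.0, 3.0, -0.5])
pos_int, neg_int = pos_neg_integral(scores)
# pos_int -> [0., 1., 1., 4., 4.]      (cumulative sum of positive entries only)
# neg_int -> [0., 0., -2., -2., -2.5]  (cumulative sum of negative entries only)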
Example 2: drawPrfastscore
def drawPrfastscore(tp, fp, scr, tot, show=True):
    tp = numpy.cumsum(tp)
    fp = numpy.cumsum(fp)
    rec = tp / tot
    prec = tp / (fp + tp)
    # dif = numpy.abs(prec[1:] - rec[1:])
    dif = numpy.abs(prec[::-1] - rec[::-1])
    pos = dif.argmin()
    pos = len(dif) - pos - 1
    # 11-point interpolated average precision (PASCAL VOC style)
    ap = 0
    for t in numpy.linspace(0, 1, 11):
        pr = prec[rec >= t]
        if pr.size == 0:
            pr = 0
        p = numpy.max(pr)
        ap = ap + p / 11
    if show:
        pylab.plot(rec, prec, '-g')
        pylab.title("AP=%.3f EPRthr=%.3f" % (ap, scr[pos]))
        pylab.xlabel("Recall")
        pylab.ylabel("Precision")
        pylab.grid()
        pylab.show()
        pylab.draw()
    return rec, prec, scr, ap, scr[pos]
Example 3: _additive_estimate
def _additive_estimate(events, timeline, _additive_f, _additive_var, reverse):
    """
    Called to compute the Kaplan Meier and Nelson-Aalen estimates.
    """
    if reverse:
        events = events.sort_index(ascending=False)
        at_risk = events['entrance'].sum() - events['removed'].cumsum().shift(1).fillna(0)
        deaths = events['observed']
        estimate_ = np.cumsum(_additive_f(at_risk, deaths)).sort_index().shift(-1).fillna(0)
        var_ = np.cumsum(_additive_var(at_risk, deaths)).sort_index().shift(-1).fillna(0)
    else:
        deaths = events['observed']
        at_risk = events['at_risk']
        estimate_ = np.cumsum(_additive_f(at_risk, deaths))
        var_ = np.cumsum(_additive_var(at_risk, deaths))
    timeline = sorted(timeline)
    estimate_ = estimate_.reindex(timeline, method='pad').fillna(0)
    var_ = var_.reindex(timeline, method='pad')
    var_.index.name = 'timeline'
    estimate_.index.name = 'timeline'
    return estimate_, var_
Example 4: Bh_Bv_timeseries
def Bh_Bv_timeseries(igramsFile):
    h5file = h5py.File(igramsFile)
    igramList = h5file['interferograms'].keys()
    Bh_igram = []
    Bv_igram = []
    for igram in igramList:
        Bh_igram.append(float(h5file['interferograms'][igram].attrs['H_BASELINE_TOP_HDR']))
        Bv_igram.append(float(h5file['interferograms'][igram].attrs['V_BASELINE_TOP_HDR']))

    A, B = design_matrix(h5file)
    tbase, dateList, dateDict, dateList1 = date_list(h5file)
    dt = np.diff(tbase)

    Bh_rate = np.dot(np.linalg.pinv(B), Bh_igram)
    zero = np.array([0.], np.float32)
    Bh = np.concatenate((zero, np.cumsum([Bh_rate * dt])))

    Bv_rate = np.dot(np.linalg.pinv(B), Bv_igram)
    zero = np.array([0.], np.float32)
    Bv = np.concatenate((zero, np.cumsum([Bv_rate * dt])))

    h5file.close()
    return Bh, Bv
Example 5: cumulative_moment
def cumulative_moment(self, year, mag):
    '''Calculation of Mmax using a cumulative moment approach, adapted from
    the cumulative strain energy method of Makropoulos & Burton (1983)

    :param year: Year of Earthquake
    :type year: numpy.ndarray
    :param mag: Magnitude of Earthquake
    :type mag: numpy.ndarray
    :keyword iplot: Include cumulative moment plot
    :type iplot: Boolean
    :return mmax: Returns Maximum Magnitude
    :rtype mmax: Float
    '''
    # Calculate seismic moment
    m_o = 10. ** (9.05 + 1.5 * mag)
    year_range = np.arange(np.min(year), np.max(year) + 1, 1)
    nyr = np.shape(year_range)[0]
    morate = np.zeros(nyr, dtype=float)
    # Get moment release per year
    for loc, tyr in enumerate(year_range):
        idx = np.abs(year - tyr) < 1E-5
        if np.sum(idx) > 0:
            # Some moment release in that year
            morate[loc] = np.sum(m_o[idx])
    ave_morate = np.sum(morate) / nyr
    # Average moment rate vector
    exp_morate = np.cumsum(ave_morate * np.ones(nyr))
    modiff = (np.abs(np.max(np.cumsum(morate) - exp_morate)) +
              np.abs(np.min(np.cumsum(morate) - exp_morate)))
    # Return back to Mw
    if fabs(modiff) < 1E-20:
        return -np.inf
    mmax = (2. / 3.) * (np.log10(modiff) - 9.05)
    return mmax
Example 6: boxfilter
def boxfilter(I, r):
    """Fast box filter implementation.

    Parameters
    ----------
    I: a single channel/gray image data normalized to [0.0, 1.0]
    r: window radius

    Return
    ------
    The filtered image data.
    """
    M, N = I.shape
    dest = np.zeros((M, N))

    # cumulative sum over Y axis
    sumY = np.cumsum(I, axis=0)
    # difference over Y axis
    dest[:r + 1] = sumY[r: 2 * r + 1]
    dest[r + 1:M - r] = sumY[2 * r + 1:] - sumY[:M - 2 * r - 1]
    dest[-r:] = np.tile(sumY[-1], (r, 1)) - sumY[M - 2 * r - 1:M - r - 1]

    # cumulative sum over X axis
    sumX = np.cumsum(dest, axis=1)
    # difference over X axis
    dest[:, :r + 1] = sumX[:, r:2 * r + 1]
    dest[:, r + 1:N - r] = sumX[:, 2 * r + 1:] - sumX[:, :N - 2 * r - 1]
    dest[:, -r:] = np.tile(sumX[:, -1][:, None], (1, r)) - \
        sumX[:, N - 2 * r - 1:N - r - 1]

    return dest
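A small sanity check for boxfilter, under the grayscale-input assumption stated in its docstring (the random image here is illustrative): each output pixel should equal the unnormalized sum over a (2r+1) x (2r+1) window, truncated at the image borders.

I = np.random.rand(20, 20)
r = 2
out = boxfilter(I, r)
i, j = 10, 10  # an interior pixel, where the full window fits
assert np.isclose(out[i, j], I[i - r:i + r + 1, j - r:j + r + 1].sum())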
Example 7: _divide
def _divide(self):
    block_size = self.spec.block_size  # shortcut
    half_block = (block_size - 1) // 2  # integer half-width of the block
    rows, columns = self.dividing.nonzero()
    for i in range(len(rows)):
        row = rows[i]
        column = columns[i]
        write_block(self._cell_block, self.cells, row, column, block_size)
        cv2.filter2D(self._cell_block, cv2.CV_32F, self._tension_kernel,
                     self._probability, borderType=cv2.BORDER_CONSTANT)
        cv2.threshold(self._probability, self._tension_min, 0,
                      cv2.THRESH_TOZERO, self._probability)
        self._probability[self._cell_block] = 0
        self._probability **= self.spec.tension_power
        self._probability *= self._distance_kernel
        # optimized version of np.random.choice
        np.cumsum(self._probability.flat, out=self._cumulative)
        total = self._cumulative[-1]
        if total < 1.0e-12:
            # no viable placements, we'll have precision problems anyways
            continue
        self._cumulative /= total
        index = self._indices[np.searchsorted(self._cumulative,
                                              rdm.random())]
        local_row, local_column = np.unravel_index(index,
                                                   self._probability.shape)
        self.set_alive(row + (local_row - half_block),
                       column + (local_column - half_block))
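The cumsum-plus-searchsorted idiom in Example 7 is a common fast substitute for np.random.choice with a probability vector; a standalone sketch of the same idea (the function name and weights are mine):

import numpy as np

def weighted_index(weights):
    # Draw one index i with probability weights[i] / weights.sum().
    cumulative = np.cumsum(weights)
    u = np.random.random() * cumulative[-1]  # uniform draw over the total mass
    return np.searchsorted(cumulative, u)

idx = weighted_index(np.array([0.1, 0.7, 0.2]))  # returns 1 roughly 70% of the time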
Example 8: SNfunc
def SNfunc(self, data, sig, significancefloor=0.5):
    D = data.ravel()
    S = sig.ravel()
    args = numpy.argsort(-D / S)
    D = numpy.take(D, args)
    S = numpy.take(S, args)
    Dsum = numpy.cumsum(D)
    Ssum = numpy.cumsum(S ** 2) ** 0.5
    SN = (Dsum / Ssum).max()

    # regional SN
    import scipy.ndimage as ndimage
    data[data / sig < significancefloor] = 0
    masks, multiplicity = ndimage.measurements.label(data)
    labels = numpy.arange(1, multiplicity + 1)
    SNs = numpy.zeros(multiplicity + 1)
    SNs[0] = SN
    for i in range(multiplicity):
        D = data[masks == i + 1].ravel()
        S = sig[masks == i + 1].ravel()
        args = numpy.argsort(-D / S)
        D = numpy.take(D, args)
        S = numpy.take(S, args)
        Dsum = numpy.cumsum(D)
        Ssum = numpy.cumsum(S ** 2) ** 0.5
        SNi = (Dsum / Ssum).max()
        SNs[i + 1] = SNi
    SNs = -numpy.sort(-SNs)
    return SNs
Example 9: windower
def windower(thing, max_radius):
    thing_pad = numpy.concatenate((
        thing[-max_radius:], thing, thing[:max_radius]
    ))
    thing_sum = numpy.cumsum(numpy.cumsum(thing_pad))
    return (len(thing), thing_sum, max_radius)
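Example 9 pads the array circularly and precomputes a double cumulative sum, so that windowed sums can later be taken as differences of prefix sums. For illustration, the single-cumsum version of the trick (the data is made up):

x = np.arange(10, dtype=float)
c = np.concatenate(([0.0], np.cumsum(x)))
i, j = 3, 7
assert c[j] - c[i] == x[i:j].sum()  # sum of any slice in O(1) after one pass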
Example 10: _generate_sample
def _generate_sample(self, X, nn_data, nn_num, row, col, step):
    """Generate a synthetic sample with an additional step for the
    categorical features.

    Each new sample is generated the same way as in SMOTE. However, the
    categorical features are mapped to the most frequent nearest neighbors
    of the majority class.
    """
    rng = check_random_state(self.random_state)
    sample = super(SMOTENC, self)._generate_sample(X, nn_data, nn_num,
                                                   row, col, step)
    # To avoid conversion and since only a few samples are used, we
    # convert those samples to a dense array.
    sample = (sample.toarray().squeeze()
              if sparse.issparse(sample) else sample)
    all_neighbors = nn_data[nn_num[row]]
    all_neighbors = (all_neighbors.toarray()
                     if sparse.issparse(all_neighbors) else all_neighbors)

    categories_size = ([self.continuous_features_.size] +
                       [cat.size for cat in self.ohe_.categories_])
    for start_idx, end_idx in zip(np.cumsum(categories_size)[:-1],
                                  np.cumsum(categories_size)[1:]):
        col_max = all_neighbors[:, start_idx:end_idx].sum(axis=0)
        # tie breaking argmax
        col_sel = rng.choice(np.flatnonzero(
            np.isclose(col_max, col_max.max())))
        sample[start_idx:end_idx] = 0
        sample[start_idx + col_sel] = 1

    return sparse.csr_matrix(sample) if sparse.issparse(X) else sample
Example 11: _major_slice
def _major_slice(self, idx, copy=False):
    """Index along the major axis where idx is a slice object.
    """
    if idx == slice(None):
        return self.copy() if copy else self

    M, N = self._swap(self.shape)
    start, stop, step = idx.indices(M)
    M = len(xrange(start, stop, step))
    new_shape = self._swap((M, N))
    if M == 0:
        return self.__class__(new_shape)

    row_nnz = np.diff(self.indptr)
    idx_dtype = self.indices.dtype
    res_indptr = np.zeros(M + 1, dtype=idx_dtype)
    np.cumsum(row_nnz[idx], out=res_indptr[1:])

    if step == 1:
        all_idx = slice(self.indptr[start], self.indptr[stop])
        res_indices = np.array(self.indices[all_idx], copy=copy)
        res_data = np.array(self.data[all_idx], copy=copy)
    else:
        nnz = res_indptr[-1]
        res_indices = np.empty(nnz, dtype=idx_dtype)
        res_data = np.empty(nnz, dtype=self.dtype)
        csr_row_slice(start, stop, step, self.indptr, self.indices,
                      self.data, res_indices, res_data)

    return self.__class__((res_data, res_indices, res_indptr),
                          shape=new_shape, copy=False)
Example 12: _major_index_fancy
def _major_index_fancy(self, idx):
    """Index along the major axis where idx is an array of ints.
    """
    idx_dtype = self.indices.dtype
    indices = np.asarray(idx, dtype=idx_dtype).ravel()

    _, N = self._swap(self.shape)
    M = len(indices)
    new_shape = self._swap((M, N))
    if M == 0:
        return self.__class__(new_shape)

    row_nnz = np.diff(self.indptr)
    idx_dtype = self.indices.dtype
    res_indptr = np.zeros(M + 1, dtype=idx_dtype)
    np.cumsum(row_nnz[idx], out=res_indptr[1:])

    nnz = res_indptr[-1]
    res_indices = np.empty(nnz, dtype=idx_dtype)
    res_data = np.empty(nnz, dtype=self.dtype)
    csr_row_index(M, indices, self.indptr, self.indices, self.data,
                  res_indices, res_data)

    return self.__class__((res_data, res_indices, res_indptr),
                          shape=new_shape, copy=False)
Example 13: resize
def resize(self, *shape):
    shape = check_shape(shape)
    if hasattr(self, 'blocksize'):
        bm, bn = self.blocksize
        new_M, rm = divmod(shape[0], bm)
        new_N, rn = divmod(shape[1], bn)
        if rm or rn:
            raise ValueError("shape must be divisible into %s blocks. "
                             "Got %s" % (self.blocksize, shape))
        M, N = self.shape[0] // bm, self.shape[1] // bn
    else:
        new_M, new_N = self._swap(shape)
        M, N = self._swap(self.shape)

    if new_M < M:
        self.indices = self.indices[:self.indptr[new_M]]
        self.data = self.data[:self.indptr[new_M]]
        self.indptr = self.indptr[:new_M + 1]
    elif new_M > M:
        self.indptr = np.resize(self.indptr, new_M + 1)
        self.indptr[M + 1:].fill(self.indptr[M])

    if new_N < N:
        mask = self.indices < new_N
        if not np.all(mask):
            self.indices = self.indices[mask]
            self.data = self.data[mask]
            major_index, val = self._minor_reduce(np.add, mask)
            self.indptr.fill(0)
            self.indptr[1:][major_index] = val
            np.cumsum(self.indptr, out=self.indptr)

    self._shape = shape
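Examples 11 through 13 all rebuild a compressed-sparse indptr the same way: cumulative-summing per-row nonzero counts in place via cumsum's out= argument. A tiny sketch with made-up counts:

row_nnz = np.array([2, 0, 3, 1])    # nonzeros in each row
indptr = np.zeros(len(row_nnz) + 1, dtype=np.intp)
np.cumsum(row_nnz, out=indptr[1:])  # indptr is now [0, 2, 2, 5, 6]
# row i's entries live at indices[indptr[i]:indptr[i+1]]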
Example 14: __init__
def __init__(self, b, bResolution, mu, nuXY, nuErr, massT=172.0, widthT=widthTop, massW=80.4, zPlus=True):
    for key, val in zip(
        ["", "XY", "Z", "E", "T2", "T", "Phi"],
        [mu, np.array([mu.x(), mu.y()]), mu.z(), mu.e(), mu.Perp2(), mu.Pt(), mu.Phi()],
    ):
        setattr(self, "mu" + key, val)
    for key, val in zip(
        ["massW2", "massT", "invT", "bound", "sign", "rawB", "nuXY", "fitNu"],
        [massW ** 2, massT, 1.0 / widthT, False, [-1, 1][zPlus], b, nuXY, utils.LorentzV()],
    ):
        setattr(self, key, val)

    self.bXY = np.array([b.x(), b.y()])
    eig, self.Einv = np.linalg.eig(nuErr)
    self.E = self.Einv.T
    self.inv = 1.0 / np.append([bResolution], np.sqrt(np.maximum(1, eig)))
    self.setFittedNu(nuXY)

    # cumsum over a list of four-vectors gives the running sums mu, mu+nu, mu+nu+b
    _, self.rawW, self.rawT = np.cumsum([mu, self.fitNu, self.rawB])
    self.residualsBSLT = self.fit()
    self.chi2 = self.residualsBSLT.dot(self.residualsBSLT)
    _, self.fitW, self.fitT = np.cumsum([mu, self.fitNu, self.fitB])
Example 15: tests
def tests(self, distribution='exp', pdelete=0., independent=True, dither=0., tilewindow=1.0):
    assert distribution in ['exp', 'exponential', 'poisson', 'regular']
    samplerate = 0.1  # ms
    spikerate = 0.001  # firing rate
    nspikes = 100  # number of spikes to test
    if distribution in ['exp', 'exponential']:
        st1 = np.random.exponential(1. / spikerate, nspikes)
        st1 = np.cumsum(st1)  # intervals -> spike times
    elif distribution == 'regular':
        st1 = np.linspace(int(10. / samplerate),
                          int(9000. / samplerate), int(10. / samplerate))
    elif distribution == 'poisson':
        st1 = np.random.poisson(1. / spikerate, nspikes)
        st1 = np.cumsum(st1)
    if independent:
        st2 = np.random.exponential(1. / spikerate, nspikes)
        st2 = np.cumsum(st2)
    else:
        st2 = st1
    st2 = np.random.choice(st2,
                           int((1.0 - pdelete) * st1.shape[0]), replace=False)
    if dither > 0:
        st2 = st2 + np.random.randn(len(st2)) * dither
    # print('len st1, st2: ', len(st1), len(st2), np.max(st1), np.max(st2))
    self.set_spikes(samplerate, st1, st2, tilewindow=tilewindow)
    sttc = self.calc_sttc()
    print('# of spikes in spike train 1: {0:d}, in spike train 2: {1:d} '.format(st1.shape[0], st2.shape[0]))
    print('STTC value: {0:.3f} '.format(sttc))
    self.plot_sttc(st1, st2)