This article collects typical usage examples of the scipy.floor function in Python. If you have been wondering what scipy.floor does, how to call it, or want to see it in real code, the curated examples below may help.
The following shows 15 code examples of the floor function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
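Before the project examples, here is a minimal standalone sketch (not taken from any project below) of what floor computes. scipy.floor was a plain re-export of numpy.floor, and recent SciPy releases have deprecated and removed these NumPy aliases, so new code should call numpy.floor directly:
import numpy as np

x = np.array([1.7, -1.7, 2.0, -0.5])
print(np.floor(x))            # [ 1. -2.  2. -1.]  -- element-wise rounding toward -inf
print(int(np.floor(7 / 2)))   # 3 -- cast to int before using the result as an index or size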
Example 1: prob4
# Assumed imports for this snippet: import scipy as sp; from scipy.io import wavfile
# fftw and plot_signal come from the surrounding lab code (fftw being its FFT module,
# e.g. scipy.fftpack or pyFFTW).
def prob4(filename='saw.wav', new_rate=11025, outfile='prob4.wav'):
    """Down-samples a given .wav file to a new rate and saves the resulting
    signal as another .wav file.

    Parameters
    ----------
    filename : string, optional
        The name of the .wav sound file to be down-sampled.
        Defaults to 'saw.wav'.
    new_rate : integer, optional
        The down-sampled rate. Defaults to 11025.
    outfile : string, optional
        The name of the new file. Defaults to 'prob4.wav'.

    Returns
    -------
    None
    """
    old_rate, in_sig = wavfile.read(filename)
    fin = fftw.fft(sp.float32(in_sig))
    # Use if scipy_fftpack is unavailable
    # fin = sp.fft(sp.float32(in_sig))
    nsiz = int(sp.floor(in_sig.size * new_rate / old_rate))
    nsizh = int(sp.floor(nsiz / 2))
    fout = sp.zeros(nsiz) + 0j
    fout[0:nsizh] = fin[0:nsizh]
    fout[nsiz - nsizh + 1:] = sp.conj(sp.flipud(fout[1:nsizh]))
    out = sp.real(sp.ifft(fout))
    out = sp.int16(out / sp.absolute(out).max() * 32767)
    plot_signal(filename)
    wavfile.write(outfile, new_rate, out)
    print("")
    plot_signal(outfile)
Example 2: get_4_squares
def get_4_squares(parent1, parent2):
    # Assumed imports: import numpy as np; import scipy as sp
    # select_subset is defined elsewhere in the project.
    n_folds = 2
    levels1 = np.unique(parent1)
    levels2 = np.unique(parent2)
    N1 = len(levels1)
    N2 = len(levels2)
    r1 = sp.random.permutation(N1)
    r2 = sp.random.permutation(N2)
    # map each permuted level to a fold index in {0, ..., n_folds-1}
    Icv1 = sp.floor(((sp.ones((N1)) * n_folds) * r1) / N1)
    Icv2 = sp.floor(((sp.ones((N2)) * n_folds) * r2) / N2)
    train_parents1 = levels1[Icv1 != 0]
    train_parents2 = levels2[Icv2 != 0]
    test_parents1 = levels1[Icv1 == 0]
    test_parents2 = levels2[Icv2 == 0]
    train_ind1 = np.array([e in train_parents1 for e in parent1], dtype=bool)
    train_ind2 = np.array([e in train_parents2 for e in parent2], dtype=bool)
    test_ind1 = np.array([e in test_parents1 for e in parent1], dtype=bool)
    test_ind2 = np.array([e in test_parents2 for e in parent2], dtype=bool)
    Itest = test_ind1 & test_ind2
    Itrain_distant = train_ind1 & train_ind2
    Itrain_close1 = (train_ind1 & test_ind2)
    Itrain_close2 = (train_ind2 & test_ind1)
    Itrain_close = select_subset(Itrain_close1 | Itrain_close2, Itest.sum())
    return Itest, Itrain_distant, Itrain_close1, Itrain_close2, Itrain_close
Example 3: down_sample
# Assumed imports: import scipy as sp; from scipy.io import wavfile
# (sp.fft is the legacy top-level SciPy FFT alias.)
def down_sample(filename, new_rate, outputfile=None):
    """
    Create a down-sampled copy of the provided .wav file. Unless overridden, the
    output file will be of the form "down_<originalname>.wav".

    Parameters
    ----------
    filename : string
        input .wav file
    new_rate : int
        sample rate of output file
    outputfile : string
        name of output file
    """
    if outputfile is None:
        outputfile = "down_" + filename
    old_rate, in_sig = wavfile.read(filename)
    in_sig = sp.float32(in_sig)
    fin = sp.fft(in_sig)
    nsiz = int(sp.floor(in_sig.size * new_rate / old_rate))
    nsizh = int(sp.floor(nsiz / 2))
    fout = sp.zeros(nsiz) + 0j
    fout[0:nsizh] = fin[0:nsizh]
    fout[nsiz - nsizh + 1:] = sp.conj(sp.flipud(fout[1:nsizh]))
    out = sp.real(sp.ifft(fout))  # take the real component of the signal
    out = sp.int16(out / sp.absolute(out).max() * 32767)
    wavfile.write(outputfile, new_rate, out)
Example 4: f_save_data
def f_save_data(self, outputfile):
    ''' Save treated data to ASCII '''
    # zeros, floor and savetxt are assumed to come from a star import of numpy/scipy.
    if outputfile != '':
        self.outputdir = outputfile
        if self.inputdir == '':
            self.inputdir = self.outputdir
        thinout = int(self.lineEdit_13.text())
        # Save both the field ramp-up and the ramp-down
        if self.checkBox_7.isChecked() and self.checkBox_8.isChecked():
            out_data = zeros((int(floor(len(self.data[0::thinout, 0]))), 3 + 2*len(self.coldata)))
            out_data[:, 0] = self.data[0::thinout, 0]
            out_data[:, 1] = self.data[0::thinout, 2+len(self.colref)+len(self.coldata)]/self.pu_area
            out_data[:, 2] = self.data[0::thinout, 1]
            for j in range(0, len(self.coldata)):
                out_data[:, 2*j+3] = self.sig_out[0::thinout, 2*j]/self.intgain*1e3
                out_data[:, 2*j+4] = self.sig_out[0::thinout, 2*j+1]/self.intgain*1e3
        # Save the field ramp-up only
        elif self.checkBox_7.isChecked():
            out_data = zeros((int(floor(len(self.data[0:self.f_max:thinout, 0]))), 3 + 2*len(self.coldata)))
            out_data[:, 0] = self.data[0:self.f_max:thinout, 0]
            out_data[:, 1] = self.data[0:self.f_max:thinout, 2+len(self.colref)+len(self.coldata)]/self.pu_area
            out_data[:, 2] = self.data[0:self.f_max:thinout, 1]
            for j in range(0, len(self.coldata)):
                out_data[:, 2*j+3] = self.sig_out[0:self.f_max:thinout, 2*j]/self.intgain*1e3
                out_data[:, 2*j+4] = self.sig_out[0:self.f_max:thinout, 2*j+1]/self.intgain*1e3
        # Save the field ramp-down only
        elif self.checkBox_8.isChecked():
            out_data = zeros((int(floor(len(self.data[self.f_max::thinout, 0]))), 3 + 2*len(self.coldata)))
            out_data[:, 0] = self.data[self.f_max::thinout, 0]
            out_data[:, 1] = self.data[self.f_max::thinout, 2+len(self.colref)+len(self.coldata)]/self.pu_area
            out_data[:, 2] = self.data[self.f_max::thinout, 1]
            for j in range(0, len(self.coldata)):
                out_data[:, 2*j+3] = self.sig_out[self.f_max::thinout, 2*j]/self.intgain*1e3
                out_data[:, 2*j+4] = self.sig_out[self.f_max::thinout, 2*j+1]/self.intgain*1e3
        # Otherwise save everything
        else:
            out_data = zeros((int(floor(len(self.data[0::thinout, 0]))), 3 + 2*len(self.coldata)))
            out_data[:, 0] = self.data[0::thinout, 0]
            out_data[:, 1] = self.data[0::thinout, 2+len(self.colref)+len(self.coldata)]/self.pu_area
            out_data[:, 2] = self.data[0::thinout, 1]
            for j in range(0, len(self.coldata)):
                out_data[:, 2*j+3] = self.sig_out[0::thinout, 2*j]/self.intgain*1e3
                out_data[:, 2*j+4] = self.sig_out[0::thinout, 2*j+1]/self.intgain*1e3
        f_handle = open(str(outputfile), 'w')
        f_handle.write('#time\tB\tdBdt\tin_phase\tout_phase\n')
        savetxt(f_handle, out_data[0:len(out_data[:, 0])-2, :], fmt='%10g', delimiter='\t')
        f_handle.close()
        self.label_23.setText('Data saved')
Example 5: test_optdiv1
def test_optdiv1(beta=0.9, pHigh=0.75, grid=scipy.arange(21.0), useValueIter=True):
    # bellman, linterp, OptDivParams1, getn0 and alwaysPayAll come from the surrounding
    # project; scipy, time and matplotlib.pyplot (plt) are assumed imported.
    time1 = time.time()
    localvars = {}

    def postVIterCallbackFn(nIter, currentVArray, newVArray, optControls, stoppingResult):
        global g_iterList
        (stoppingDecision, diff) = stoppingResult
        print("iter %d, diff %f" % (nIter, diff))
        localvars[0] = nIter

    def postPIterCallbackFn(nIter, newVArray, currentPolicyArrayList, greedyPolicyList, stoppingResult):
        (stoppingDecision, diff) = stoppingResult
        print("iter %d, diff %f" % (nIter, diff))
        localvars[0] = nIter

    initialVArray = grid              # initial guess for V: a linear fn
    initialPolicyArray = grid         # initial guess for d: pay out everything
    utilityFn = lambda x: x           # linear utility
    zStates = [-1.0, 1.0]
    zProbs = [1.0 - pHigh, pHigh]     # income shock
    # don't use parallel search with this, since it makes a callback to Python
    params = OptDivParams1(utilityFn, beta, zStates, zProbs, grid)
    if useValueIter:
        result = bellman.grid_valueIteration([grid], initialVArray, params, postIterCallbackFn=postVIterCallbackFn, parallel=False)
        (nIter, currentVArray, newVArray, optControls) = result
    else:
        result = bellman.grid_policyIteration([grid], [initialPolicyArray], initialVArray, params, postIterCallbackFn=postPIterCallbackFn, parallel=False)
        (nIter, currentVArray, currentPolicyArrayList, greedyPolicyList) = result
        newVArray = currentVArray
        optControls = currentPolicyArrayList
    time2 = time.time()
    nIters = localvars[0]
    print("total time: %f, avg time: %f" % (time2 - time1, (time2 - time1) / nIters))
    print("x_0 == 0: %d" % alwaysPayAll(beta, pHigh))
    n0 = getn0(beta, pHigh)
    optd_fn = linterp.LinInterp1D(grid, optControls[0])
    print("n0: %f, d(floor(n0)): %f" % (n0, optd_fn(scipy.floor(n0))))
    # plot V
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(grid, newVArray)
    ax.set_xlabel("M")
    ax.set_ylabel("V")
    # plot optimal d
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(grid, optControls[0])
    ax.axvline(scipy.floor(n0), color='gray')
    ax.set_xlabel("M")
    ax.set_ylabel("optimal d")
    plt.show()
    return result
Example 6: get_cb_ticks
def get_cb_ticks(values):
    min_tick = sp.nanmin(values)
    max_tick = sp.nanmax(values)
    med_tick = min_tick + (max_tick - min_tick) / 2.0
    if max_tick > 1.0:
        min_tick = sp.ceil(min_tick)
        max_tick = sp.floor(max_tick)
        med_tick = sp.around(med_tick)
    else:
        min_tick = sp.ceil(min_tick * 100.0) / 100.0
        max_tick = sp.floor(max_tick * 100.0) / 100.0
        med_tick = sp.around(med_tick, 2)
    return [min_tick, med_tick, max_tick]
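A hypothetical usage sketch for get_cb_ticks (the data values and the matplotlib follow-up are illustrative assumptions; sp must be an older SciPy that still re-exports nanmin/ceil/floor/around, or simply NumPy): the function returns three colorbar ticks, rounded to whole numbers when the data exceed 1.0 and to two decimals otherwise.
import numpy as sp   # stand-in for the legacy "import scipy as sp" the function expects

values = sp.array([0.5, 2.7, sp.nan, 9.3])   # e.g. values shown on a heatmap
ticks = get_cb_ticks(values)                 # -> [1.0, 5.0, 9.0]
# typical follow-up with matplotlib:
# cbar = fig.colorbar(im); cbar.set_ticks(ticks)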
Example 7: draw_from_Q_true
def draw_from_Q_true(N, bbox):
    # Draw xs and ys from a normal distribution
    vis = sp.random.randn(2*N, 2)
    # Create bimodal distribution
    ncut = int(sp.floor(2*N/3))
    xis = vis[:, 0]
    yis = vis[:, 1]
    yis[:ncut] -= 2.0
    yis[ncut:] += 2.0
    xis[:ncut] -= 2.0
    xis[ncut:] *= 2.0
    xis[ncut:] += 1.0
    # Shuffle xis and yis
    indices = sp.arange(len(vis))
    sp.random.shuffle(indices)
    xis = xis[indices]
    yis = yis[indices]
    # Select exactly N data points
    indices = (xis > bbox[0]) & (xis < bbox[1]) & (yis > bbox[2]) & (yis < bbox[3])
    xis = xis[indices]
    xis = xis[:N]
    yis = yis[indices]
    yis = yis[:N]
    return xis, yis
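A hypothetical call to draw_from_Q_true (the sample count and bounding box are made up; sp is assumed to be NumPy or an older SciPy that still exposes the NumPy aliases):
import numpy as sp   # stand-in for the legacy "import scipy as sp" used above

bbox = [-4.0, 4.0, -4.0, 4.0]            # [xmin, xmax, ymin, ymax]
xis, yis = draw_from_Q_true(1000, bbox)
print(len(xis), len(yis))                # at most 1000 points, all inside the box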
Example 8: normalizeLength
def normalizeLength(self, noteOns, factor):
    #shibu = 60. / self.wavetempo * (self.binarized_data[0].size / self.duration)
    shibu = (self.fs/10.) / (self.wavetempo/60.)
    fixToResolution = noteOns/shibu*480.
    fixToResolution[:, 2] = noteOns[:, 2]
    # MIDI_Res (resolution) = 480
    MIDI_Res = 480.
    minnotel = 1./4.*MIDI_Res
    # rate (allowed quantization error)
    rate = 0.5
    # sort rows in descending order of note number
    fixToResolution = self.rowsort(fixToResolution)
    self.oldFixToResolution = sp.copy(fixToResolution)
    # list of LilyPond note symbols
    book = [[] for i in range(fixToResolution.shape[0])]
    for n in range(fixToResolution.shape[0]):
        x_cor = fixToResolution[n, 0] + minnotel*rate - 1
        #x_cor = fixToResolution[n, 0] + minnotel - 1
        x_cor = (sp.floor(x_cor/minnotel))*minnotel
        if x_cor == 0:
            x_cor = 1
        fixToResolution[n, 0] = x_cor
        fixToResolution[n, 3], book[n] = self.normalizeNoteLength(fixToResolution[n, 3] + factor)
        book[n] = self.convertNoteNo(fixToResolution[n, 2]) + book[n]
        fixToResolution[n, 1] = fixToResolution[n, 3] + fixToResolution[n, 0] - 1
    self.book = book
    return fixToResolution
Example 9: xNES
def xNES(f, x0, maxEvals=1e6, verbose=False, targetFitness=-1e-10):
    """ Exponential NES (xNES), as described in
    Glasmachers, Schaul, Sun, Wierstra and Schmidhuber (GECCO'10).
    Maximizes a function f.
    Returns (best solution found, corresponding fitness).
    """
    # eye, log, sqrt, floor, randn, dot, outer, argmax, Inf and expm2 are assumed to come
    # from a star import of scipy/numpy; computeUtilities is defined elsewhere in the project.
    dim = len(x0)
    I = eye(dim)
    learningRate = 0.6 * (3 + log(dim)) / dim / sqrt(dim)
    batchSize = 4 + int(floor(3 * log(dim)))
    center = x0.copy()
    A = eye(dim)  # sqrt of the covariance matrix
    numEvals = 0
    bestFound = None
    bestFitness = -Inf
    while numEvals + batchSize <= maxEvals and bestFitness < targetFitness:
        # produce and evaluate samples
        samples = [randn(dim) for _ in range(batchSize)]
        fitnesses = [f(dot(A, s) + center) for s in samples]
        if max(fitnesses) > bestFitness:
            bestFitness = max(fitnesses)
            bestFound = samples[argmax(fitnesses)]
        numEvals += batchSize
        if verbose:
            print("Step", numEvals // batchSize, ":", max(fitnesses), "best:", bestFitness)
        # print(A)
        # update center and variances
        utilities = computeUtilities(fitnesses)
        center += dot(A, dot(utilities, samples))
        covGradient = sum([u * (outer(s, s) - I) for (s, u) in zip(samples, utilities)])
        A = dot(A, expm2(0.5 * learningRate * covGradient))  # expm2 is from older scipy.linalg; current releases provide expm
    return bestFound, bestFitness
Example 10: sart
def sart(self):
    # multiprocess and split_seq are helper utilities from the surrounding project.
    self.wij_sum = sp.zeros((self.ny, self.ny))
    if self.pslice is None:
        slice_range = range(self.nx)
    else:
        slice_range = [self.pslice]
    for self.pslice in slice_range:
        self.reco = sp.zeros((self.ny, self.ny))
        sinogram = self.projections[:, self.pslice, :]
        self.update_figure(pslice=True)
        for it in range(self.iterations):
            self.upd = sp.zeros_like(self.reco)
            for i in range(self.n_proj):
                then = time.time()
                multip = multiprocess(self.ray_update_worker, num_processes=12)
                for chunk in split_seq(range(self.ny), int(sp.floor(self.ny / multip.num_processes))):
                    multip.add_job((self.angles[i], sinogram[i, :], self.reco.copy(), chunk, it == 0))
                self.do_closeout(multip)
                if i % 10 == 0:
                    print('Iter: {:d}, Proj: {:d}, Duration: {:3.2f} sec'.format(it, i, time.time() - then))
            if it == 0:
                self.reco += self.upd / (self.wij_sum + 0.1)
            else:
                self.reco += self.relax * self.upd / (self.wij_sum + 0.1)
            self.update_figure()
Example 11: ngp
def ngp(parameters, positions, values):
    # Assumed: import scipy as sp; periodic_boundaries is a module-level flag.
    values_ngp = sp.zeros((parameters.Ng, parameters.Ng, parameters.Ng))
    counts_ngp = sp.zeros((parameters.Ng, parameters.Ng, parameters.Ng))
    cellsize = parameters.boxsize / parameters.Ng
    for position, pvalue in zip(positions, values):
        position = sp.array(position)
        position_cellunits = position / cellsize
        # cell indices (cast to int so they can be used for array indexing)
        cell_indices = sp.floor(position_cellunits).astype(int)
        if periodic_boundaries:
            cell_indices = sp.mod(cell_indices, parameters.Ng)
        index_x, index_y, index_z = cell_indices[0], cell_indices[1], cell_indices[2]
        values_ngp[index_x][index_y][index_z] += pvalue
        counts_ngp[index_x][index_y][index_z] += 1
    values_ngp = sp.array(values_ngp) / sp.array(counts_ngp)
    print("Don't mind this warning. Astropy can handle nan-values")
    return values_ngp
Example 12: cmd_ylim
def cmd_ylim(mu):
    if scipy.ceil(mu) - mu < mu - scipy.floor(mu):
        cmax = scipy.ceil(mu) + 1
    else:
        cmax = scipy.ceil(mu)
    cmin = cmax - 3
    return cmin, cmax
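A hypothetical usage note for cmd_ylim (assuming a SciPy version that still exposes ceil/floor, or scipy replaced by numpy, and that ax is a matplotlib Axes): it pins a 3-unit y-axis window whose top is ceil(mu), plus one extra unit when mu lies in the upper half of its integer interval.
cmin, cmax = cmd_ylim(4.2)     # -> (2.0, 5.0); cmd_ylim(4.8) would give (3.0, 6.0)
# ax.set_ylim(cmin, cmax)      # typical follow-up on a matplotlib Axes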
Example 13: plot_down_saw_spec_correct
# Assumed imports: import scipy as sp; from scipy.io import wavfile;
# import matplotlib.pyplot as plt; anfft is a third-party FFT wrapper.
def plot_down_saw_spec_correct():
    plt.close('all')
    rate, in_sig = wavfile.read('saw.wav')
    old_rate = 44100
    new_rate = 22050
    in_sig = sp.float32(in_sig)
    fin = anfft.fft(in_sig)
    nsiz = int(sp.floor(in_sig.size * new_rate / old_rate))
    nsizh = int(sp.floor(nsiz / 2))
    fout = sp.zeros(nsiz) + 0j
    fout[0:nsizh] = fin[0:nsizh]
    fout[nsiz - nsizh + 1:] = sp.conj(sp.flipud(fout[1:nsizh]))
    f = sp.absolute(fout)
    plt.plot(f[0:f.shape[0] // 2])
    plt.savefig('sawdownspec.pdf')
Example 14: rescale
def rescale(self):
    # isQuantity, PREFIXES and PREFIXES_METER come from the surrounding module.
    if isQuantity(self.unit):
        oldUnit = self.unit.inBaseUnits()
    else:
        return
    # Compute decade of field and multiply it to oldUnit
    oldFieldAmplitude = max(abs(numpy.amax(self.data)), abs(numpy.amin(self.data)))
    oldUnit *= oldFieldAmplitude
    # Compute next lower decade
    decade = scipy.log10(oldUnit.value)
    newDecade = 10**(scipy.floor(decade))
    # Find appropriate prefix
    baseUnit = oldUnit.unit.name()
    if baseUnit == 'm':
        prefixes = PREFIXES_METER
    else:
        prefixes = PREFIXES
    # pick the prefix whose decade lies closest to newDecade
    # (Python 3-safe rewrite of the original map/filter chain)
    prefixCandidates = [(p[0], abs(p[1] - newDecade)) for p in prefixes]
    newPrefix = min(prefixCandidates, key=lambda candidate: candidate[1])[0]
    newUnitName = newPrefix + baseUnit
    # Convert to new unit
    newUnit = oldUnit.inUnitsOf(newUnitName)
    unitAmplitude = newUnit.value
    if self.data.dtype.name.startswith('int'):
        self.unit = newUnit / oldFieldAmplitude
        return
    self.data *= unitAmplitude / oldFieldAmplitude
    self.unit = newUnit / unitAmplitude
Example 15: split_jobs
def split_jobs(Y, Njobs):
    # split phenotype matrix into jobs
    # think about splitting snps also
    # Assumed import: import scipy as SP
    splits = []
    [N, Np] = Y.shape
    # maximal splitting range is one job per phenotype
    Njobs = min(Njobs, Np)
    # figure out phenotypes per job (rounded down)
    npj = int(SP.floor(SP.double(Np) / Njobs))
    i0 = 0
    i1 = npj
    for n in range(Njobs):
        if n == (Njobs - 1):
            # make sure the last job spans all the rest
            i1 = Np
        Y_ = Y[:, i0:i1]
        splits.append([i0, i1, Y_])
        # next split
        i0 = i1
        i1 = i1 + npj
    return splits
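A hypothetical usage sketch for split_jobs (the matrix shape is made up; SP must provide floor and double, i.e. NumPy or an older SciPy):
import numpy as SP   # stand-in for the legacy "import scipy as SP" used above

Y = SP.random.randn(50, 10)          # 50 samples x 10 phenotypes
for i0, i1, Y_chunk in split_jobs(Y, 4):
    print(i0, i1, Y_chunk.shape)     # column chunks [0,2), [2,4), [4,6), [6,10)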