This article collects typical usage examples of the numpy.ceil function in Python. If you have been wondering how exactly Python's ceil function is used, how to call it, or what real code that uses it looks like, the hand-picked code examples below may help.
A total of 15 code examples of the ceil function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
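As a quick refresher before the examples: np.ceil rounds each element up to the nearest integer but returns a float array, so an explicit int cast is needed whenever an integer is required. A minimal sketch:

import numpy as np

x = np.array([-1.7, -0.2, 0.2, 1.5, 2.0])
print(np.ceil(x))               # [-1. -0.  1.  2.  2.]
print(np.ceil(x).astype(int))   # [-1  0  1  2  2]
# A common idiom in the examples below: ceil division for batch counts
n_items, batch = 103, 10
print(int(np.ceil(n_items / batch)))  # 11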
Example 1: __init__

def __init__(self, dataset_size, batch_size, num_batches=None, rng=None):
    self._rng = make_np_rng(rng, which_method=["random_integers",
                                               "shuffle"])
    assert num_batches is None or num_batches >= 0
    self._dataset_size = dataset_size
    if batch_size is None:
        if num_batches is not None:
            # Ceil so the last (possibly partial) batch is still counted
            # (under Python 2 this division truncates; cast to float there).
            batch_size = int(np.ceil(self._dataset_size / num_batches))
        else:
            raise ValueError("need one of batch_size, num_batches "
                             "for sequential batch iteration")
    elif batch_size is not None:
        if num_batches is not None:
            max_num_batches = np.ceil(self._dataset_size / batch_size)
            if num_batches > max_num_batches:
                raise ValueError("dataset of %d examples can only provide "
                                 "%d batches with batch_size %d, but %d "
                                 "batches were requested" %
                                 (self._dataset_size, max_num_batches,
                                  batch_size, num_batches))
        else:
            num_batches = np.ceil(self._dataset_size / batch_size)
    self._batch_size = batch_size
    self._num_batches = int(num_batches)
    self._next_batch_no = 0
    self._idx = 0
    self._batch_order = list(range(self._num_batches))
    self._rng.shuffle(self._batch_order)
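A standalone sketch of just the batch arithmetic above, with hypothetical numbers (make_np_rng is a helper from the surrounding library and is not needed for the ceil logic itself):

import numpy as np

dataset_size, batch_size = 1000, 128
num_batches = int(np.ceil(dataset_size / batch_size))  # last batch is partial
print(num_batches)                                # 8
print(num_batches * batch_size >= dataset_size)   # True: no example is dropped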
Example 2: max_lm

def max_lm(baselines, wavelengths, uwidth, vwidth=0.0):
    """Get the maximum (l,m) that a baseline is sensitive to.

    Parameters
    ----------
    baselines : np.ndarray
        An array of baselines.
    wavelengths : np.ndarray
        An array of wavelengths.
    uwidth : np.ndarray
        Width of the receiver in the u-direction.
    vwidth : np.ndarray
        Width of the receiver in the v-direction.

    Returns
    -------
    lmax, mmax : array_like
    """
    umax = (np.abs(baselines[:, 0]) + uwidth) / wavelengths
    vmax = (np.abs(baselines[:, 1]) + vwidth) / wavelengths

    mmax = np.ceil(2 * np.pi * umax).astype(np.int64)
    lmax = np.ceil((mmax**2 + (2 * np.pi * vmax)**2)**0.5).astype(np.int64)

    return lmax, mmax
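A hypothetical call, assuming the function above is in scope, with two invented baselines in metres and a single wavelength:

import numpy as np

baselines = np.array([[10.0, 0.0],
                      [100.0, 5.0]])   # (u, v) baseline lengths in metres
wavelengths = np.array([0.21])          # 21 cm line
lmax, mmax = max_lm(baselines, wavelengths, uwidth=2.0)
print(mmax)  # first entry: ceil(2*pi*(10+2)/0.21) = 360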
Example 3: _filter_ridge_lines

def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
                        min_snr=1, noise_perc=10):
    """
    Filter ridge lines according to prescribed criteria. Intended
    to be used for finding relative maxima.

    Parameters
    ----------
    cwt : 2-D ndarray
        Continuous wavelet transform from which
        the ridge_lines were defined.
    ridge_lines : 1-D sequence
        Each element should contain 2 sequences, the rows and columns
        of the ridge line (respectively).
    window_size : int, optional
        Size of window to use to calculate noise floor.
        Default is ``cwt.shape[1] / 20``.
    min_length : int, optional
        Minimum length a ridge line needs to be acceptable.
        Default is ``cwt.shape[0] / 4``, i.e. 1/4th the number of widths.
    min_snr : float, optional
        Minimum SNR ratio. Default 1. The signal is the value of
        the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
        noise is the `noise_perc`th percentile of datapoints contained
        within a window of `window_size` around ``cwt[0, loc]``.
    noise_perc : float, optional
        When calculating the noise floor, percentile of data points
        examined below which to consider noise. Calculated using
        scipy.stats.scoreatpercentile.

    References
    ----------
    Bioinformatics (2006) 22 (17): 2059-2065. doi: 10.1093/bioinformatics/btl355
    http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
    """
    num_points = cwt.shape[1]
    if min_length is None:
        min_length = np.ceil(cwt.shape[0] / 4)
    if window_size is None:
        window_size = np.ceil(num_points / 20)
    hf_window = window_size / 2

    # Filter based on SNR
    row_one = cwt[0, :]
    noises = np.zeros_like(row_one)
    for ind, val in enumerate(row_one):
        window = np.arange(max([ind - hf_window, 0]),
                           min([ind + hf_window, num_points]))
        window = window.astype(int)
        noises[ind] = scoreatpercentile(row_one[window], per=noise_perc)

    def filt_func(line):
        if len(line[0]) < min_length:
            return False
        snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
        if snr < min_snr:
            return False
        return True

    return filter(filt_func, ridge_lines)
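The ceil calls above just turn fractional defaults into whole window and length counts. A small standalone sketch of the same noise-floor windowing, with invented data:

import numpy as np
from scipy.stats import scoreatpercentile

row = np.random.default_rng(0).normal(size=100)
window_size = np.ceil(len(row) / 20)   # 5.0
hf = window_size / 2
ind = 50
window = np.arange(max(ind - hf, 0), min(ind + hf, len(row))).astype(int)
print(scoreatpercentile(row[window], per=10))  # local noise floor at ind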
Example 4: setupFakePulsar

def setupFakePulsar(nodes=range(1, 9), fpgaclk=360e6, frqs=cfs, sideband=-1):
    # cfs and vsd are defined elsewhere in the surrounding module
    n = np.arange(8)
    clk = fpgaclk
    if frqs is None:
        frqs = (
            18e9
            - (np.ceil(150e6 / (clk * 4 / 1024.0)) * clk * 4 / 1024.0)
            + ((clk * 2) * (2 * n + 1))
            - ((np.ceil(150e6 / (clk * 4 / 1024.0)) * clk * 4 / 1024.0) * n)
        )
    frqd = dict(zip(n + 1, frqs))
    esr = fpgaclk * 8  # effective sample rate
    pfb_rate = sideband * esr / (2 * 1024.0)
    for node in nodes:
        vsd[node].setParams(
            EFSAMPFR=esr,
            NCHAN=1024,
            EXPOSURE=1e-6,  # exposure should be ~0 to get every single spectrum
            SUB0FREQ=frqd[node],
            OBSFREQ=frqd[node],
            CHAN_BW=pfb_rate,
            FPGACLK=fpgaclk,
        )
Example 5: affine_grid

def affine_grid(self, Hz, rhoz, Lam):
    """
    Get data on a regular spatial grid.
    """
    # First find the dimensionless density params
    Om0 = 8 * pi * rhoz[0] / (3 * Hz[0]**2)
    OL0 = Lam / (3 * Hz[0]**2)
    Ok0 = 1 - Om0 - OL0
    # Get t0
    t0 = self.get_age(Om0, Ok0, OL0, Hz[0])
    # Set affine parameter vals (uvs: spline constructor imported elsewhere,
    # e.g. scipy's UnivariateSpline)
    dvo = uvs(self.z, 1 / (self.uz**2 * Hz), k=3, s=0.0)
    vzo = dvo.antiderivative()
    vz = vzo(self.z)
    vz[0] = 0.0
    # Compute grid sizes that give a numerical error of err
    NJ = int(ceil(vz[-1] / sqrt(self.err) + 1))
    NI = int(ceil(3.0 * (NJ - 1) * (t0 - self.tmin) / vz[-1] + 1))
    # Get functions on the regular grid
    v = linspace(0, vz[-1], NJ)
    delv = (v[-1] - v[0]) / (NJ - 1)
    if delv > sqrt(self.err):
        print('delv > sqrt(err)')
    Ho = uvs(vz, Hz, s=0.0, k=3)
    H = Ho(v)
    rhoo = uvs(vz, rhoz, s=0.0, k=3)
    rho = rhoo(v)
    uo = uvs(vz, self.uz, s=0.0, k=3)
    u = uo(v)
    u[0] = 1.0
    return v, vzo, H, rho, u, NJ, NI, delv, Om0, OL0, Ok0, t0
Example 6: qwtCanvasClip

def qwtCanvasClip(canvas, canvasRect):
    # Shrink to the whole-pixel rectangle inside canvasRect;
    # QRect requires ints, so cast the ceiled/floored floats.
    x1 = int(np.ceil(canvasRect.left()))
    x2 = int(np.floor(canvasRect.right()))
    y1 = int(np.ceil(canvasRect.top()))
    y2 = int(np.floor(canvasRect.bottom()))
    r = QRect(x1, y1, x2 - x1 - 1, y2 - y1 - 1)
    return canvas.borderPath(r)
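The ceil/floor pair snaps a fractional rectangle inward to whole-pixel bounds. A Qt-free sketch of the same arithmetic, with a hypothetical rectangle:

import numpy as np

left, top, right, bottom = 0.3, 1.7, 10.6, 8.2
x1, y1 = np.ceil(left), np.ceil(top)        # 1.0, 2.0 (edges rounded inward)
x2, y2 = np.floor(right), np.floor(bottom)  # 10.0, 8.0
print(x1, y1, x2 - x1 - 1, y2 - y1 - 1)     # 1.0 2.0 8.0 5.0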
Example 7: each_SASA

def each_SASA(sasas, sort_keys, kcat_cut=30, plot=True, meta=None):
    num_sims = len(sort_keys)
    labels = label_maker(sasas, kcat_cut=kcat_cut, name_list=sort_keys)
    base_size = 20.
    wide_factor = 1.5
    color_dict = {True: 'r', False: 'g', 'maybe': 'b', 'wt': 'm'}
    # Lay the per-simulation plots out on a near-square grid
    ncols = int(np.ceil(np.sqrt(num_sims)))
    nrows = int(np.ceil(float(num_sims) / ncols))
    fig = plt.figure(figsize=(base_size, base_size * (float(nrows) / ncols) / wide_factor))
    gs = gridspec.GridSpec(nrows, ncols, hspace=0.65, wspace=0.8)
    axes = [plt.subplot(gs[plot_num // ncols, plot_num % ncols])  # // for integer grid indices
            for plot_num in range(num_sims)]
    max_SASA = 0; ts_scaling = 0.02
    for plot_num, ax in enumerate(axes):
        SASA = sasas[sort_keys[plot_num]]
        ts_sasa = np.sum([SASA['base_sasa'][res]['sasa_vals'] for res in SASA['base_sasa']], axis=0)
        name = SASA['name']; activity = labels[plot_num]
        ts = np.array(range(len(ts_sasa))) * ts_scaling
        ax.plot(ts, ts_sasa, color=color_dict[activity])
        ax.set_title(name)
        ax.tick_params(axis='y', which='both', left='off', right='off', labelleft='on')
        ax.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='on')
        max_SASA = max(max_SASA, max(ts_sasa))
    min_SASA = 0
    if meta:
        meta['kcat cut'] = kcat_cut
        meta['max sasa'] = max_SASA  # the original wrote max_sasa, a NameError
        meta['ts scaling'] = ts_scaling
    else:
        meta = {'kcat cut': kcat_cut, 'max sasa': max_SASA, 'ts scaling': ts_scaling}
    for plot_num, ax in enumerate(axes):
        ax.set_ylim(min_SASA, max_SASA)
    if plot:
        plt.show(block=False)
    else:
        picturesave('fig.each-%s' % plotname, work.plotdir, backup=False, version=True, meta=meta)
Example 8: return_unit_round_neighborhood

def return_unit_round_neighborhood(self, row, col, radius):
    """Return a list of (row, col, distance) tuples for the units around a unit.

    This version uses a circular radius: every element inside the
    radius is taken as part of the neighborhood.
    @param row the row index of the unit
    @param col the column index of the unit
    @param radius the radius of the distance to consider
    """
    output_list = list()
    if radius <= 0:
        output_list.append((row, col, 0))
        return output_list  # only the unit itself if radius <= 0
    # Find the bounding square around the unit,
    # with half-width ceil(radius), clipped to the matrix
    row_range_min = row - int(np.ceil(radius))
    if row_range_min < 0: row_range_min = 0
    row_range_max = row + int(np.ceil(radius))
    if row_range_max >= self._matrix_size: row_range_max = self._matrix_size - 1
    col_range_min = col - int(np.ceil(radius))
    if col_range_min < 0: col_range_min = 0
    col_range_max = col + int(np.ceil(radius))
    if col_range_max >= self._matrix_size: col_range_max = self._matrix_size - 1
    for row_iter in range(row_range_min, row_range_max + 1):
        for col_iter in range(col_range_min, col_range_max + 1):
            # Distance from the BMU via Pythagoras' theorem
            col_distance = np.abs(col - col_iter)
            row_distance = np.abs(row - row_iter)
            distance = np.sqrt(np.power(col_distance, 2) + np.power(row_distance, 2))
            # Keep the unit only if it falls within the radius
            if distance <= radius:
                output_list.append((row_iter, col_iter, distance))
    return output_list
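A standalone sketch of the same bounding-box-then-filter pattern, free of the class context (grid size is hypothetical):

import numpy as np

def round_neighborhood(row, col, radius, size):
    # ceil(radius) is the smallest integer half-width guaranteed
    # to contain the whole circle
    r = int(np.ceil(radius))
    out = []
    for i in range(max(row - r, 0), min(row + r, size - 1) + 1):
        for j in range(max(col - r, 0), min(col + r, size - 1) + 1):
            d = np.hypot(row - i, col - j)
            if d <= radius:
                out.append((i, j, d))
    return out

print(len(round_neighborhood(5, 5, 1.5, 10)))  # 9 units in a radius-1.5 disc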
Example 9: init_log_binned_fx_buckets

def init_log_binned_fx_buckets(self):
    # Initialize refex_log_binned_buckets with the vertical log-bin values,
    # computed from p and the number of vertices in the graph.
    # We fix p = 0.5: in our experiments p = 0.5 proved a sensible choice,
    # with each bin containing the bottom half of the remaining nodes.
    max_fx_value = np.ceil(np.log2(self.no_of_vertices) + self.TOLERANCE)
    log_binned_fx_keys = [value for value in range(0, int(max_fx_value))]  # xrange in the Python 2 original
    fx_bucket_size = []
    starting_bucket_size = self.no_of_vertices
    for idx in np.arange(0.0, max_fx_value):
        starting_bucket_size *= self.p
        fx_bucket_size.append(int(np.ceil(starting_bucket_size)))
    total_slots_in_all_buckets = sum(fx_bucket_size)
    if total_slots_in_all_buckets > self.no_of_vertices:
        fx_bucket_size[0] -= (total_slots_in_all_buckets - self.no_of_vertices)
    log_binned_buckets_dict = dict(zip(log_binned_fx_keys, fx_bucket_size))
    for binned_value in sorted(log_binned_buckets_dict.keys()):
        for count in range(0, log_binned_buckets_dict[binned_value]):
            self.refex_log_binned_buckets.append(binned_value)
    if len(self.refex_log_binned_buckets) != self.no_of_vertices:
        raise Exception("Vertical binned bucket size not equal to the number of vertices!")
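The ceil calls make every geometric bucket at least one slot wide. A standalone sketch of the bucket-size series for a hypothetical graph of 100 vertices and p = 0.5:

import numpy as np

n, p = 100, 0.5
max_fx = int(np.ceil(np.log2(n)))  # 7 bins
sizes, remaining = [], float(n)
for _ in range(max_fx):
    remaining *= p
    sizes.append(int(np.ceil(remaining)))
print(sizes)       # [50, 25, 13, 7, 4, 2, 1]
print(sum(sizes))  # 102: the original trims the overshoot from bucket 0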
Example 10: _drawGraticules

def _drawGraticules(self, m, gd):
    # One graticule line per whole degree inside the bounds
    par = np.arange(np.ceil(gd.ymin), np.floor(gd.ymax) + 1, 1.0)
    mer = np.arange(np.ceil(gd.xmin), np.floor(gd.xmax) + 1, 1.0)
    merdict = m.drawmeridians(mer, labels=[0, 0, 0, 1], fontsize=10,
                              linewidth=0.5, color='gray', zorder=GRATICULE_ZORDER)
    pardict = m.drawparallels(par, labels=[1, 0, 0, 0], fontsize=10,
                              linewidth=0.5, color='gray', zorder=GRATICULE_ZORDER)
    # Loop over the meridian and parallel dicts, change/increase the font, draw ticks
    xticks = []
    for merkey, mervalue in merdict.items():
        merline, merlablist = mervalue
        merlabel = merlablist[0]
        merlabel.set_family('sans-serif')
        merlabel.set_fontsize(12.0)
        xticks.append(merline[0].get_xdata()[0])
    yticks = []
    for parkey, parvalue in pardict.items():
        parline, parlablist = parvalue
        parlabel = parlablist[0]
        parlabel.set_family('sans-serif')
        parlabel.set_fontsize(12.0)
        yticks.append(parline[0].get_ydata()[0])
    # plt.tick_params(axis='both', color='k', direction='in')
    plt.xticks(xticks, ())
    plt.yticks(yticks, ())
    m.ax.tick_params(direction='out')
Example 11: sample_size_necessary_under_cph

def sample_size_necessary_under_cph(power, ratio_of_participants, p_exp, p_con,
                                    postulated_hazard_ratio, alpha=0.05):
    """
    Compute the sample size needed to compare two groups with a given power
    under a Cox Proportional Hazard model.

    References:
      https://cran.r-project.org/web/packages/powerSurvEpi/powerSurvEpi.pdf

    Parameters:
      power: power to detect a hazard ratio as small as postulated_hazard_ratio.
      ratio_of_participants: ratio of participants in the experimental group over the control group.
      p_exp: probability of failure in the experimental group over the period of the study.
      p_con: probability of failure in the control group over the period of the study.
      postulated_hazard_ratio: the postulated hazard ratio.
      alpha: type I error rate.

    Returns:
      n_exp, n_con: the sample sizes needed for the experimental and control
      group, respectively, to achieve the desired power.
    """
    z = lambda p: stats.norm.ppf(p)

    m = 1.0 / ratio_of_participants \
        * ((ratio_of_participants * postulated_hazard_ratio + 1.0) / (postulated_hazard_ratio - 1.0)) ** 2 \
        * (z(1. - alpha / 2.) + z(power)) ** 2

    n_exp = m * ratio_of_participants / (ratio_of_participants * p_exp + p_con)
    n_con = m / (ratio_of_participants * p_exp + p_con)

    return int(np.ceil(n_exp)), int(np.ceil(n_con))
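A hypothetical call, with the function above in scope and numbers invented for illustration; np.ceil rounds both group sizes up, since you cannot recruit a fraction of a participant:

import numpy as np
from scipy import stats  # the function body references stats and np

n_exp, n_con = sample_size_necessary_under_cph(
    power=0.8, ratio_of_participants=1.0,
    p_exp=0.25, p_con=0.35,
    postulated_hazard_ratio=0.7)
print(n_exp, n_con)  # equal groups here, each rounded up to a whole person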
Example 12: create_mask

def create_mask(Nx, Ny, frac,
                rmin=0.5,
                rmax=2):
    """
    Create a mask of Nx by Ny pixels.
    frac: 0 <= frac <= 1, the fraction of pixels to be covered.
    """
    mask = numpy.ones((Nx, Ny))
    ncovered = 0
    goal = frac * Nx * Ny
    while ncovered < goal:
        # Drop a disc of random radius at a random position
        x = Nx * numpy.random.random()
        y = Ny * numpy.random.random()
        r = rmin + numpy.random.random() * (rmax - rmin)
        # floor/ceil give the smallest pixel box containing the disc
        xmin = max(0, int(numpy.floor(x - r)))
        xmax = min(Nx, int(numpy.ceil(x + r)))
        ymin = max(0, int(numpy.floor(y - r)))
        ymax = min(Ny, int(numpy.ceil(y + r)))
        for ix in range(xmin, xmax):
            for iy in range(ymin, ymax):
                if (x - ix)**2 + (y - iy)**2 < r**2:
                    ncovered += mask[ix, iy]
                    mask[ix, iy] = 0
    return mask
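A quick check of the function above (sizes and fraction are arbitrary):

import numpy

numpy.random.seed(0)
mask = create_mask(50, 50, frac=0.2)
covered = (mask == 0).mean()
print(covered)  # slightly above 0.2, since the last disc can overshoot the goal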
Example 13: dispims_color

def dispims_color(M, border=0, bordercolor=[0.0, 0.0, 0.0], savePath=None,
                  *imshow_args, **imshow_keyargs):
    """ Display an array of rgb images.

    The input array is assumed to have the shape
    numimages x numpixelsY x numpixelsX x 3
    """
    bordercolor = numpy.array(bordercolor)[None, None, :]
    numimages = len(M)
    M = M.copy()
    # Normalize each image to [0, 1]
    for i in range(M.shape[0]):
        M[i] -= M[i].flatten().min()
        M[i] /= M[i].flatten().max()
    height, width, three = M[0].shape
    assert three == 3
    # Tile the images on a square-ish n0 x n1 grid
    # (the original used numpy.int, removed in NumPy 1.24; plain int works)
    n0 = int(numpy.ceil(numpy.sqrt(numimages)))
    n1 = int(numpy.ceil(numpy.sqrt(numimages)))
    im = numpy.array(bordercolor) * numpy.ones(
        ((height + border) * n1 + border, (width + border) * n0 + border, 1), dtype='<f8')
    for i in range(n0):
        for j in range(n1):
            if i * n1 + j < numimages:
                im[j * (height + border) + border:(j + 1) * (height + border) + border,
                   i * (width + border) + border:(i + 1) * (width + border) + border, :] = numpy.concatenate((
                       numpy.concatenate((M[i * n1 + j, :, :, :],
                                          bordercolor * numpy.ones((height, border, 3), dtype=float)), 1),
                       bordercolor * numpy.ones((border, width + border, 3), dtype=float)
                   ), 0)
    imshow_keyargs["interpolation"] = "nearest"
    pylab.imshow(im, *imshow_args, **imshow_keyargs)
    if savePath is None:
        pylab.show()
    else:
        pylab.savefig(savePath)
Example 14: _scale_to_res

def _scale_to_res(self):
    """Change self._A and _extent to render an image whose
    resolution is matched to the eventual rendering."""
    ax = self.axes
    ext = ax.transAxes.transform([1, 1]) - ax.transAxes.transform([0, 0])
    xlim, ylim = ax.get_xlim(), ax.get_ylim()
    dx, dy = xlim[1] - xlim[0], ylim[1] - ylim[0]
    y0 = max(self.miny, ylim[0] - 5)
    y1 = min(self._full_res.shape[0] + self.miny, ylim[1] + 5)
    x0 = max(self.minx, xlim[0] - 5)
    x1 = min(self._full_res.shape[1] + self.minx, xlim[1] + 5)
    y0, y1, x0, x1 = map(int, [y0, y1, x0, x1])
    # Stride so that roughly one data point lands on each screen pixel
    sy = int(max(1, min((y1 - y0) / 5., np.ceil(dy / ext[1]))))
    sx = int(max(1, min((x1 - x0) / 5., np.ceil(dx / ext[0]))))
    # Have we already calculated what we need?
    if sx == self._sx and sy == self._sy and \
            x0 == self._bounds[0] and x1 == self._bounds[1] and \
            y0 == self._bounds[2] and y1 == self._bounds[3]:
        return
    self._A = self._full_res[y0 - self.miny:y1 - self.miny:sy,
                             x0 - self.minx:x1 - self.minx:sx]
    x1 = x0 + self._A.shape[1] * sx
    y1 = y0 + self._A.shape[0] * sy
    self.set_extent([x0 - .5, x1 - .5, y0 - .5, y1 - .5])
    self._sx = sx
    self._sy = sy
    self._bounds = (x0, x1, y0, y1)
    self.changed()
Example 15: check_orbits

def check_orbits(p1, t1, p2, t2, tmn, tmx, tol):
    # Event times of each body inside [tmn, tmx]; floor/ceil pick
    # the first and last cycle indices that could land in range
    n1 = t1 + p1 * np.arange(np.floor((tmn - t1) / p1), np.ceil((tmx - t1) / p1))
    n1 = n1[(tmn <= n1) * (n1 <= tmx)]
    n2 = t2 + p2 * np.arange(np.floor((tmn - t2) / p2), np.ceil((tmx - t2) / p2))
    n2 = n2[(tmn <= n2) * (n2 <= tmx)]
    # True when every event in the longer list matches one in the other within tol
    delta = np.fabs(n1[:, None] - n2[None, :])
    return max(len(n1), len(n2)) == np.sum(delta < tol)
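A hypothetical check, with the function above in scope, that two nearly identical ephemerides line up:

import numpy as np

# same period, epochs 0.0001 apart: every event matches within tol
print(check_orbits(p1=10.0, t1=1.0, p2=10.0, t2=1.0001,
                   tmn=0.0, tmx=50.0, tol=0.01))  # True
print(check_orbits(p1=10.0, t1=1.0, p2=7.0, t2=1.0,
                   tmn=0.0, tmx=50.0, tol=0.01))  # False: periods disagree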