This article collects typical usage examples of the builtins.round method in Python. If you are wondering how exactly to use builtins.round, or what it looks like in real code, the curated examples below may help. You can also explore other usage examples from the builtins module.
The following presents 15 code examples of the builtins.round method, sorted by popularity.
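Before the examples themselves, a quick standalone refresher on the built-in (a minimal sketch, not taken from any of the projects below): Python 3's round uses banker's rounding (ties go to the nearest even value), and binary floating point can make results look surprising. Several of the examples below exist precisely to work around these two behaviors.

import builtins

print(builtins.round(0.5))   # 0  (ties round to the nearest even integer)
print(builtins.round(1.5))   # 2
print(builtins.round(2.5))   # 2

print(builtins.round(2.675, 2))  # 2.67, because 2.675 is stored as 2.67499...
print(builtins.round(2, 1))      # 2  (ints pass through unchanged)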
Example 1: deepdream
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def deepdream(image, iter_n=10, octave_n=4, octave_scale=1.4, name="Deep Dream"):
    model = DreamModel(model_path=args.data_dir)
    detail = None
    scales = [octave_scale ** -o for o in reversed(list(range(octave_n)))]
    for o_idx, scale in enumerate(scales):
        octave_shape = (
            3, round(image.shape[1] * scale), round(image.shape[2] * scale))
        octave_base = zoom_to(image.as_tensor(), octave_shape)
        detail = np.zeros_like(octave_base) if detail is None else zoom_to(
            detail, octave_shape)
        dream = DeepImage(octave_base + detail)
        model.initialize(dream)
        for i in range(iter_n):
            dream.take_step(model)
            ofile = get_numbered_file(args.dream_file, o_idx * iter_n + i)
            dream.save_image(ofile)
        detail = dream.as_tensor() - octave_base
    return dream
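To make the octave loop concrete, here is a minimal sketch (with a hypothetical 3×512×384 image) of how round turns fractionally scaled dimensions back into valid integer shapes:

octave_n, octave_scale = 4, 1.4
shape = (3, 512, 384)  # hypothetical (channels, height, width)
scales = [octave_scale ** -o for o in reversed(range(octave_n))]
for scale in scales:
    print((3, round(shape[1] * scale), round(shape[2] * scale)))
# (3, 187, 140) -> (3, 261, 196) -> (3, 366, 274) -> (3, 512, 384)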
Example 2: memory_efficiency
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def memory_efficiency(self):
    mem = 100
    if self.memory_footprint() > 0:
        mem = round(float(self.memory_usage()) / float(self.memory_footprint()) * 100)
    mem = int(mem)
    return mem
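A quick sanity check of the percentage computation, with hypothetical byte counts:

usage, footprint = 768, 1024  # hypothetical values
print(int(round(float(usage) / float(footprint) * 100)))  # 75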
Example 3: test_numericalValues
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def test_numericalValues(self, model, index, value, dtype, precision):
    dataFrame = pandas.DataFrame([value], columns=['A'])
    dataFrame['A'] = dataFrame['A'].astype(dtype)
    model.setDataFrame(dataFrame)
    assert not model.dataFrame().empty
    assert model.dataFrame() is dataFrame

    assert index.isValid()
    newValue = value + 1
    model.enableEditing(True)
    assert model.setData(index, newValue)

    if precision:
        modelValue = model.data(index, role=Qt.DisplayRole)
        # assert abs(decimal.Decimal(str(modelValue)).as_tuple().exponent) == precision
        assert model.data(index) == round(newValue, precision)
        assert model.data(index, role=Qt.DisplayRole) == round(newValue, precision)
        assert model.data(index, role=Qt.EditRole) == round(newValue, precision)
    else:
        assert model.data(index) == newValue
        assert model.data(index, role=Qt.DisplayRole) == newValue
        assert model.data(index, role=Qt.EditRole) == newValue
    assert model.data(index, role=Qt.CheckStateRole) == None
    assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
    assert model.data(index, role=DATAFRAME_ROLE).dtype == dtype
Example 4: round
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def round(number, *args):
    '''Replacement for the built-in
    :func:`round() <python:round>` function.'''
    return builtins.round(number, *args)
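Because the wrapper forwards all arguments to builtins.round unchanged, it is a drop-in replacement (useful when a module wants to export a round of its own); a minimal check:

import builtins

assert round(2.5) == builtins.round(2.5) == 2  # banker's rounding intact
assert round(3.14159, 2) == 3.14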
Example 5: round
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def round(num, prec=None):
    # Guard prec=None explicitly; the bare chained comparison `0 < prec < 1`
    # would raise a TypeError on Python 3 when prec is None.
    if prec is not None and 0 < prec < 1:
        return round(num, -int(rmath.log10(prec)))
    if type(num) == complex:
        return complex(round(num.real, prec), round(num.imag, prec))
    if prec:
        return builtins.round(num, int(prec))
    return builtins.round(num)
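A fractional precision is treated as a step size, converted back to decimal digits via log10 (assuming rmath in the snippet is the standard math module):

print(round(3.14159, 0.01))    # 3.14, equivalent to round(3.14159, 2)
print(round(1 + 2.345j, 0.1))  # (1+2.3j): real and imaginary parts rounded separately
print(round(2.5))              # 2: with no precision it defers to builtins.round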
Example 6: gradient
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def gradient(func, x, h=1e-7):
    # Central difference approximation of the derivative.
    result = (func(x + h) - func(x - h)) / (2 * h)
    if h < 1:
        # Relies on the fractional-precision round from Example 5:
        # h=1e-7 rounds the result to 7 decimal places.
        result = round(result, h)
    return result
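With the Example 5 round in scope, a quick check against a known derivative (the central-difference noise of roughly 1e-9 is rounded away at 7 decimal places):

print(gradient(lambda x: x * x, 3.0))  # 6.0, since d/dx x^2 = 2x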
Example 7: round_to
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def round_to(self, base):
    r"""Round to a specific base (like it's required for a grid)

    :param base: base we want to round to
    :return: rounded point

    >>> from KicadModTree import *
    >>> Vector2D(0.1234, 0.5678).round_to(0.01)
    """
    if base == 0 or base is None:
        return self.__copy__()

    return Vector2D([round(v / base) * base for v in self])
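The doctest above elides its output; a standalone sketch of the same snap-to-grid idea on plain floats (Vector2D simply applies it per component):

def snap_to_grid(value, base):
    # snap a coordinate to the nearest multiple of `base`
    return round(value / base) * base

print(snap_to_grid(0.1234, 0.01))  # 0.12
print(snap_to_grid(0.5678, 0.01))  # 0.57 (up to float representation error)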
Example 8: quantize
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def quantize(number, digits=0, q=builtins.round):
    """
    Quantize to somewhere in between a magnitude.

    For example:

    * ceil(55.25, 1.2) => 55.26
    * floor(55.25, 1.2) => 55.24
    * round(55.3333, 2.5) => 55.335
    * round(12.345, 1.1) == round(12.345, 2) == 12.34
    """
    base, fraction = split(digits)
    # quantization beyond an order of magnitude results in a variable amount
    # of decimal digits depending on the lowest common multiple,
    # e.g. floor(1.2341234, 1.25) = 1.225 but floor(1.2341234, 1.5) = 1.20
    if fraction * 10 % 1 > 0:
        digits = base + 2
    else:
        digits = base + 1
    multiplier = 10 ** base * invert(fraction, default=1)
    quantized = q(number * multiplier) / multiplier
    # additional rounding step to get rid of floating point math wonkiness
    return builtins.round(quantized, digits)
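The helpers split and invert are not part of the excerpt; a self-contained sketch of the same idea, assuming split separates digits into integer and fractional parts and invert(f, default=1) returns 1/f for nonzero f:

import builtins

def quantize_sketch(number, digits=0, q=builtins.round):
    base = int(digits)
    fraction = digits - base               # assumed behavior of split()
    inv = 1 / fraction if fraction else 1  # assumed behavior of invert()
    out_digits = base + (2 if fraction * 10 % 1 > 0 else 1)
    multiplier = 10 ** base * inv
    return builtins.round(q(number * multiplier) / multiplier, out_digits)

print(quantize_sketch(55.3333, 2.5))  # 55.335
print(quantize_sketch(12.345, 1.1))   # 12.34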
Example 9: conv_dec_gms
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def conv_dec_gms(self, base_coord, coord_spacing, u, neg_character, pos_character):
    # Convert a decimal-degree coordinate into a degrees/minutes/seconds label
    # expression; round(..., 4) absorbs floating point drift before floor truncates.
    xbase = base_coord + coord_spacing * u
    x = abs(xbase)
    xdeg = floor(round(x, 4))
    xmin = floor(round((x - xdeg) * 60, 4))
    # Seconds are the remaining fraction of a degree times 3600 (the excerpt
    # multiplied by 60, which yields leftover minutes rather than seconds).
    xseg = floor(round((x - xdeg - xmin / 60) * 3600, 4))
    xhem = neg_character if xbase < 0 else pos_character
    conv_exp_str = ('\'' + str(xdeg).rjust(2, '0') + 'º ' + str(xmin).rjust(2, '0')
                    + '\\' + '\' ' + str(xseg).rjust(2, '0') + '"\'' + '+\' ' + str(xhem) + '\'')
    return conv_exp_str
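A standalone sketch of the decimal-to-DMS decomposition used above, showing why the intermediate round(..., 4) calls matter:

from math import floor

def dec_to_dms(x):
    x = abs(x)
    deg = floor(round(x, 4))
    minutes = floor(round((x - deg) * 60, 4))
    seconds = floor(round((x - deg - minutes / 60) * 3600, 4))
    return deg, minutes, seconds

print(dec_to_dms(-47.5125))  # (47, 30, 45); without the rounds, 44.9999... would floor to 44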
Example 10: compound_bprop_bn
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def compound_bprop_bn(self, delta_out, grad_gamma, grad_beta, delta_in,
                      x, xsum, xvar, gamma, eps, threads=None,
                      repeat=1, binary=False, layer=None):
    """
    Function to perform batch normalization backward pass.

    Arguments:
        delta_out (Tensor): Delta buffer (where to write the output deltas)
        grad_gamma (Tensor): Gradient w.r.t. gamma
        grad_beta (Tensor): Gradient w.r.t. beta
        delta_in (Tensor): Delta buffer (where to get the input deltas)
        x (Tensor): feedforward input
        xsum (Tensor): Batch sum over PQN dimension
        xvar (Tensor): Batch variance
        gamma (Tensor): scale parameter
        eps (float): constant for numerical stability
        threads (int): Number of GPU threads
        repeat (int): Repeats for benchmarking
        binary (bool): Binary shift based computations
    """
    assert xsum.dtype.type is np.float32, "xsum should be fp32"

    K = int(x.shape[0])
    N = int(x.shape[1])

    if threads is None:
        if N <= 8192:
            threads = 1 << max(5, int(round(log(N, 2))) - 3)
        else:
            threads = 128 if K < 192 else 64

    params = [(K, 1, 1), (threads, 1, 1), x.backend.stream,
              delta_out.gpudata, grad_gamma.gpudata, grad_beta.gpudata, delta_in.gpudata,
              x.gpudata, xsum.gpudata, xvar.gpudata, gamma.gpudata, eps, N, binary]

    from neon.backends.float_ew import _get_bn_bprop_kernel
    kernel = _get_bn_bprop_kernel(x.dtype.str[1:], threads, self.compute_capability)
    self._execute_bn(kernel, params, repeat, x.nbytes * 4, N)
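The thread-count heuristic 1 << max(5, int(round(log(N, 2))) - 3) picks a power of two near N/8, clamped to at least 32; a standalone illustration with hypothetical N values:

from math import log

for N in (128, 1000, 4096, 8192):
    print(N, 1 << max(5, int(round(log(N, 2))) - 3))
# 128 -> 32, 1000 -> 128, 4096 -> 512, 8192 -> 1024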
Example 11: fprop_roipooling_ref
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def fprop_roipooling_ref(fm, rois, fm_channel, fm_height, fm_width, bsz, rois_per_image, H, W):
    feature_maps = fm.reshape(fm_channel, fm_height, fm_width, bsz)
    rois_per_batch = rois_per_image * bsz
    outputs = np.zeros((fm_channel, H, W, rois_per_batch))

    # combine the feature map with ROIs
    for b_id in range(rois_per_batch):
        [idx, xmin, ymin, xmax, ymax] = rois[b_id]
        # spatial_scale is presumably a module-level constant in the originating test file
        xmin = int(round(xmin * spatial_scale))
        xmax = int(round(xmax * spatial_scale))
        ymin = int(round(ymin * spatial_scale))
        ymax = int(round(ymax * spatial_scale))
        roi_width = max(xmax - xmin + 1, 1)
        roi_height = max(ymax - ymin + 1, 1)

        stride_h = float(roi_height) / H
        stride_w = float(roi_width) / W

        for h_out in range(H):
            sliceh, _ = _fprop_slice_np(h_out, stride_h, fm_height, ymin)
            if sliceh.stop <= sliceh.start:
                continue
            for w_out in range(W):
                slicew, _ = _fprop_slice_np(w_out, stride_w, fm_width, xmin)
                if slicew.stop <= slicew.start:
                    continue
                array_I = feature_maps[:, sliceh, slicew, int(idx)].reshape(
                    fm_channel, -1)
                outputs[:, h_out, w_out, b_id] = np.max(array_I, axis=1)

    return outputs.reshape(-1, rois_per_batch)
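The four int(round(...)) calls project ROI corners from input-image pixels onto the smaller feature map; a minimal illustration with the common 1/16 scale (hypothetical box):

spatial_scale = 1.0 / 16
xmin, ymin, xmax, ymax = 17.0, 39.0, 223.0, 175.0  # hypothetical ROI in image pixels
print([int(round(v * spatial_scale)) for v in (xmin, ymin, xmax, ymax)])
# [1, 2, 14, 11]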
Example 12: to_gds
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def to_gds(self, outfile, multiplier):
    """
    Convert this object to a GDSII element.

    Parameters
    ----------
    outfile : open file
        Output to write the GDSII.
    multiplier : number
        A number that multiplies all dimensions written in the GDSII
        element.
    """
    if isinstance(self.ref_cell, Cell):
        name = self.ref_cell.name
    else:
        name = self.ref_cell
    if len(name) % 2 != 0:
        name = name + "\0"
    outfile.write(struct.pack(">4H", 4, 0x0A00, 4 + len(name), 0x1206))
    outfile.write(name.encode("ascii"))
    if (
        (self.rotation is not None)
        or (self.magnification is not None)
        or self.x_reflection
    ):
        word = 0
        values = b""
        if self.x_reflection:
            word += 0x8000
        if self.magnification is not None:
            # This flag indicates that the magnification is absolute, not
            # relative (not supported).
            # word += 0x0004
            values += struct.pack(">2H", 12, 0x1B05) + _eight_byte_real(
                self.magnification
            )
        if self.rotation is not None:
            # This flag indicates that the rotation is absolute, not
            # relative (not supported).
            # word += 0x0002
            values += struct.pack(">2H", 12, 0x1C05) + _eight_byte_real(
                self.rotation
            )
        outfile.write(struct.pack(">3H", 6, 0x1A01, word))
        outfile.write(values)
    outfile.write(
        struct.pack(
            ">2H2l2H",
            12,
            0x1003,
            int(round(self.origin[0] * multiplier)),
            int(round(self.origin[1] * multiplier)),
            4,
            0x1100,
        )
    )
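GDSII XY records store coordinates as 4-byte integers in database units, hence the scale-and-round on the origin; a minimal sketch (assuming micron coordinates written with the usual 1 nm database unit, i.e. multiplier = 1000):

multiplier = 1000  # hypothetical: micron coordinates in nanometer database units
origin = (1.23456, -0.5)
print(tuple(int(round(c * multiplier)) for c in origin))  # (1235, -500)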
Example 13: compound_fprop_bn
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def compound_fprop_bn(self, x, xsum, xvar, gmean, gvar, gamma, beta, y, eps, rho,
                      compute_batch_sum, accumbeta=0.0, relu=False, threads=None,
                      repeat=1, binary=False, inference=False, outputs=None, layer=None):
    """
    Function to perform compound kernel call for batch normalization
    forward pass.

    Arguments:
        x (Tensor): Input from previous layer
        xsum (Tensor): Precomputed batch sum over PQN dimension
        xvar (Tensor): Buffer for variance (computed in kernel)
        gmean (Tensor): global mean
        gvar (Tensor): global variance
        gamma (Tensor): scale parameter
        beta (Tensor): location parameter
        y (Tensor): normalized output
        eps (float): constant for numerical stability
        rho (float): exponential window averaging constant
        accumbeta (float): value to scale output by before accumulating
        relu (bool): Compound ReLU activation in kernel
        threads (int): Number of GPU threads
        repeat (int): Repeats for benchmarking
        binary (bool): Binary shift based computations
    """
    assert xsum.dtype.type is np.float32

    if inference:
        xhat = (x - gmean) / self.sqrt(gvar + eps)  # Op-tree only
        y[:] = y * accumbeta + xhat * gamma + beta
        return

    if compute_batch_sum:
        xsum[:] = self.sum(x, axis=1)

    K = int(x.shape[0])
    N = int(x.shape[1])

    if threads is None:
        if N <= 8192:
            threads = 1 << max(5, int(round(log(N, 2))) - 3)
        else:
            occup = K / (128.0 * _get_sm_count())
            for t in (32, 64, 128, 256, 512):
                if occup * t > 5.0:
                    threads = t
                    break
            if threads is None:
                threads = 1024

    params = [(K, 1, 1), (threads, 1, 1), x.backend.stream,
              y.gpudata, xvar.gpudata, gmean.gpudata, gvar.gpudata,
              x.gpudata, xsum.gpudata, gmean.gpudata, gvar.gpudata,
              gamma.gpudata, beta.gpudata, eps, rho, accumbeta, N,
              relu, binary]

    from neon.backends.float_ew import _get_bn_fprop_kernel
    kernel = _get_bn_fprop_kernel(x.dtype.str[1:], threads, self.compute_capability)
    self._execute_bn(kernel, params, repeat, x.nbytes * 2, N)
Example 14: roipooling_fprop
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def roipooling_fprop(self, I, rois, O, argmax, roi_count, C, H, W,
                     pooled_height, pooled_width, spatial_scale):
    """
    Function to perform fprop of ROIPooling.

    Arguments:
        I (Tensor): (C, H, W, N)
        rois (Tensor): (ROIs, 5)
        O (Tensor): (C, pooled_height, pooled_width, roi_count)
        argmax (Tensor): (C, pooled_height, pooled_width, roi_count)
    """
    assert I.size == C * H * W * self.bsz,\
        "ROIPooling input feature map size does not match"
    assert O.size == argmax.size == C * pooled_height * pooled_width * roi_count,\
        "ROIPooling output shape does not match"
    assert rois.shape[1] == 5, "ROIs should be on the row dimension"
    assert rois.shape[0] == roi_count, "ROIs do not match with roi count"

    array_fm = I._tensor.reshape(C, H, W, self.bsz)
    array_rois = rois._tensor
    array_O = O._tensor.reshape(C, pooled_height, pooled_width, roi_count)
    array_argmax = argmax._tensor.reshape(C, pooled_height, pooled_width, roi_count)
    array_O[:] = 0
    array_argmax[:] = -1

    # combine the feature map with ROIs
    for b_id in range(roi_count):  # xrange in the original Python 2 source
        [idx, xmin, ymin, xmax, ymax] = array_rois[b_id]
        xmin = int(round(xmin * spatial_scale))
        xmax = int(round(xmax * spatial_scale))
        ymin = int(round(ymin * spatial_scale))
        ymax = int(round(ymax * spatial_scale))
        roi_width = max(xmax - xmin + 1, 1)
        roi_height = max(ymax - ymin + 1, 1)
        stride_h = float(roi_height) / float(pooled_height)
        stride_w = float(roi_width) / float(pooled_width)

        for h_out in range(pooled_height):
            sliceh, lenh = self._roipooling_slice(h_out, stride_h, H, ymin)
            if sliceh.stop <= sliceh.start:
                continue
            for w_out in range(pooled_width):
                slicew, lenw = self._roipooling_slice(w_out, stride_w, W, xmin)
                if slicew.stop <= slicew.start:
                    continue
                array_I = array_fm[:, sliceh, slicew, int(idx)].reshape(C, -1)
                array_O[:, h_out, w_out, b_id] = np.max(array_I, axis=1)
                # get the max idx with respect to feature_maps coordinates
                max_idx_slice = np.unravel_index(np.argmax(array_I, axis=1), (lenh, lenw))
                max_idx_slice_h = max_idx_slice[0] + sliceh.start
                max_idx_slice_w = max_idx_slice[1] + slicew.start
                max_idx_slice = max_idx_slice_h * W + max_idx_slice_w
                array_argmax[:, h_out, w_out, b_id] = max_idx_slice
Example 15: roipooling_bprop
# Required module: import builtins [as alias]
# Or: from builtins import round [as alias]
def roipooling_bprop(self, I, rois, O, argmax, roi_count, C, H, W,
                     pooled_height, pooled_width, spatial_scale):
    """
    Function to perform bprop of ROIPooling.

    Arguments:
        I (Tensor): input errors (C, pooled_height, pooled_width, roi_count)
        argmax (Tensor): max args from the fprop (C, pooled_height, pooled_width, roi_count)
        rois (Tensor): (ROIs, 5)
        O (Tensor): output deltas (C, H, W, N)
    """
    assert I.size == argmax.size == C * pooled_height * pooled_width * roi_count,\
        "ROIPooling bprop input size does not match"
    assert O.size == C * H * W * self.bsz,\
        "ROIPooling bprop output size does not match"
    assert rois.shape[1] == 5, "ROIs should be on the row dimension"
    assert rois.shape[0] == roi_count, "ROIs do not match with roi count"

    array_E = I._tensor.reshape(C, pooled_height, pooled_width, roi_count)
    array_rois = rois._tensor
    array_delta = O._tensor.reshape(C, H, W, self.bsz)
    array_argmax = argmax._tensor.reshape(C, pooled_height, pooled_width, roi_count)
    array_delta[:] = 0

    for b_id in range(roi_count):  # xrange in the original Python 2 source
        [idx, xmin, ymin, xmax, ymax] = array_rois[b_id]
        xmin = int(round(xmin * spatial_scale))
        xmax = int(round(xmax * spatial_scale))
        ymin = int(round(ymin * spatial_scale))
        ymax = int(round(ymax * spatial_scale))
        roi_width = max(xmax - xmin + 1, 1)
        roi_height = max(ymax - ymin + 1, 1)
        stride_h = float(roi_height) / float(pooled_height)
        stride_w = float(roi_width) / float(pooled_width)

        # iterate over all the (w, h) feature-map positions that fall into this ROI
        for w in range(xmin, xmax + 1):
            for h in range(ymin, ymax + 1):
                phstart = int(np.floor(float(h - ymin) / stride_h))
                phend = int(np.ceil(float(h - ymin + 1) / stride_h))
                pwstart = int(np.floor(float(w - xmin) / stride_w))
                pwend = int(np.ceil(float(w - xmin + 1) / stride_w))
                phstart = min(max(phstart, 0), pooled_height)
                phend = min(max(phend, 0), pooled_height)
                pwstart = min(max(pwstart, 0), pooled_width)
                pwend = min(max(pwend, 0), pooled_width)
                for ph in range(phstart, phend):
                    for pw in range(pwstart, pwend):
                        max_idx_tmp = array_argmax[:, ph, pw, b_id]
                        for c in range(C):
                            if max_idx_tmp[c] == (h * W + w):
                                array_delta[c, h, w, int(idx)] += array_E[c, ph, pw, b_id]