This article collects typical usage examples of Python's numpy.finfo method. If you have been wondering what exactly numpy.finfo does, how it is called, or what real-world code that uses it looks like, the curated examples below should help. You can also browse further usage examples of the numpy module to which this method belongs.
The following 15 code examples of numpy.finfo are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
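
Before the examples, a minimal sketch of what np.finfo itself returns. The attribute names are standard NumPy; the values in the comments are approximate and shown only for orientation:

import numpy as np

info = np.finfo(np.float32)    # machine limits for the float32 type
print(info.eps)                # spacing between 1.0 and the next float (~1.19e-07)
print(info.max, info.min)      # largest and most negative finite values
print(info.tiny)               # smallest positive normal number
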
Example 1: __init__
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on pybullet (as p) and gym.spaces from the surrounding module.

def __init__(self, renders=True):
    # start the bullet physics server
    self._renders = renders
    if renders:
        p.connect(p.GUI)
    else:
        p.connect(p.DIRECT)
    # finfo max marks the observation dimensions as effectively unbounded
    observation_high = np.array([
        np.finfo(np.float32).max,
        np.finfo(np.float32).max,
        np.finfo(np.float32).max,
        np.finfo(np.float32).max])
    action_high = np.array([0.1])
    self.action_space = spaces.Discrete(9)
    self.observation_space = spaces.Box(-observation_high, observation_high)
    self.theta_threshold_radians = 1
    self.x_threshold = 2.4
    self._seed()
    # self.reset()
    self.viewer = None
    self._configure()
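
Filling the observation bounds with np.finfo(np.float32).max, as above, is a common way to mark dimensions as effectively unbounded. A minimal standalone sketch of the same idea, assuming the gym package is available (the dimension count of 4 is arbitrary here):

import numpy as np
from gym import spaces

# Every dimension spans the full finite float32 range, i.e. effectively unbounded.
high = np.full(4, np.finfo(np.float32).max, dtype=np.float32)
observation_space = spaces.Box(low=-high, high=high, dtype=np.float32)
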
Example 2: SpectralClustering
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on scipy.sparse.identity and sklearn.cluster.KMeans.

def SpectralClustering(CKSym, n):
    # This is a direct port of JHU vision lab code. Could probably use sklearn SpectralClustering.
    CKSym = CKSym.astype(float)
    N, _ = CKSym.shape
    MAXiter = 1000  # Maximum number of iterations for KMeans
    REPlic = 20  # Number of replications for KMeans
    # Normalized symmetric Laplacian; finfo eps guards against division by zero degrees.
    DN = np.diag(np.divide(1, np.sqrt(np.sum(CKSym, axis=0) + np.finfo(float).eps)))
    LapN = identity(N).toarray().astype(float) - np.matmul(np.matmul(DN, CKSym), DN)
    _, _, vN = np.linalg.svd(LapN)
    vN = vN.T
    kerN = vN[:, N - n:N]
    normN = np.sqrt(np.sum(np.square(kerN), axis=1))
    kerNS = np.divide(kerN, normN.reshape(len(normN), 1) + np.finfo(float).eps)
    km = KMeans(n_clusters=n, n_init=REPlic, max_iter=MAXiter, n_jobs=-1).fit(kerNS)
    return km.labels_
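
As the in-code comment suggests, scikit-learn's own SpectralClustering can consume a precomputed affinity matrix directly. A hedged sketch of that alternative (not a drop-in replacement: the eigensolver, normalization and k-means settings differ from the port above):

from sklearn.cluster import SpectralClustering as SkSpectralClustering

def spectral_clustering_sklearn(CKSym, n):
    # Treat CKSym as a precomputed symmetric, non-negative affinity matrix.
    model = SkSpectralClustering(n_clusters=n, affinity='precomputed', n_init=20)
    return model.fit_predict(CKSym.astype(float))
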
Example 3: merge
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on typing.List and on the surrounding tiler class
# (self.crops, self.weight and the margin/image-size attributes).

def merge(self, tiles: List[np.ndarray], dtype=np.float32):
    if len(tiles) != len(self.crops):
        raise ValueError
    channels = 1 if len(tiles[0].shape) == 2 else tiles[0].shape[2]
    target_shape = (self.image_height + self.margin_bottom + self.margin_top,
                    self.image_width + self.margin_right + self.margin_left,
                    channels)
    image = np.zeros(target_shape, dtype=np.float64)
    norm_mask = np.zeros(target_shape, dtype=np.float64)
    w = np.dstack([self.weight] * channels)
    for tile, (x, y, tile_width, tile_height) in zip(tiles, self.crops):
        # print(x, y, tile_width, tile_height, image.shape)
        image[y:y + tile_height, x:x + tile_width] += tile * w
        norm_mask[y:y + tile_height, x:x + tile_width] += w
    # print(norm_mask.min(), norm_mask.max())
    norm_mask = np.clip(norm_mask, a_min=np.finfo(norm_mask.dtype).eps, a_max=None)
    normalized = np.divide(image, norm_mask).astype(dtype)
    crop = self.crop_to_orignal_size(normalized)
    return crop
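
The np.clip(..., a_min=eps) line above is a guard against dividing by zero in regions no tile covered; a minimal standalone illustration of the same guard:

import numpy as np

accum = np.array([0.0, 1.0, 4.0])      # accumulated weighted pixels
norm_mask = np.array([0.0, 0.5, 2.0])  # accumulated weights (0 = uncovered)
safe = np.clip(norm_mask, a_min=np.finfo(norm_mask.dtype).eps, a_max=None)
print(accum / safe)                    # no divide-by-zero warning; uncovered pixels stay 0
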
Example 4: _mutual_information_varoquaux
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on scipy.ndimage.

def _mutual_information_varoquaux(x, y, bins=256, sigma=1, normalized=True):
    """Based on Gael Varoquaux's implementation: https://gist.github.com/GaelVaroquaux/ead9898bd3c973c40429."""
    jh = np.histogram2d(x, y, bins=bins)[0]
    # Smooth the joint histogram with a Gaussian filter of the given sigma.
    scipy.ndimage.gaussian_filter(jh, sigma=sigma, mode="constant", output=jh)
    # Compute marginal histograms; finfo eps keeps the logarithms finite.
    jh = jh + np.finfo(float).eps
    sh = np.sum(jh)
    jh = jh / sh
    s1 = np.sum(jh, axis=0).reshape((-1, jh.shape[0]))
    s2 = np.sum(jh, axis=1).reshape((jh.shape[1], -1))
    if normalized:
        mi = ((np.sum(s1 * np.log(s1)) + np.sum(s2 * np.log(s2))) / np.sum(jh * np.log(jh))) - 1
    else:
        mi = np.sum(jh * np.log(jh)) - np.sum(s1 * np.log(s1)) - np.sum(s2 * np.log(s2))
    return mi
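
A usage sketch for the function above on synthetic data (the variable names and sample size are chosen here purely for illustration):

import numpy as np

rng = np.random.RandomState(0)
x = rng.normal(size=10000)
y = x + 0.5 * rng.normal(size=10000)            # correlated with x
noise = rng.normal(size=10000)                  # independent of x
print(_mutual_information_varoquaux(x, y))      # should be clearly larger ...
print(_mutual_information_varoquaux(x, noise))  # ... than the value for independent noise
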
Example 5: finish_episode
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on torch.

def finish_episode(self, log_probas, saved_rewards):
    R = 0
    policy_loss = []
    rewards = []
    # Discounted returns, accumulated backwards with a discount factor of 0.8.
    for r in saved_rewards:
        R = r + 0.8 * R
        rewards.insert(0, R)
    rewards = torch.Tensor(rewards)
    # Update: we noticed improved performance without reward normalization.
    # rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
    for log_prob, reward in zip(log_probas, rewards):
        policy_loss.append((-log_prob * reward).unsqueeze(0))
    l = len(policy_loss)
    policy_loss = torch.cat(policy_loss).sum()
    return policy_loss / l
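
The commented-out line above is the usual REINFORCE return-normalization trick, where np.finfo(np.float32).eps keeps the division finite when all returns are equal; a standalone sketch:

import numpy as np
import torch

rewards = torch.tensor([1.0, 1.0, 1.0])    # constant returns: std is exactly zero
eps = np.finfo(np.float32).eps
normalized = (rewards - rewards.mean()) / (rewards.std() + eps)
print(normalized)                          # finite zeros instead of NaN
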
Example 6: test_int_int_min_max
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on helpers from the surrounding test module
# (IUINT_TYPES, SlopeInterArrayWriter, ScalingError, round_trip, int_abs, assert_true).

def test_int_int_min_max():
    # Conversion between (u)int and (u)int
    eps = np.finfo(np.float64).eps
    rtol = 1e-6
    for in_dt in IUINT_TYPES:
        iinf = np.iinfo(in_dt)
        arr = np.array([iinf.min, iinf.max], dtype=in_dt)
        for out_dt in IUINT_TYPES:
            try:
                aw = SlopeInterArrayWriter(arr, out_dt)
            except ScalingError:
                continue
            arr_back_sc = round_trip(aw)
            # integer allclose
            adiff = int_abs(arr - arr_back_sc)
            rdiff = adiff / (arr + eps)
            assert_true(np.all(rdiff < rtol))
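
The eps in the denominator above is only there so the relative difference stays finite when an element of arr is zero; a minimal illustration of that pattern on its own:

import numpy as np

arr = np.array([0, 10, 100], dtype=np.int64)
arr_back = np.array([0, 10, 99], dtype=np.int64)
eps = np.finfo(np.float64).eps
rdiff = np.abs(arr - arr_back) / (arr + eps)   # the 0 / eps entry becomes 0.0, not NaN
print(rdiff)
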
Example 7: test_int_int_slope
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on helpers from the surrounding test module
# (IUINT_TYPES, SlopeArrayWriter, ScalingError, round_trip, int_abs, assert_true, assert_false).

def test_int_int_slope():
    # Conversion between (u)int and (u)int for slopes only
    eps = np.finfo(np.float64).eps
    rtol = 1e-7
    for in_dt in IUINT_TYPES:
        iinf = np.iinfo(in_dt)
        for out_dt in IUINT_TYPES:
            kinds = np.dtype(in_dt).kind + np.dtype(out_dt).kind
            if kinds in ('ii', 'uu', 'ui'):
                arrs = (np.array([iinf.min, iinf.max], dtype=in_dt),)
            elif kinds == 'iu':
                arrs = (np.array([iinf.min, 0], dtype=in_dt),
                        np.array([0, iinf.max], dtype=in_dt))
            for arr in arrs:
                try:
                    aw = SlopeArrayWriter(arr, out_dt)
                except ScalingError:
                    continue
                assert_false(aw.slope == 0)
                arr_back_sc = round_trip(aw)
                # integer allclose
                adiff = int_abs(arr - arr_back_sc)
                rdiff = adiff / (arr + eps)
                assert_true(np.all(rdiff < rtol))
Example 8: test_check_nmant_nexp
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on helpers from the surrounding test module
# (IEEE_floats, ok_floats, type_info, _check_nmant, _check_maxexp, assert_true, assert_false).

def test_check_nmant_nexp():
    # Routine for checking the number of significand digits and the exponent
    for t in IEEE_floats:
        nmant = np.finfo(t).nmant
        maxexp = np.finfo(t).maxexp
        assert_true(_check_nmant(t, nmant))
        assert_false(_check_nmant(t, nmant - 1))
        assert_false(_check_nmant(t, nmant + 1))
        assert_true(_check_maxexp(t, maxexp))
        assert_false(_check_maxexp(t, maxexp - 1))
        assert_false(_check_maxexp(t, maxexp + 1))
    # Check against type_info
    for t in ok_floats():
        ti = type_info(t)
        if ti['nmant'] != 106:  # This check does not work for the PPC double-pair format
            assert_true(_check_nmant(t, ti['nmant']))
        assert_true(_check_maxexp(t, ti['maxexp']))
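
np.finfo also exposes the significand and exponent layout that this test checks; a quick sketch of those attributes for IEEE binary64:

import numpy as np

info = np.finfo(np.float64)
print(info.nmant)    # 52 explicit significand bits
print(info.maxexp)   # 1024: 2**maxexp is the smallest power of two that overflows
print(info.machep)   # -52: the exponent of eps (eps == 2.0**machep)
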
Example 9: test_rt_bias
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on Nifti1Image and on helpers from the surrounding test module
# (IUINT_TYPES, round_trip, rt_err_estimate, assert_true).

def test_rt_bias():
    # Check for bias in round trip
    # Parallel test to arraywriters
    rng = np.random.RandomState(20111214)
    mu, std, count = 100, 10, 100
    arr = rng.normal(mu, std, size=(count,))
    eps = np.finfo(np.float32).eps
    aff = np.eye(4)
    for in_dt in (np.float32, np.float64):
        arr_t = arr.astype(in_dt)
        for out_dt in IUINT_TYPES:
            img = Nifti1Image(arr_t, aff)
            img_back = round_trip(img)
            arr_back_sc = img_back.get_data()
            slope, inter = img_back.get_header().get_slope_inter()
            bias = np.mean(arr_t - arr_back_sc)
            # Get an estimate for the rounding error
            max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, inter)
            # Hokey use of max_miss as a std estimate
            bias_thresh = np.max([max_miss / np.sqrt(count), eps])
            assert_true(np.abs(bias) < bias_thresh)
Example 10: iou
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]

def iou(yx_min1, yx_max1, yx_min2, yx_max2, min=None):
    """
    Calculates the IoU of two bounding boxes.
    :author 申瑞瑉 (Ruimin Shen)
    :param yx_min1: The top left coordinates (y, x) of the first bounding box.
    :param yx_max1: The bottom right coordinates (y, x) of the first bounding box.
    :param yx_min2: The top left coordinates (y, x) of the second bounding box.
    :param yx_max2: The bottom right coordinates (y, x) of the second bounding box.
    :return: The IoU.
    """
    assert np.all(yx_min1 <= yx_max1)
    assert np.all(yx_min2 <= yx_max2)
    if min is None:
        min = np.finfo(yx_min1.dtype).eps
    yx_min = np.maximum(yx_min1, yx_min2)
    yx_max = np.minimum(yx_max1, yx_max2)
    intersect_area = np.multiply.reduce(np.maximum(0.0, yx_max - yx_min))
    area1 = np.multiply.reduce(yx_max1 - yx_min1)
    area2 = np.multiply.reduce(yx_max2 - yx_min2)
    assert np.all(intersect_area >= 0)
    assert np.all(intersect_area <= area1)
    assert np.all(intersect_area <= area2)
    # The finfo eps lower bound keeps the union strictly positive for degenerate boxes.
    union_area = np.maximum(area1 + area2 - intersect_area, min)
    return intersect_area / union_area
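
A usage sketch for iou above, with each box given as (y, x) corner arrays; the numbers are illustrative:

import numpy as np

box1_min, box1_max = np.array([0.0, 0.0]), np.array([2.0, 2.0])  # a 2x2 box
box2_min, box2_max = np.array([1.0, 1.0]), np.array([3.0, 3.0])  # the same box shifted by (1, 1)
print(iou(box1_min, box1_max, box2_min, box2_max))               # 1 / 7, roughly 0.1429
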
Example 11: iou_matrix
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on an intersection_area helper from the same module.

def iou_matrix(yx_min1, yx_max1, yx_min2, yx_max2, min=None):
    """
    Calculates the IoU of two lists of bounding boxes.
    :author 申瑞瑉 (Ruimin Shen)
    :param yx_min1: The top left coordinates (y, x) of the first list (size [N1, 2]) of bounding boxes.
    :param yx_max1: The bottom right coordinates (y, x) of the first list (size [N1, 2]) of bounding boxes.
    :param yx_min2: The top left coordinates (y, x) of the second list (size [N2, 2]) of bounding boxes.
    :param yx_max2: The bottom right coordinates (y, x) of the second list (size [N2, 2]) of bounding boxes.
    :return: The matrix (size [N1, N2]) of the IoU.
    """
    if min is None:
        min = np.finfo(yx_min1.dtype).eps
    assert np.all(yx_min1 <= yx_max1)
    assert np.all(yx_min2 <= yx_max2)
    intersect_area = intersection_area(yx_min1, yx_max1, yx_min2, yx_max2)
    area1 = np.expand_dims(np.multiply.reduce(yx_max1 - yx_min1, -1), 1)
    area2 = np.expand_dims(np.multiply.reduce(yx_max2 - yx_min2, -1), 0)
    assert np.all(intersect_area >= 0)
    assert np.all(intersect_area <= area1)
    assert np.all(intersect_area <= area2)
    union_area = np.maximum(area1 + area2 - intersect_area, min)
    return intersect_area / union_area
Example 12: batch_iou_pair
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on torch.

def batch_iou_pair(yx_min1, yx_max1, yx_min2, yx_max2, min=float(np.finfo(np.float32).eps)):
    """
    Computes the pairwise IoU of two lists (of the same size M) of bounding boxes for N independent batches.
    :author 申瑞瑉 (Ruimin Shen)
    :param yx_min1: The top left coordinates (y, x) of the first lists (size [N, M, 2]) of bounding boxes.
    :param yx_max1: The bottom right coordinates (y, x) of the first lists (size [N, M, 2]) of bounding boxes.
    :param yx_min2: The top left coordinates (y, x) of the second lists (size [N, M, 2]) of bounding boxes.
    :param yx_max2: The bottom right coordinates (y, x) of the second lists (size [N, M, 2]) of bounding boxes.
    :return: The lists (size [N, M]) of the IoU.
    """
    yx_min = torch.max(yx_min1, yx_min2)
    yx_max = torch.min(yx_max1, yx_max2)
    size = torch.clamp(yx_max - yx_min, min=0)
    intersect_area = torch.prod(size, -1)
    area1 = torch.prod(yx_max1 - yx_min1, -1)
    area2 = torch.prod(yx_max2 - yx_min2, -1)
    # The float32 eps default keeps the union strictly positive for degenerate boxes.
    union_area = torch.clamp(area1 + area2 - intersect_area, min=min)
    return intersect_area / union_area
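
A usage sketch for batch_iou_pair above, with one batch of two box pairs (tensors shaped [N, M, 2]; values illustrative):

import torch

yx_min1 = torch.tensor([[[0.0, 0.0], [0.0, 0.0]]])
yx_max1 = torch.tensor([[[2.0, 2.0], [1.0, 1.0]]])
yx_min2 = torch.tensor([[[1.0, 1.0], [5.0, 5.0]]])
yx_max2 = torch.tensor([[[3.0, 3.0], [6.0, 6.0]]])
print(batch_iou_pair(yx_min1, yx_max1, yx_min2, yx_max2))  # roughly tensor([[0.1429, 0.0000]])
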
Example 13: get_fbank
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on an mfc_obj feature extractor passed in by the caller.

def get_fbank(voice, mfc_obj):
    # Extract the log mel-spectrogram
    fbank = mfc_obj.sig2logspec(voice).astype('float32')
    # Mean and variance normalization of each mel-frequency band
    fbank = fbank - fbank.mean(axis=0)
    fbank = fbank / (fbank.std(axis=0) + np.finfo(np.float32).eps)
    # If the duration of a voice recording is less than 10 seconds (1000 frames),
    # repeat the recording until it is longer than 10 seconds, then crop.
    full_frame_number = 1000
    init_frame_number = fbank.shape[0]
    while fbank.shape[0] < full_frame_number:
        fbank = np.append(fbank, fbank[0:init_frame_number], axis=0)
    fbank = fbank[0:full_frame_number, :]
    return fbank
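
The eps in the denominator above keeps the per-band normalization finite for bands with zero variance; a standalone sketch on random data (the shape is chosen here only for illustration):

import numpy as np

fbank = np.random.rand(300, 40).astype(np.float32)              # [frames, mel bands]
fbank = fbank - fbank.mean(axis=0)
fbank = fbank / (fbank.std(axis=0) + np.finfo(np.float32).eps)  # safe even for constant bands
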
Example 14: do_precision_lower_bound
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on assert_equal within the surrounding test class.

def do_precision_lower_bound(self, float_small, float_large):
    eps = np.finfo(float_large).eps
    arr = np.array([1.0], float_small)
    range = np.array([1.0 + eps, 2.0], float_large)
    # The test is looking for behavior when the bounds change between dtypes
    if range.astype(float_small)[0] != 1:
        return
    # previously crashed
    count, x_loc = np.histogram(arr, bins=1, range=range)
    assert_equal(count, [1])
    # gh-10322 means that the type comes from arr - this may change
    assert_equal(x_loc.dtype, float_small)
Example 15: do_precision_upper_bound
# Required import: import numpy [as alias]
# Or: from numpy import finfo [as alias]
# This snippet also relies on assert_equal within the surrounding test class.

def do_precision_upper_bound(self, float_small, float_large):
    eps = np.finfo(float_large).eps
    arr = np.array([1.0], float_small)
    range = np.array([0.0, 1.0 - eps], float_large)
    # The test is looking for behavior when the bounds change between dtypes
    if range.astype(float_small)[-1] != 1:
        return
    # previously crashed
    count, x_loc = np.histogram(arr, bins=1, range=range)
    assert_equal(count, [1])
    # gh-10322 means that the type comes from arr - this may change
    assert_equal(x_loc.dtype, float_small)
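
Both helpers rely on the fact that bounds which differ only by a float64 eps can collapse to the same value when cast to a narrower float type; a quick illustration:

import numpy as np

eps = np.finfo(np.float64).eps
bounds = np.array([1.0 + eps, 2.0], dtype=np.float64)
print(np.float64(1.0) + eps == 1.0)                     # False: distinguishable in float64
print(bounds.astype(np.float32)[0] == np.float32(1.0))  # True: the eps is lost in float32
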