This article collects typical usage examples of numpy.float in Python. If you are wondering what numpy.float does or how to use it, the curated code examples below may help. You can also explore further usage examples from the numpy module itself.
The following shows 15 code examples of numpy.float, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
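Before the examples, a quick orientation: np.float is simply an alias for Python's built-in float, and as an array dtype it maps to np.float64. The alias was deprecated in NumPy 1.20 and removed in NumPy 1.24, so the pattern below only runs on older NumPy versions (or after swapping np.float for plain float). A minimal sketch of the usage pattern that recurs throughout the examples:

import numpy as np

# np.float is just Python's float; as a dtype it resolves to float64.
# On NumPy >= 1.24 replace np.float with float or np.float64.
a = np.array([1, 2, 3], dtype=np.float)   # same as dtype=float / np.float64
b = a.astype(np.float)                    # the cast seen in most examples below
print(a.dtype, b.dtype)                   # float64 float64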
Example 1: _project_im_rois
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def _project_im_rois(im_rois, scales):
    """Project image RoIs into the image pyramid built by _get_image_blob.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        scales (list): scale factors as returned by _get_image_blob
    Returns:
        rois (ndarray): R x 4 matrix of projected RoI coordinates
        levels (list): image pyramid levels used by each projected RoI
    """
    im_rois = im_rois.astype(np.float, copy=False)
    if len(scales) > 1:
        widths = im_rois[:, 2] - im_rois[:, 0] + 1
        heights = im_rois[:, 3] - im_rois[:, 1] + 1
        areas = widths * heights
        scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
        diff_areas = np.abs(scaled_areas - 224 * 224)
        levels = diff_areas.argmin(axis=1)[:, np.newaxis]
    else:
        levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
    rois = im_rois * scales[levels]
    return rois, levels
Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 26, Source: test.py
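A hedged usage sketch for the function above; the RoI coordinates and pyramid scales here are invented for illustration, and scales must be a NumPy array for the fancy indexing to work:

rois_in = np.array([[10, 20, 110, 220]], dtype=np.float)   # one RoI as x1, y1, x2, y2
scales = np.array([0.5, 1.0, 2.0])                         # hypothetical pyramid scale factors
rois_out, levels = _project_im_rois(rois_in, scales)
# levels picks, per RoI, the pyramid level whose scaled area is closest to 224 * 224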
Example 2: _coco_results_one_category
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def _coco_results_one_category(self, boxes, cat_id):
    results = []
    for im_ind, index in enumerate(self.image_index):
        dets = boxes[im_ind].astype(np.float)
        # comparing an ndarray to [] is unreliable; check the length instead
        if len(dets) == 0:
            continue
        scores = dets[:, -1]
        xs = dets[:, 0]
        ys = dets[:, 1]
        ws = dets[:, 2] - xs + 1
        hs = dets[:, 3] - ys + 1
        results.extend(
            [{'image_id': index,
              'category_id': cat_id,
              'bbox': [xs[k], ys[k], ws[k], hs[k]],
              'score': scores[k]} for k in range(dets.shape[0])])
    return results
Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 19, Source: coco.py
Example 3: load_images
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def load_images(input_dir, metadata_file_path, batch_shape):
    """Retrieve numpy arrays of images and labels, read from a directory."""
    num_images = batch_shape[0]
    with open(metadata_file_path) as input_file:
        reader = csv.reader(input_file)
        header_row = next(reader)
        rows = list(reader)
    row_idx_image_id = header_row.index('ImageId')
    row_idx_true_label = header_row.index('TrueLabel')
    images = np.zeros(batch_shape)
    labels = np.zeros(num_images, dtype=np.int32)
    for idx in xrange(num_images):
        row = rows[idx]
        filepath = os.path.join(input_dir, row[row_idx_image_id] + '.png')
        with tf.gfile.Open(filepath, 'rb') as f:
            image = np.array(
                Image.open(f).convert('RGB')).astype(np.float) / 255.0
        images[idx, :, :, :] = image
        labels[idx] = int(row[row_idx_true_label])
    return images, labels
Example 4: create_graph_mutag
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def create_graph_mutag(file):
    f = open(file, 'r')
    lines = f.read().splitlines()
    f.close()
    # get the indices of the vertex, adj list and class
    idx_vertex = lines.index("#v - vertex labels")
    idx_edge = lines.index("#e - edge labels")
    idx_clss = lines.index("#c - Class")
    # node label
    vl = [int(ivl) for ivl in lines[idx_vertex + 1:idx_edge]]
    edge_list = lines[idx_edge + 1:idx_clss]
    g = nx.parse_edgelist(edge_list, nodetype=int, data=(('weight', float),), delimiter=",")
    for i in range(1, g.number_of_nodes() + 1):
        g.node[i]['labels'] = np.array(vl[i - 1])
    c = int(lines[idx_clss + 1])
    return g, c
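The expected file layout can be inferred from the parsing above; a hedged sketch of a minimal input file (the section markers are the exact strings searched for in the code, the values are invented: one integer vertex label per line, comma-separated "node,node,weight" edges, then the class label):

#v - vertex labels
3
1
#e - edge labels
1,2,7
#c - Class
1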
Example 5: train
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def train(self, train_structures, energies, forces, stresses=None, **kwargs):
    """
    Train the model with the given dataset.
    Args:
        train_structures ([Structure]): The list of Pymatgen Structure objects.
        energies ([float]): List of total energies of each structure in
            structures list.
        forces ([np.array]): List of (m, 3) forces array of each structure
            with m atoms in structures list. m can be varied with each
            single structure case.
        stresses (list): List of (6, ) virial stresses of each
            structure in structures list.
    """
    train_pool = pool_from(train_structures, energies, forces, stresses)
    _, df = convert_docs(train_pool)
    ytrain = df['y_orig'] / df['n']
    self.model.fit(inputs=train_structures, outputs=ytrain, **kwargs)
    self.specie = Element(train_structures[0].symbol_set[0])
Example 6: evaluate
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def evaluate(self, test_structures, ref_energies, ref_forces, ref_stresses):
    """
    Evaluate energies, forces and stresses of structures with trained
    interatomic potentials.
    Args:
        test_structures ([Structure]): List of Pymatgen Structure objects.
        ref_energies ([float]): List of DFT-calculated total energies of
            each structure in structures list.
        ref_forces ([np.array]): List of DFT-calculated (m, 3) forces of
            each structure with m atoms in structures list. m can be varied
            with each single structure case.
        ref_stresses (list): List of DFT-calculated (6, ) virial stresses
            of each structure in structures list.
    """
    predict_pool = pool_from(test_structures, ref_energies,
                             ref_forces, ref_stresses)
    _, df_orig = convert_docs(predict_pool)
    _, df_predict = convert_docs(pool_from(test_structures))
    outputs = self.model.predict(inputs=test_structures, override=True)
    df_predict['y_orig'] = df_predict['n'] * outputs
    return df_orig, df_predict
Example 7: apply_cmap
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def apply_cmap(zs, cmap, vmin=None, vmax=None, unit=None, logrescale=False):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin and/or vmax are passed,
    they are used to scale z.
    Note that this function can automatically rescale data into log-space if the colormap is a
    neuropythy log-space colormap such as log_eccentricity. To enable this behaviour use the
    optional argument logrescale=True.
    '''
    zs = pimms.mag(zs) if unit is None else pimms.mag(zs, unit)
    zs = np.asarray(zs, dtype='float')
    if pimms.is_str(cmap): cmap = matplotlib.cm.get_cmap(cmap)
    if logrescale:
        if vmin is None: vmin = np.log(np.nanmin(zs))
        if vmax is None: vmax = np.log(np.nanmax(zs))
        mn = np.exp(vmin)
        u = zdivide(nanlog(zs + mn) - vmin, vmax - vmin, null=np.nan)
    else:
        if vmin is None: vmin = np.nanmin(zs)
        if vmax is None: vmax = np.nanmax(zs)
        u = zdivide(zs - vmin, vmax - vmin, null=np.nan)
    u[np.isnan(u)] = -np.inf
    return cmap(u)
Example 8: preprocess
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def preprocess(self, img):
    """
    Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) (1 x input_size)
    float vector.
    """
    # Crop, down-sample, erase background and set foreground to 1.
    # See https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
    img = img[35:195]
    img = img[::2, ::2, 0]
    img[img == 144] = 0
    img[img == 109] = 0
    img[img != 0] = 1
    curr = np.expand_dims(img.astype(np.float).ravel(), axis=0)
    # Subtract the last preprocessed image.
    diff = (curr - self.prev if self.prev is not None
            else np.zeros((1, curr.shape[1])))
    self.prev = curr
    return diff
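A toy invocation, assuming the method above is available as a plain module-level function; _FrameState is a hypothetical stand-in for the owning object, and the frame is a zero-filled array in place of an Atari-style 210x160x3 observation:

class _FrameState(object):
    prev = None  # hypothetical holder for the previous preprocessed frame

state = _FrameState()
frame = np.zeros((210, 160, 3), dtype=np.uint8)
diff = preprocess(state, frame)   # first call returns all zeros
print(diff.shape)                 # (1, 6400)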
Example 9: bbox_overlaps
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def bbox_overlaps(boxes, query_boxes):
    """
    determine overlaps between boxes and query_boxes
    :param boxes: n * 4 bounding boxes
    :param query_boxes: k * 4 bounding boxes
    :return: overlaps: n * k overlaps
    """
    n_ = boxes.shape[0]
    k_ = query_boxes.shape[0]
    overlaps = np.zeros((n_, k_), dtype=np.float)
    for k in range(k_):
        query_box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1)
        for n in range(n_):
            iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1
            if iw > 0:
                ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1
                if ih > 0:
                    box_area = (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)
                    all_area = float(box_area + query_box_area - iw * ih)
                    overlaps[n, k] = iw * ih / all_area
    return overlaps
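A quick sanity-check call with two hand-made boxes (values invented for illustration; the +1 terms mean coordinates are treated as inclusive pixel indices):

boxes = np.array([[0, 0, 9, 9]], dtype=np.float)                   # one 10x10 box
query = np.array([[0, 0, 9, 9], [5, 5, 14, 14]], dtype=np.float)   # identical box and a shifted one
print(bbox_overlaps(boxes, query))
# roughly [[1.0, 0.14]] -- the second entry is 25 / (100 + 100 - 25)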
Example 10: _coco_results_one_category
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def _coco_results_one_category(self, boxes, cat_id):
    results = []
    for im_ind, roi_rec in enumerate(self.roidb):
        index = roi_rec['index']
        dets = boxes[im_ind].astype(np.float)
        if len(dets) == 0:
            continue
        scores = dets[:, -1]
        xs = dets[:, 0]
        ys = dets[:, 1]
        ws = dets[:, 2] - xs + 1
        hs = dets[:, 3] - ys + 1
        result = [{'image_id': index,
                   'category_id': cat_id,
                   'bbox': [xs[k], ys[k], ws[k], hs[k]],
                   'score': scores[k]} for k in range(dets.shape[0])]
        results.extend(result)
    return results
Example 11: convert_dropout
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def convert_dropout(node, **kwargs):
    """Map MXNet's Dropout operator attributes to onnx's Dropout operator
    and return the created node.
    """
    onnx = import_onnx_modules()
    name = node["name"]
    input_id = kwargs["index_lookup"][node["inputs"][0][0]]
    input_name = kwargs["proc_nodes"][input_id].name
    attrs = node["attrs"]
    probability = float(attrs["p"])
    dropout_node = onnx.helper.make_node(
        "Dropout",
        [input_name],
        [name],
        ratio=probability,
        name=name
    )
    return [dropout_node]
Example 12: convert_clip
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def convert_clip(node, **kwargs):
    """Map MXNet's Clip operator attributes to onnx's Clip operator
    and return the created node.
    """
    onnx = import_onnx_modules()
    name = node["name"]
    input_idx = kwargs["index_lookup"][node["inputs"][0][0]]
    proc_nodes = kwargs["proc_nodes"]
    input_node = proc_nodes[input_idx].name
    attrs = node["attrs"]
    a_min = np.float(attrs.get('a_min', -np.inf))
    a_max = np.float(attrs.get('a_max', np.inf))
    clip_node = onnx.helper.make_node(
        "Clip",
        [input_node],
        [name],
        name=name,
        min=a_min,
        max=a_max
    )
    return [clip_node]
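For context, the converter above reads a handful of fields from an MXNet JSON-style node dictionary; a hedged sketch of that shape (key names taken from the code, the values are invented):

node = {
    "name": "clip0",
    "inputs": [[0, 0]],                        # resolved through kwargs["index_lookup"]
    "attrs": {"a_min": "0.0", "a_max": "6.0"}  # parsed with np.float; missing keys fall back to +/-inf
}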
Example 13: breastcancer_cont
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def breastcancer_cont(replication=2):
    f = open(path + "breast_cancer_wisconsin_cont.txt", "r")
    data = np.loadtxt(f, delimiter=",", dtype=np.string0)
    x_train = np.array(data[:, range(0, 9)])
    y_train = np.array(data[:, 9])
    for j in range(replication - 1):
        x_train = np.vstack([x_train, data[:, range(0, 9)]])
        y_train = np.hstack([y_train, data[:, 9]])
    x_train = np.array(x_train, dtype=np.float)
    f = open(path + "breast_cancer_wisconsin_cont_test.txt")
    data = np.loadtxt(f, delimiter=",", dtype=np.string0)
    x_test = np.array(data[:, range(0, 9)])
    y_test = np.array(data[:, 9])
    for j in range(replication - 1):
        x_test = np.vstack([x_test, data[:, range(0, 9)]])
        y_test = np.hstack([y_test, data[:, 9]])
    x_test = np.array(x_test, dtype=np.float)
    return x_train, y_train, x_test, y_test
Example 14: iris
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def iris(replication=2):
    f = open(path + "iris.txt")
    data = np.loadtxt(f, delimiter=",", dtype=np.string0)
    x_train = np.array(data[:, range(0, 4)], dtype=np.float)
    y_train = data[:, 4]
    for j in range(replication - 1):
        x_train = np.vstack([x_train, data[:, range(0, 4)]])
        y_train = np.hstack([y_train, data[:, 4]])
    x_train = np.array(x_train, dtype=np.float)
    f = open(path + "iris_test.txt")
    data = np.loadtxt(f, delimiter=",", dtype=np.string0)
    x_test = np.array(data[:, range(0, 4)], dtype=np.float)
    y_test = data[:, 4]
    for j in range(replication - 1):
        x_test = np.vstack([x_test, data[:, range(0, 4)]])
        y_test = np.hstack([y_test, data[:, 4]])
    x_test = np.array(x_test, dtype=np.float)
    return x_train, y_train, x_test, y_test
Example 15: compute_cor_loc
# Required import: import numpy [as alias]
# Or: from numpy import float [as alias]
def compute_cor_loc(num_gt_imgs_per_class,
                    num_images_correctly_detected_per_class):
    """Compute CorLoc according to the definition in the following paper.
    https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf
    Returns nans if there are no ground truth images for a class.
    Args:
        num_gt_imgs_per_class: 1D array, representing the number of images
            containing at least one object instance of a particular class
        num_images_correctly_detected_per_class: 1D array, representing the
            number of images in which at least one object instance of a
            particular class was correctly detected
    Returns:
        corloc_per_class: A float numpy array representing the CorLoc score
            of each class
    """
    return np.where(
        num_gt_imgs_per_class == 0,
        np.nan,
        num_images_correctly_detected_per_class / num_gt_imgs_per_class)
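A toy call showing the NaN behaviour documented above (the per-class counts are invented; NumPy may emit a divide-by-zero RuntimeWarning for the empty class, but np.where still returns NaN for that slot):

num_gt = np.array([10., 0., 5.])
num_correct = np.array([7., 0., 5.])
print(compute_cor_loc(num_gt, num_correct))   # -> [0.7  nan  1.0]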