This article collects typical usage examples of the numpy.int64 method in Python. If you are wondering what numpy.int64 does, how to use it, or where to find concrete examples, the curated code samples below may help. You can also explore further usage examples from the numpy module.
The following shows 15 code examples of numpy.int64, sorted by popularity by default.
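Before the examples, a minimal self-contained sketch of what numpy.int64 is: a fixed-width, 64-bit signed integer that serves both as a scalar type and as an array dtype. The values below are illustrative only.

import numpy as np

# np.int64 as an array dtype: every element is a 64-bit signed integer.
ids = np.array([1, 2, 3], dtype=np.int64)
assert ids.dtype == np.int64

# np.int64 as a scalar constructor: cast a string or Python number to a 64-bit integer.
n = np.int64("42")
assert isinstance(n, np.integer) and n == 42

# Cast an existing array between integer widths.
assert ids.astype(np.int32).astype(np.int64).dtype == np.int64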
Example 1: testBasicExampleReading
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def testBasicExampleReading(self):
  dataset = self.problem.dataset(
      tf.estimator.ModeKeys.TRAIN,
      data_dir=self.data_dir,
      shuffle_files=False)
  examples = dataset.make_one_shot_iterator().get_next()
  with tf.train.MonitoredSession() as sess:
    # Check that there are multiple examples that have the right fields of the
    # right type (lists of int/float).
    for _ in range(10):
      ex_val = sess.run(examples)
      inputs, targets, floats = (ex_val["inputs"], ex_val["targets"],
                                 ex_val["floats"])
      self.assertEqual(np.int64, inputs.dtype)
      self.assertEqual(np.int64, targets.dtype)
      self.assertEqual(np.float32, floats.dtype)
      for field in [inputs, targets, floats]:
        self.assertGreater(len(field), 0)
Example 2: draw_heatmap
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 or \
        (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        # Resize the heatmap to the image size (via uint8, using nearest-neighbour interpolation).
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    # Colorize the [0, 1] heatmap with the jet colormap and drop the alpha channel.
    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255

    # Blend image and colorized heatmap.
    mix = (1 - alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix
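A minimal usage sketch for the function above. It assumes draw_heatmap is defined as in this example together with its imports (numpy as np, imgaug as ia, matplotlib.pyplot as plt); the image and heatmap values are made up for illustration.

import numpy as np

# Hypothetical inputs: an int64 image and a float32 heatmap of matching size.
img = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.int64)
heatmap = np.random.rand(64, 64).astype(np.float32)

overlay = draw_heatmap(img, heatmap, alpha=0.5)  # uint8 array of shape (64, 64, 3)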
Example 3: transform
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def transform(self, raw_documents):
  """Transform documents to word-id matrix.

  Convert words to ids with vocabulary fitted with fit or the one
  provided in the constructor.

  Args:
    raw_documents: An iterable which yields either str or unicode.

  Yields:
    x: iterable, [n_samples, max_document_length]. Word-id matrix.
  """
  for tokens in self._tokenizer(raw_documents):
    word_ids = np.zeros(self.max_document_length, np.int64)
    for idx, token in enumerate(tokens):
      if idx >= self.max_document_length:
        break
      word_ids[idx] = self.vocabulary_.get(token)
    yield word_ids
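The method above depends on a fitted vocabulary object. As a standalone illustration (not the original class), a plain dict can play the same role of mapping tokens to int64 ids, with 0 used for padding and unknown tokens; the vocabulary below is hypothetical.

import numpy as np

max_document_length = 5
vocabulary = {"<UNK>": 0, "hello": 1, "world": 2}

def tokens_to_ids(tokens):
    word_ids = np.zeros(max_document_length, np.int64)  # zero-padded int64 row
    for idx, token in enumerate(tokens):
        if idx >= max_document_length:
            break
        word_ids[idx] = vocabulary.get(token, 0)  # unknown tokens map to 0
    return word_ids

print(tokens_to_ids(["hello", "world", "again"]))  # [1 2 0 0 0]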
Example 4: __iter__
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def __iter__(self):
    indices = []
    for i, size in enumerate(self.group_sizes):
        if size == 0:
            continue
        indice = np.where(self.flag == i)[0]
        assert len(indice) == size
        np.random.shuffle(indice)
        # Pad each group with randomly repeated samples so its size is a
        # multiple of samples_per_gpu.
        num_extra = int(np.ceil(size / self.samples_per_gpu)
                        ) * self.samples_per_gpu - len(indice)
        indice = np.concatenate(
            [indice, np.random.choice(indice, num_extra)])
        indices.append(indice)
    indices = np.concatenate(indices)
    # Shuffle at the granularity of whole per-GPU batches, then flatten.
    indices = [
        indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
        for i in np.random.permutation(
            range(len(indices) // self.samples_per_gpu))
    ]
    indices = np.concatenate(indices)
    indices = indices.astype(np.int64).tolist()
    assert len(indices) == self.num_samples
    return iter(indices)
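The core trick in this sampler is padding each group to a multiple of samples_per_gpu before casting the indices to int64. A small numpy-only sketch of that step, with made-up sizes:

import numpy as np

samples_per_gpu = 4
indice = np.arange(6)  # a group with 6 sample indices
num_extra = int(np.ceil(len(indice) / samples_per_gpu)) * samples_per_gpu - len(indice)
indice = np.concatenate([indice, np.random.choice(indice, num_extra)])
batches = indice.reshape(-1, samples_per_gpu).astype(np.int64)  # shape (2, 4)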
Example 5: _parse_anns
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def _parse_anns(self, results, anns, img):
    gt_bboxes = []
    gt_labels = []
    gt_masks_ann = []
    for ann in anns:
        x1, y1, w, h = ann['bbox']
        # TODO: more essential bug need to be fixed in instaboost
        if w <= 0 or h <= 0:
            continue
        bbox = [x1, y1, x1 + w, y1 + h]
        gt_bboxes.append(bbox)
        gt_labels.append(ann['category_id'])
        gt_masks_ann.append(ann['segmentation'])
    gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
    gt_labels = np.array(gt_labels, dtype=np.int64)
    results['ann_info']['labels'] = gt_labels
    results['ann_info']['bboxes'] = gt_bboxes
    results['ann_info']['masks'] = gt_masks_ann
    results['img'] = img
    return results
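A reduced sketch of just the dtype handling above: box coordinates become a float32 array while class labels become an int64 vector, which is the convention most detection pipelines expect. The annotations below are made up.

import numpy as np

anns = [  # hypothetical COCO-style annotations
    {'bbox': [10, 20, 30, 40], 'category_id': 1},
    {'bbox': [5, 5, 0, 15], 'category_id': 2},  # zero-width box, skipped
]
gt_bboxes, gt_labels = [], []
for ann in anns:
    x1, y1, w, h = ann['bbox']
    if w <= 0 or h <= 0:
        continue
    gt_bboxes.append([x1, y1, x1 + w, y1 + h])
    gt_labels.append(ann['category_id'])
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)  # shape (1, 4)
gt_labels = np.array(gt_labels, dtype=np.int64)    # shape (1,)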
Example 6: prepare_sparse_params
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def prepare_sparse_params(self, param_rowids):
    '''Prepares the module for processing a data batch by pulling row_sparse
    parameters from kvstore to all devices based on rowids.

    Parameters
    ----------
    param_rowids : dict of str to NDArray or list of NDArrays
    '''
    if not self._kvstore:
        return
    assert(isinstance(param_rowids, dict))
    for param_name, rowids in param_rowids.items():
        if isinstance(rowids, (tuple, list)):
            rowids_1d = []
            for r in rowids:
                rowids_1d.append(r.reshape((-1,)).astype(np.int64))
            rowid = mx.nd.concat(*rowids_1d, dim=0)
        else:
            rowid = rowids
        param_idx = self._exec_group.param_names.index(param_name)
        param_val = self._exec_group.param_arrays[param_idx]
        self._kvstore.row_sparse_pull(param_name, param_val, row_ids=rowid,
                                      priority=-param_idx)
Example 7: get_params_from_kv
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def get_params_from_kv(self, arg_params, aux_params):
    """Copy data from kvstore to `arg_params` and `aux_params`.

    Parameters
    ----------
    arg_params : list of NDArray
        Target parameter arrays.
    aux_params : list of NDArray
        Target aux arrays.

    Notes
    -----
    - This function updates the NDArrays in `arg_params` and `aux_params` in place.
    """
    assert(self._kvstore is not None)
    for name, block in zip(self._exec_group.param_names, self._exec_group.param_arrays):
        assert(isinstance(block, list))
        if block[0].stype == 'row_sparse':
            row_ids = mx.nd.arange(start=0, stop=block[0].shape[0], dtype='int64')
            self._kvstore.row_sparse_pull(name, arg_params[name], row_ids=row_ids)
        else:
            assert(block[0].stype == 'default')
            self._kvstore.pull(name, out=arg_params[name])
    if len(aux_params) > 0:
        raise NotImplementedError()
    return arg_params, aux_params
Example 8: update
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def update(self, labels, preds):
    """Updates the internal evaluation result.

    Parameters
    ----------
    labels : list of `NDArray`
        The labels of the data.
    preds : list of `NDArray`
        Predicted values.
    """
    mx.metric.check_label_shapes(labels, preds)

    for label, pred in zip(labels, preds):
        label = label.asnumpy()
        pred = pred.asnumpy()
        pred = np.column_stack((1 - pred, pred))
        label = label.ravel()
        num_examples = pred.shape[0]
        assert label.shape[0] == num_examples, (label.shape[0], num_examples)
        prob = pred[np.arange(num_examples, dtype=np.int64), np.int64(label)]
        self.sum_metric += (-np.log(prob + self.eps)).sum()
        self.num_inst += num_examples
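The np.int64 usage above is the fancy-indexing step that picks, for each row, the predicted probability of its true class. A standalone numpy sketch of that pattern with made-up values; label.astype(np.int64) is used here as the explicit equivalent of the np.int64(label) cast in the example.

import numpy as np

pred = np.array([[0.7, 0.3],
                 [0.2, 0.8],
                 [0.6, 0.4]])      # (num_examples, num_classes)
label = np.array([0.0, 1.0, 1.0])  # labels often arrive as floats

rows = np.arange(pred.shape[0], dtype=np.int64)
cols = label.astype(np.int64)      # cast float labels to integer column indices
prob = pred[rows, cols]            # -> array([0.7, 0.8, 0.4])
nll = (-np.log(prob + 1e-12)).sum()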
Example 9: update
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def update(self, labels, preds):
    """Update the cross-entropy and SmoothL1 metrics from the network outputs."""
    # get generated multi label from network
    cls_prob = preds[0].asnumpy()
    loc_loss = preds[1].asnumpy()
    cls_label = preds[2].asnumpy()
    valid_count = np.sum(cls_label >= 0)
    # overall accuracy & object accuracy
    label = cls_label.flatten()
    mask = np.where(label >= 0)[0]
    indices = np.int64(label[mask])
    prob = cls_prob.transpose((0, 2, 1)).reshape((-1, cls_prob.shape[1]))
    prob = prob[mask, indices]
    self.sum_metric[0] += (-np.log(prob + self.eps)).sum()
    self.num_inst[0] += valid_count
    # smoothl1loss
    self.sum_metric[1] += np.sum(loc_loss)
    self.num_inst[1] += valid_count
Example 10: update
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def update(self, labels, preds):
    """Updates the internal evaluation result.

    Parameters
    ----------
    labels : list of `NDArray`
        The labels of the data.
    preds : list of `NDArray`
        Predicted values.
    """
    labels, preds = check_label_shapes(labels, preds, True)

    for label, pred in zip(labels, preds):
        label = label.asnumpy()
        pred = pred.asnumpy()

        label = label.ravel()
        assert label.shape[0] == pred.shape[0]

        prob = pred[numpy.arange(label.shape[0]), numpy.int64(label)]
        self.sum_metric += (-numpy.log(prob + self.eps)).sum()
        self.num_inst += label.shape[0]
Example 11: test_create_row_sparse
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def test_create_row_sparse():
    dim0 = 50
    dim1 = 50
    densities = [0, 0.5, 1]
    for density in densities:
        shape = rand_shape_2d(dim0, dim1)
        matrix = rand_ndarray(shape, 'row_sparse', density)
        data = matrix.data
        indices = matrix.indices
        rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape)
        assert rsp_created.stype == 'row_sparse'
        assert same(rsp_created.data.asnumpy(), data.asnumpy())
        assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
        rsp_copy = mx.nd.array(rsp_created)
        assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))

        # add this test since we added np.int32 and np.int64 to integer_types
        if len(shape) == 2:
            for np_int_type in (np.int32, np.int64):
                shape = list(shape)
                shape = [np_int_type(x) for x in shape]
                arg1 = tuple(shape)
                mx.nd.sparse.row_sparse_array(arg1, tuple(shape))
                shape[0] += 1
                assert_exception(mx.nd.sparse.row_sparse_array, ValueError, arg1, tuple(shape))
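The integer-type part of this test also holds for NumPy itself: np.int32 and np.int64 scalars are accepted anywhere a plain Python int is expected as a dimension. A tiny sketch:

import numpy as np

shape = (np.int64(2), np.int32(3))  # numpy integer scalars as shape entries
assert np.zeros(shape).shape == (2, 3)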
Example 12: _extract_labels
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def _extract_labels(filename, num_labels):
  """Extract the labels into a vector of int64 label IDs.

  Args:
    filename: The path to an MNIST labels file.
    num_labels: The number of labels in the file.

  Returns:
    A numpy array of shape [number_of_labels].
  """
  print('Extracting labels from: ', filename)
  with gzip.open(filename) as bytestream:
    bytestream.read(8)
    buf = bytestream.read(1 * num_labels)
    labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
  return labels
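The same byte-to-int64 pattern without a real MNIST file, using a handful of made-up label bytes:

import numpy as np

raw = bytes([0, 1, 2, 7, 9])  # pretend label bytes read from the file
labels = np.frombuffer(raw, dtype=np.uint8).astype(np.int64)
assert labels.dtype == np.int64  # array([0, 1, 2, 7, 9])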
Example 13: build_inputs
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def build_inputs(self):
  if self.mode == "encode":
    # Encode mode doesn't read from disk, so defer to parent.
    return super(SkipThoughtsModel, self).build_inputs()
  else:
    # Replace disk I/O with random Tensors.
    self.encode_ids = tf.random_uniform(
        [self.config.batch_size, 15],
        minval=0,
        maxval=self.config.vocab_size,
        dtype=tf.int64)
    self.decode_pre_ids = tf.random_uniform(
        [self.config.batch_size, 15],
        minval=0,
        maxval=self.config.vocab_size,
        dtype=tf.int64)
    self.decode_post_ids = tf.random_uniform(
        [self.config.batch_size, 15],
        minval=0,
        maxval=self.config.vocab_size,
        dtype=tf.int64)
    self.encode_mask = tf.ones_like(self.encode_ids)
    self.decode_pre_mask = tf.ones_like(self.decode_pre_ids)
    self.decode_post_mask = tf.ones_like(self.decode_post_ids)
Example 14: test_indices_to_dense_vector_int
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def test_indices_to_dense_vector_int(self):
  size = 500
  num_indices = 25
  rand_indices = np.random.permutation(np.arange(size))[0:num_indices]

  expected_output = np.zeros(size, dtype=np.int64)
  expected_output[rand_indices] = 1

  tf_rand_indices = tf.constant(rand_indices)
  indicator = ops.indices_to_dense_vector(
      tf_rand_indices, size, 1, dtype=tf.int64)

  with self.test_session() as sess:
    output = sess.run(indicator)
    self.assertAllEqual(output, expected_output)
    self.assertEqual(output.dtype, expected_output.dtype)
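The expected output in this test is itself a small np.int64 recipe worth noting: build a dense 0/1 indicator vector from a list of indices. A numpy-only sketch with made-up sizes:

import numpy as np

size, indices = 10, np.array([2, 5, 7])
indicator = np.zeros(size, dtype=np.int64)
indicator[indices] = 1  # dense int64 vector with ones at the given positions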
Example 15: _convert_observ
# Required import: import numpy [as alias]
# Or: from numpy import int64 [as alias]
def _convert_observ(self, observ):
  """Convert the observation to 32 bits.

  Args:
    observ: Numpy observation.

  Raises:
    ValueError: Observation contains infinite values.

  Returns:
    Numpy observation with 32-bit data type.
  """
  if not np.isfinite(observ).all():
    raise ValueError('Infinite observation encountered.')
  if observ.dtype == np.float64:
    return observ.astype(np.float32)
  if observ.dtype == np.int64:
    return observ.astype(np.int32)
  return observ
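A quick sketch of the int64-to-int32 branch above with a plain array:

import numpy as np

observ = np.arange(4, dtype=np.int64)
converted = observ.astype(np.int32) if observ.dtype == np.int64 else observ
assert converted.dtype == np.int32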