This article collects typical usage examples of the numpy.arange method in Python. If you are unsure what numpy.arange does, how to call it, or how it is used in real code, the curated examples below may help. You can also explore other methods of the numpy module for further usage examples.
The following 15 code examples of numpy.arange are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
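Before diving into the project code below, here is a minimal, self-contained sketch of the basic np.arange call patterns; the values are chosen purely for illustration.

import numpy as np

# np.arange(stop): integers from 0 up to (but not including) stop
print(np.arange(5))               # [0 1 2 3 4]
# np.arange(start, stop, step): half-open interval [start, stop)
print(np.arange(2, 10, 2))        # [2 4 6 8]
# Float steps work but accumulate rounding error; np.linspace is often safer for floats
print(np.arange(0.0, 1.0, 0.25))  # [0.   0.25 0.5  0.75]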
Example 1: add_intercept
# Required import: import numpy as np
# Or: from numpy import arange
def add_intercept(self, X):
    """Add a column of 1's to the data as the last feature."""
    # Data shape
    N, D = X.shape
    # Check whether an intercept column (a column of all 1's) already exists
    if np.any(np.sum(X, axis=0) == N):
        # Report
        print('Intercept is not the last feature. Swapping..')
        # Find which column contains the intercept
        intercept_index = np.argwhere(np.sum(X, axis=0) == N)
        # Drop the existing intercept column; it is re-added as the last column below
        X = X[:, np.setdiff1d(np.arange(D), intercept_index)]
        D = X.shape[1]
    # Append a column of 1's to the data as the last feature
    X = np.hstack((X, np.ones((N, 1))))
    # Return augmented data and the incremented dimensionality
    return X, D + 1
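The swap above relies on indexing with np.setdiff1d(np.arange(D), intercept_index) to drop a column by position. A tiny standalone sketch of that idiom, using a made-up 2x3 matrix whose first column is an existing intercept:

import numpy as np

X = np.array([[1., 5., 2.],
              [1., 7., 3.]])
N, D = X.shape
intercept_index = np.argwhere(np.sum(X, axis=0) == N)   # -> [[0]]
X = X[:, np.setdiff1d(np.arange(D), intercept_index)]   # keep columns 1 and 2
X = np.hstack((X, np.ones((N, 1))))                     # re-append the 1's column last
print(X)                                                # rows: [5. 2. 1.] and [7. 3. 1.]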
Example 2: create
# Required import: import numpy as np
# Or: from numpy import arange
def create(clz):
    """One-time creation of the app's objects.
    This function is called once and is responsible for
    creating all objects (plots, data sources, etc.).
    """
    self = clz()
    n_vals = 1000
    # ColumnDataSource is provided by the Bokeh plotting library
    self.source = ColumnDataSource(
        data=dict(
            top=[],
            bottom=0,
            left=[],
            right=[],
            x=np.arange(n_vals),
            values=np.random.randn(n_vals)
        ))
    # Generate a figure container
    self.stock_plot = clz.create_stock(self.source)
    self.update_data()
    self.children.append(self.stock_plot)
Example 3: generate_anchors_pre
# Required import: import numpy as np
# Or: from numpy import arange
def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
    """A wrapper function to generate anchors given different scales.
    Also returns the number of anchors in the variable 'length'.
    """
    anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
    A = anchors.shape[0]
    shift_x = np.arange(0, width) * feat_stride
    shift_y = np.arange(0, height) * feat_stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
    K = shifts.shape[0]
    # width changes faster, so here it is H, W, C
    anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
    length = np.int32(anchors.shape[0])
    return anchors, length
Developer: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 19 | Source file: snippets.py
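The anchor shifts above are just an np.arange grid scaled by the feature stride. A reduced sketch with a toy 2x3 feature map (the feat_stride, height and width values are made up) shows the shape of the resulting shift matrix:

import numpy as np

feat_stride, height, width = 16, 2, 3
shift_x = np.arange(0, width) * feat_stride    # [ 0 16 32]
shift_y = np.arange(0, height) * feat_stride   # [ 0 16]
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()
print(shifts.shape)   # (6, 4): one (x1, y1, x2, y2) offset per feature-map cell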
Example 4: batch_iter
# Required import: import numpy as np
# Or: from numpy import arange
def batch_iter(self, data, batch_size, num_epochs, shuffle=True):
    """
    Generates a batch iterator for a dataset.
    """
    data = np.asarray(data)
    print(data)
    print(data.shape)
    data_size = len(data)
    # Ceiling division: covers a final partial batch without emitting an empty one
    num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        if shuffle:
            shuffle_indices = np.random.permutation(np.arange(data_size))
            shuffled_data = data[shuffle_indices]
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
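The shuffling step above combines np.arange with np.random.permutation to reorder a whole array via fancy indexing. A minimal sketch with a made-up five-element array:

import numpy as np

data = np.array(['a', 'b', 'c', 'd', 'e'])
shuffle_indices = np.random.permutation(np.arange(len(data)))  # e.g. [3 0 4 1 2]
shuffled_data = data[shuffle_indices]                          # e.g. ['d' 'a' 'e' 'b' 'c']
print(shuffle_indices, shuffled_data)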
Example 5: active_net_list
# Required import: import numpy as np
# Or: from numpy import arange
def active_net_list(self):
    net_list = [["input", 0, 0]]
    # Map every raw node index to its position once inactive nodes are skipped
    active_cnt = np.arange(self.net_info.input_num + self.net_info.node_num + self.net_info.out_num)
    active_cnt[self.net_info.input_num:] = np.cumsum(self.is_active)
    for n, is_a in enumerate(self.is_active):
        if is_a:
            t = self.gene[n][0]
            if n < self.net_info.node_num:  # intermediate node
                type_str = self.net_info.func_type[t]
            else:  # output node
                type_str = self.net_info.out_type[t]
            connections = [active_cnt[self.gene[n][i + 1]] for i in range(self.net_info.max_in_num)]
            net_list.append([type_str] + connections)
    return net_list

# CGP with (1 + \lambda)-ES
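The active_cnt array above is an np.arange index table whose tail is overwritten with a running count of active nodes, so connection genes that point at raw node indices can be remapped onto the compacted list. A small sketch with made-up sizes:

import numpy as np

input_num = 2
is_active = np.array([True, False, True, True])      # activity flags for the non-input nodes
active_cnt = np.arange(input_num + len(is_active))   # [0 1 2 3 4 5]
active_cnt[input_num:] = np.cumsum(is_active)        # inputs keep 0, 1; nodes map to 1, 1, 2, 3
print(active_cnt)                                    # [0 1 1 1 2 3]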
Example 6: create_mnist
# Required import: import numpy as np
# Or: from numpy import arange
def create_mnist(tfrecord_dir, mnist_dir):
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
        images = np.frombuffer(file.read(), np.uint8, offset=16)
    with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file:
        labels = np.frombuffer(file.read(), np.uint8, offset=8)
    images = images.reshape(-1, 1, 28, 28)
    # Pad the 28x28 digits to 32x32
    images = np.pad(images, [(0, 0), (0, 0), (2, 2), (2, 2)], 'constant', constant_values=0)
    assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (60000,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    # One-hot encode the labels via np.arange fancy indexing
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0
    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#----------------------------------------------------------------------------
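The onehot construction above (and in the CIFAR-100 example that follows) uses np.arange as the row index in a fancy-indexing assignment. A standalone sketch with four made-up labels:

import numpy as np

labels = np.array([2, 0, 1, 2], dtype=np.uint8)
onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
# Pair row i (from np.arange) with column labels[i] and set those entries to 1
onehot[np.arange(labels.size), labels] = 1.0
print(onehot)
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]]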
Example 7: create_cifar100
# Required import: import numpy as np
# Or: from numpy import arange
def create_cifar100(tfrecord_dir, cifar100_dir):
    print('Loading CIFAR-100 from "%s"' % cifar100_dir)
    import pickle
    with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
        data = pickle.load(file, encoding='latin1')
    images = data['data'].reshape(-1, 3, 32, 32)
    labels = np.array(data['fine_labels'])
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 99
    # One-hot encode the fine labels via np.arange fancy indexing
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0
    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#----------------------------------------------------------------------------
Example 8: test_generate_np_targeted_gives_adversarial_example
# Required import: import numpy as np
# Or: from numpy import arange
def test_generate_np_targeted_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)
    # Random target labels in {0, 1}; note that randint's upper bound is exclusive
    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), np.random.randint(0, 2, 100)] = 1
    x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                    binary_search_steps=3,
                                    initial_const=1,
                                    clip_min=-5, clip_max=5,
                                    batch_size=100, y_target=feed_labs)
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs)
                    > 0.9)
Example 9: test_generate_gives_adversarial_example
# Required import: import numpy as np
# Or: from numpy import arange
def test_generate_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)
    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    # One-hot encode the original predictions
    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), orig_labs] = 1
    x = tf.placeholder(tf.float32, x_val.shape)
    y = tf.placeholder(tf.float32, feed_labs.shape)
    x_adv_p = self.attack.generate(x, max_iterations=100,
                                   binary_search_steps=3,
                                   initial_const=1,
                                   clip_min=-5, clip_max=5,
                                   batch_size=100, y=y)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example 10: test_generate_targeted_gives_adversarial_example
# Required import: import numpy as np
# Or: from numpy import arange
def test_generate_targeted_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)
    # Random target labels in {0, 1}; note that randint's upper bound is exclusive
    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), np.random.randint(0, 2, 100)] = 1
    x = tf.placeholder(tf.float32, x_val.shape)
    y = tf.placeholder(tf.float32, feed_labs.shape)
    x_adv_p = self.attack.generate(x, max_iterations=100,
                                   binary_search_steps=3,
                                   initial_const=1,
                                   clip_min=-5, clip_max=5,
                                   batch_size=100, y_target=y)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs)
                    > 0.9)
Example 11: to_categorical
# Required import: import numpy as np
# Or: from numpy import arange
# (also requires: import warnings)
def to_categorical(y, num_classes=None):
    """
    Converts a class vector (integers) to a binary class matrix.
    This is adapted from the Keras function with the same name.
    :param y: class vector to be converted into a matrix
              (integers from 0 to num_classes).
    :param num_classes: total number of classes.
    :return: A binary matrix representation of the input.
    """
    y = np.array(y, dtype='int').ravel()
    if not num_classes:
        num_classes = np.max(y) + 1
        warnings.warn("FutureWarning: the default value of the second "
                      "argument in function \"to_categorical\" is deprecated. "
                      "On 2018-9-19, the second argument "
                      "will become mandatory.")
    n = y.shape[0]
    categorical = np.zeros((n, num_classes))
    categorical[np.arange(n), y] = 1
    return categorical
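A quick usage sketch of the function above, assuming it is in scope together with numpy and warnings; the label vector is made up:

y = [0, 2, 1, 2]
print(to_categorical(y, num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]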
Example 12: compute_mfcc
# Required import: import numpy as np
# Or: from numpy import arange
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """
    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)
    # 1. Pre-emphasis, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97 * audio[:, :-1],
                       np.zeros((batch_size, 1000), dtype=np.float32)), 1)
    # 2. Windowing into overlapping frames of 400 samples with a 160-sample step
    windowed = tf.stack([audio[:, i:i + 400] for i in range(0, size - 320, 160)], 1)
    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))
    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted, axis=2) + 1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters] * batch_size, dtype=np.float32)) + 1e-30
    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:, :, :26]
    # 6. Amplify high frequencies for some reason
    _, nframes, ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22 / 2.) * np.sin(np.pi * n / 22)
    feat = lift * feat
    width = feat.get_shape().as_list()[1]
    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy), (-1, width, 1)), feat[:, :, 1:]), axis=2)
    return feat
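Step 6 above builds a sinusoidal "liftering" vector from np.arange and broadcasts it over the cepstral coefficients. A NumPy-only sketch of that vector, reusing the same constants as the code above:

import numpy as np

ncoeff = 26
n = np.arange(ncoeff)
lift = 1 + (22 / 2.) * np.sin(np.pi * n / 22)
print(lift.shape, lift[0], lift[11])   # (26,) 1.0 12.0 -- starts at 1.0, peaks at coefficient 11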
Example 13: draw_graph
# Required import: import numpy as np
# Or: from numpy import arange
def draw_graph(self, data_points, varieties):
    """
    Draw all elements of the graph.
    """
    self.fig, self.ax = plt.subplots()
    x = np.arange(0, data_points)
    self.create_lines(x, self.ax, varieties)
    self.ax.legend()
    self.ax.set_title(self.title)
Example 14: remove_intercept
# Required import: import numpy as np
# Or: from numpy import arange
def remove_intercept(self, X):
    """Remove intercept columns (columns of all 1's) from the data."""
    # Data shape
    N, D = X.shape
    # Find which columns contain the intercept
    intercept_index = []
    for d in range(D):
        if np.all(X[:, d] == 1):
            intercept_index.append(d)
    # Remove the intercept columns
    X = X[:, np.setdiff1d(np.arange(D), intercept_index)]
    return X, D - len(intercept_index)
Example 15: project_simplex
# Required import: import numpy as np
# Or: from numpy import arange
def project_simplex(self, v, z=1.0):
    """
    Project a vector onto the simplex using sorting.

    Reference: "Efficient Projections onto the L1-Ball for Learning in High
    Dimensions" (Duchi, Shalev-Shwartz, Singer, Chandra, 2008).

    Parameters
    ----------
    v : array
        vector to be projected, shape (n,)
    z : float
        simplex scaling constant (default: 1.0)

    Returns
    -------
    w : array
        projected vector, shape (n,)
    """
    # Number of dimensions
    n = v.shape[0]
    # Sort vector in descending order
    mu = np.sort(v, axis=0)[::-1]
    # Find rho, the largest index j for which mu_j - (cumsum_j - z)/j > 0
    C = np.cumsum(mu) - z
    j = np.arange(n) + 1
    rho = j[mu - C / j > 0][-1]
    # Define theta
    theta = C[mu - C / j > 0][-1] / float(rho)
    # Subtract theta from the original vector and clip at 0
    w = np.maximum(v - theta, 0)
    # Return projected vector
    return w
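A quick usage sketch; clf here is only a hypothetical name for whatever object exposes the method above, and the input vector is arbitrary:

import numpy as np

v = np.array([0.5, 2.0, -1.0])
w = clf.project_simplex(v, z=1.0)
print(w, w.sum())   # [0. 1. 0.] 1.0 -- non-negative entries summing to z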