This article collects typical usage examples of the norm function from nengo.utils.numpy in Python. If you are wondering what exactly norm does, how to call it, and what real code that uses it looks like, the curated examples here should help.
Below are 15 code examples of the norm function, ordered by popularity.
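Before diving into the examples, here is a minimal sketch of the function itself. It assumes (consistent with how the examples below use it) that norm(x, axis=None, keepdims=False) computes the Euclidean (L2) norm of x along the given axis, i.e. np.sqrt(np.sum(x**2, axis=axis, keepdims=keepdims)):

import numpy as np
import nengo.utils.numpy as npext

x = np.array([[3.0, 4.0],
              [1.0, 0.0]])
print(npext.norm(x, axis=1))                 # [5. 1.]
print(npext.norm(x, axis=1, keepdims=True))  # shape (2, 1); convenient for row-wise division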
Example 1: similarity

def similarity(v1, v2):
    # v1 and v2 are vectors; returns their cosine similarity
    eps = np.nextafter(0, 1)  # smallest float above zero
    dot = np.dot(v1, v2)
    dot /= max(npext.norm(v1), eps)
    dot /= max(npext.norm(v2), eps)
    return dot
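A quick hypothetical call (assuming numpy and nengo.utils.numpy are imported as np and npext, as in the snippet):

import numpy as np
import nengo.utils.numpy as npext

print(similarity(np.array([1.0, 0.0]), np.array([1.0, 0.0])))  # 1.0
print(similarity(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 0.0
print(similarity(np.zeros(2), np.array([1.0, 0.0])))           # 0.0, thanks to the eps guard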
Example 2: cd_encoders_biases

def cd_encoders_biases(n_encoders, trainX, trainY, rng=np.random, mask=None,
                       norm_min=0.05, norm_tries=10):
    """Constrained difference (CD) method for encoders from data [1]_.

    Parameters
    ----------
    n_encoders : int
        Number of encoders to generate.
    trainX : (n_samples, n_dimensions) array-like
        Training features.
    trainY : (n_samples,) array-like
        Training labels.

    Returns
    -------
    encoders : (n_encoders, n_dimensions) array
        Generated encoders.
    biases : (n_encoders,) array
        Generated biases. These are biases assuming `f = G[E * X + b]`,
        and are therefore more like Nengo's `intercepts`.

    References
    ----------
    .. [1] McDonnell, M. D., Tissera, M. D., Vladusich, T., Van Schaik, A.,
       Tapson, J., & Schwenker, F. (2015). Fast, simple and accurate
       handwritten digit classification by training shallow neural network
       classifiers with the "Extreme learning machine" algorithm. PLoS ONE,
       10(8), 1-20. doi:10.1371/journal.pone.0134254
    """
    assert trainX.shape[0] == trainY.size
    trainX = trainX.reshape(trainX.shape[0], -1)
    trainY = trainY.ravel()
    d = trainX.shape[1]
    classes = np.unique(trainY)
    assert mask is None or mask.shape == (n_encoders, d)

    inds = [(trainY == label).nonzero()[0] for label in classes]
    train_norm = npext.norm(trainX, axis=1).mean()

    encoders = np.zeros((n_encoders, d))
    biases = np.zeros(n_encoders)
    for k in range(n_encoders):
        # Repeatedly pick one sample from each of two random classes, until
        # their (masked) difference is long enough to give a stable encoder.
        for _ in range(norm_tries):
            i, j = rng.choice(len(classes), size=2, replace=False)
            a, b = trainX[rng.choice(inds[i])], trainX[rng.choice(inds[j])]
            dab = a - b
            if mask is not None:
                dab *= mask[k]
            ndab = npext.norm(dab)**2
            if ndab >= norm_min * train_norm:
                break
        else:
            raise ValueError("Cannot find valid encoder")

        # The encoder is the scaled difference vector; the bias places the
        # decision boundary at the perpendicular bisector of a and b.
        encoders[k] = (2. / ndab) * dab
        biases[k] = np.dot(a + b, dab) / ndab

    return encoders, biases
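A minimal, hypothetical call on synthetic data (the shapes and class counts here are made up purely for illustration):

import numpy as np
import nengo.utils.numpy as npext

rng = np.random.RandomState(0)
trainX = rng.randn(1000, 64)             # 1000 samples, 64 features
trainY = rng.randint(0, 10, size=1000)   # labels from 10 classes
encoders, biases = cd_encoders_biases(50, trainX, trainY, rng=rng)
assert encoders.shape == (50, 64) and biases.shape == (50,)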
Example 3: test_sqrt_beta

def test_sqrt_beta(n, m, rng):
    num_samples = 250
    num_bins = 5

    vectors = rng.randn(num_samples, n + m)
    vectors /= npext.norm(vectors, axis=1, keepdims=True)
    expectation, _ = np.histogram(
        npext.norm(vectors[:, :m], axis=1), bins=num_bins)

    dist = dists.SqrtBeta(n, m)
    samples = dist.sample(num_samples, 1, rng=rng)
    hist, _ = np.histogram(samples, bins=num_bins)

    assert np.all(np.abs(np.asfarray(hist - expectation) / num_samples) < 0.16)
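The fact being tested: when a vector is drawn uniformly from the surface of the unit hypersphere in n + m dimensions, the norm of any m of its coordinates follows the SqrtBeta(n, m) distribution (equivalently, the squared subvector norm is Beta(m/2, n/2)-distributed). A histogram of the empirical subvector norms should therefore match a histogram of samples drawn directly from dists.SqrtBeta(n, m), up to sampling noise — hence the loose 0.16 tolerance.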
Example 4: test_state_norm

def test_state_norm(plt):
    # Choose a filter, timestep, and number of simulation timesteps
    sys = Alpha(0.1)
    dt = 0.000001
    length = 2000000

    # Modify the state-space to read out the state vector
    A, B, C, D = sys2ss(sys)
    old_C = C
    C = np.eye(len(A))
    D = np.zeros((len(A), B.shape[1]))

    response = np.empty((length, len(C)))
    for i in range(len(C)):
        # Simulate the state vector
        response[:, i] = impulse((A, B, C[i, :], D[i, :]), dt, length)

    # Check that the power of each state equals the H2-norm of each state
    # The analog case is the same after scaling since dt is approx 0.
    actual = norm(response, axis=0) * dt
    assert np.allclose(actual, state_norm(cont2discrete(sys, dt)))
    assert np.allclose(actual, state_norm(sys) * np.sqrt(dt))

    plt.figure()
    plt.plot(response[:, 0], label="$x_0$")
    plt.plot(response[:, 1], label="$x_1$")
    plt.plot(np.dot(response, old_C.T), label="$y$")
    plt.legend()
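Why both assertions should hold (a rough argument, assuming zero-order-hold discretization): for very small dt, the discrete impulse response is approximately dt * h(t_k), so its 2-norm is approximately dt * sqrt(sum_k h(t_k)**2), which is exactly `actual`; this matches the state norm of the discretized system. The continuous H2 norm sqrt(integral of h(t)**2 dt) is in turn approximated by sqrt(dt * sum_k h(t_k)**2) = actual / sqrt(dt), which rearranges to the second assertion.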
Example 5: test_hypersphere_surface

def test_hypersphere_surface(dimensions, rng):
    n = 150 * dimensions
    dist = dists.UniformHypersphere(surface=True)
    samples = dist.sample(n, dimensions, rng=rng)
    assert samples.shape == (n, dimensions)
    assert np.allclose(npext.norm(samples, axis=1), 1)
    assert np.allclose(np.mean(samples, axis=0), 0, atol=0.25 / dimensions)
Example 6: test_sphere

def test_sphere(d, rng):
    n = 200
    x = sphere.sample(n, d, rng)
    assert x.shape == (n, d)
    assert np.allclose(norm(x, axis=1), 1)

    f = _furthest(x)
    assert (f > 1.5).all()
Example 7: test_hypersphere_surface

def test_hypersphere_surface(dimensions):
    # Same check as Example 5, but against an older Nengo API, where
    # UniformHypersphere took the dimensionality in its constructor and
    # sample() took the rng as its second argument.
    n = 100 * dimensions
    dist = dists.UniformHypersphere(dimensions, surface=True)
    samples = dist.sample(n, np.random.RandomState(1))
    assert samples.shape == (n, dimensions)
    assert np.allclose(npext.norm(samples, axis=1), 1)
    assert np.allclose(
        np.mean(samples, axis=0), np.zeros(dimensions), atol=0.1)
Example 8: test_ball

def test_ball(d, rng):
    n = 200
    x = ball.sample(n, d, rng)
    assert x.shape == (n, d)

    dist = norm(x, axis=1)
    assert (dist <= 1).all()

    f = _furthest(x)
    assert (f > dist + 0.5).all()
Example 9: ciw_encoders

def ciw_encoders(n_encoders, trainX, trainY, rng=np.random,
                 normalize_data=True, normalize_encoders=True):
    """Computed Input Weights (CIW) method for encoders from data [1]_.

    Parameters
    ----------
    n_encoders : int
        Number of encoders to generate.
    trainX : (n_samples, n_dimensions) array-like
        Training features.
    trainY : (n_samples,) array-like
        Training labels.

    Returns
    -------
    encoders : (n_encoders, n_dimensions) array
        Generated encoders.

    References
    ----------
    .. [1] McDonnell, M. D., Tissera, M. D., Vladusich, T., Van Schaik, A.,
       Tapson, J., & Schwenker, F. (2015). Fast, simple and accurate
       handwritten digit classification by training shallow neural network
       classifiers with the "Extreme learning machine" algorithm. PLoS ONE,
       10(8), 1-20. doi:10.1371/journal.pone.0134254
    """
    assert trainX.shape[0] == trainY.size
    trainX = trainX.reshape(trainX.shape[0], -1)
    trainY = trainY.ravel()
    classes = np.unique(trainY)
    assert n_encoders % len(classes) == 0
    n_enc_per_class = n_encoders // len(classes)  # integer division for Python 3

    # normalize
    if normalize_data:
        trainX = (trainX - trainX.mean()) / trainX.std()
        # trainX = (trainX - trainX.mean(axis=0)) / trainX.std()
        # trainX = (trainX - trainX.mean(axis=0)) / (trainX.std(axis=0) + 1e-8)

    # generate: each encoder is a random +/-1 combination of one class's samples
    encoders = []
    for label in classes:
        X = trainX[trainY == label]
        plusminus = rng.choice([-1, 1], size=(X.shape[0], n_enc_per_class))
        samples = np.dot(plusminus.T, X)
        encoders.append(samples)
    encoders = np.vstack(encoders)

    if normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    return encoders
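As with Example 2, a minimal hypothetical call on made-up data (note that n_encoders must be divisible by the number of classes):

import numpy as np
import nengo.utils.numpy as npext

rng = np.random.RandomState(0)
trainX = rng.randn(1000, 64)
trainY = rng.randint(0, 10, size=1000)   # 10 classes, so 50 % 10 == 0
encoders = ciw_encoders(50, trainX, trainY, rng=rng)
assert encoders.shape == (50, 64)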
Example 10: similarity

def similarity(data, vocab, normalize=False):
    """Return the similarity between some data and the vocabulary.

    Computes the dot products between all data vectors and each
    vocabulary vector. If `normalize=True`, normalizes all vectors
    to compute the cosine similarity.

    Parameters
    ----------
    data : array_like
        The data used for comparison.
    vocab : spa.Vocabulary, array_like
        Vocabulary (or list of vectors) to use to calculate
        the similarity values.
    normalize : bool, optional
        Whether to normalize all vectors, to compute the cosine similarity.
    """
    from nengo.spa.vocab import Vocabulary
    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif is_iterable(vocab):
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary"
                              % (vocab.__class__.__name__), attr='vocab')

    data = np.array(data, copy=False, ndmin=2)
    dots = np.dot(data, vectors.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data, axis=1, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)
        dots /= dnorm
        dots /= vnorm.T

    return dots
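A hypothetical call with a plain array of vectors standing in for a spa.Vocabulary:

import numpy as np

vocab_vectors = np.array([[1.0, 0.0], [0.0, 1.0]])
data = np.array([[2.0, 0.0]])
print(similarity(data, vocab_vectors))                   # [[2. 0.]] -- raw dot products
print(similarity(data, vocab_vectors, normalize=True))   # [[1. 0.]] -- cosine similarity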
Example 11: test_eval_points_scaling

def test_eval_points_scaling(Simulator, sample, radius, seed, rng):
    eval_points = UniformHypersphere()
    if sample:
        eval_points = eval_points.sample(500, 3, rng=rng)

    model = nengo.Network(seed=seed)
    with model:
        a = nengo.Ensemble(1, 3, eval_points=eval_points, radius=radius)
    with Simulator(model) as sim:
        dists = npext.norm(sim.data[a].eval_points, axis=1)

    assert np.all(dists <= radius)
    assert np.any(dists >= 0.9 * radius)
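Both branches pass because Nengo scales an ensemble's evaluation points, whether given as a distribution or as an explicit array, by the ensemble's radius: UniformHypersphere samples lie in the unit ball, so every stored point ends up with norm at most radius, and with 500 samples in 3 dimensions some are almost surely close to the surface.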
Example 12: test_encoders

def test_encoders(n_dimensions, n_neurons=10, encoders=None):
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, n_dimensions))
        encoders /= norm(encoders, axis=-1, keepdims=True)

    model = nengo.Network(label="_test_encoders")
    with model:
        ens = nengo.Ensemble(neurons=nengo.LIF(n_neurons),
                             dimensions=n_dimensions,
                             encoders=encoders,
                             label="A")
    sim = nengo.Simulator(model)

    assert np.allclose(encoders, sim.data[ens].encoders)
Example 13: build_lif

def build_lif(model, ens):
    # Create a random number generator
    rng = np.random.RandomState(model.seeds[ens])

    # Get the eval points
    eval_points = ensemble.gen_eval_points(ens, ens.eval_points, rng=rng)

    # Get the encoders
    if isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=np.float64)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Get maximum rates and intercepts
    max_rates = ensemble.sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = ensemble.sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is None and ens.bias is None:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
    elif ens.gain is not None and ens.bias is not None:
        gain = ensemble.sample(ens.gain, ens.n_neurons, rng=rng)
        bias = ensemble.sample(ens.bias, ens.n_neurons, rng=rng)
    else:
        raise NotImplementedError(
            "gain or bias set for {!s}, but not both. Solving for one given "
            "the other is not yet implemented.".format(ens)
        )

    # Scale the encoders
    scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    # Store all the parameters
    model.params[ens] = BuiltEnsemble(
        eval_points=eval_points,
        encoders=encoders,
        scaled_encoders=scaled_encoders,
        max_rates=max_rates,
        intercepts=intercepts,
        gain=gain,
        bias=bias
    )

    # Create the object which will handle simulation of the LIF ensemble. This
    # object will be responsible for adding items to the netlist and providing
    # functions to prepare the ensemble for simulation. The object may be
    # modified by later methods.
    model.object_operators[ens] = operators.EnsembleLIF(ens)
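Note the role of the two encoder arrays stored above: `encoders` holds the unit-length preferred directions, while `scaled_encoders` folds in the gain and radius, so the input current to neuron i works out to J_i = gain_i * (e_i . x) / radius + bias_i. Dividing by the radius is what maps inputs of norm `radius` onto the neuron's nominal operating range.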
Example 14: test_encoders

def test_encoders(n_dimensions, n_neurons=10, encoders=None):
    # Variant of Example 12 against an older Nengo API (nengo.Model instead
    # of nengo.Network, no context manager).
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, n_dimensions))
        encoders /= norm(encoders, axis=-1, keepdims=True)

    args = {'label': 'A',
            'neurons': nengo.LIF(n_neurons),
            'dimensions': n_dimensions}

    model = nengo.Model('_test_encoders')
    ens = nengo.Ensemble(encoders=encoders, **args)
    sim = nengo.Simulator(model)

    assert np.allclose(encoders, sim.data[ens].encoders)
Example 15: test_encoders

def test_encoders(RefSimulator, dimensions, seed, n_neurons=10, encoders=None):
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, dimensions))
        encoders = npext.array(encoders, min_dims=2, dtype=np.float64)
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    model = nengo.Network(label="_test_encoders", seed=seed)
    with model:
        ens = nengo.Ensemble(n_neurons=n_neurons,
                             dimensions=dimensions,
                             encoders=encoders,
                             label="A")

    with RefSimulator(model) as sim:
        assert np.allclose(encoders, sim.data[ens].encoders)