This article collects typical usage examples of the Python function neupy.utils.asfloat. If you are wondering what exactly asfloat does and how to use it, the curated code samples below should help.
The sections below show 15 code examples of the asfloat function, sorted by popularity by default.
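Before diving into the examples, a quick orientation: judging from the usages below, asfloat casts numbers, NumPy arrays, and Theano expressions to the float data type Theano is configured with (theano.config.floatX, typically float32). A minimal sketch of a typical call:

import numpy as np
from neupy.utils import asfloat

# Cast an integer array to Theano's configured float type so it can
# be fed into compiled Theano functions without dtype mismatches.
x = asfloat(np.array([1, 2, 3]))
print(x.dtype)  # matches theano.config.floatX, e.g. float32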
Example 1: quadratic_minimizer
def quadratic_minimizer(x_a, y_a, y_prime_a, x_b, y_b, bound_size_ratio=0.1):
"""
Finds the minimizer for a quadratic polynomial that
goes through the points (x_a, y_a), (x_b, y_b) with derivative
at x_a of y_prime_a.
Parameters
----------
x_a : float or theano variable
        Left point ``a`` on the ``x`` axis.
y_a : float or theano variable
Output from function ``y`` at point ``a``.
y_prime_a : float or theano variable
Output from function ``y'`` (``y`` derivative) at
point ``a``.
x_b : float or theano variable
        Right point ``b`` on the ``x`` axis.
y_b : float or theano variable
Output from function ``y`` at point ``b``.
bound_size_ratio : float
        Controls the acceptable bounds for the interpolation. If the
        minimizer lands too close to one of the input points, the
        interpolation result will be ignored. The bigger the ratio,
        the more likely the interpolation is to be rejected. The
        value needs to be between ``0`` and ``1``. Defaults to ``0.1``.
Returns
-------
object
        Theano variable that, after evaluation, is equal to the
        point ``x`` that minimizes the quadratic function.
"""
if not 0 <= bound_size_ratio < 1:
        raise ValueError("Value ``bound_size_ratio`` needs to be a float "
                         "between 0 and 1, got {}".format(bound_size_ratio))
    # The main formula works for the region [0, x_range], so we need
    # to shift the function to the left and put point ``a`` at
    # position ``0``.
x_range = x_b - x_a
coef = (y_b - y_a - y_prime_a * x_range) / (x_range ** 2)
minimizer = -y_prime_a / (asfloat(2) * coef) + x_a
bound_size_ratio = asfloat(bound_size_ratio)
return T.switch(
sequential_or(
# Handle bad cases
T.eq(x_range, zero),
coef <= zero,
T.gt(minimizer, x_b - bound_size_ratio * x_range),
T.lt(minimizer, x_a + bound_size_ratio * x_range),
),
x_a + asfloat(0.5) * x_range,
        # Since we shifted the function to the left, we need to shift
        # the result back to the right to make it correct for the
        # specified region. That's why ``x_a`` is added when
        # ``minimizer`` is computed above.
        minimizer
    )
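The interpolation formula is easy to verify on plain floats. A toy check with made-up sample points (my own arithmetic, not part of neupy): take x_a = 0, y_a = 1, y_prime_a = -2, x_b = 2, y_b = 1. Then:

# Quadratic through the two points with the given slope at x_a:
# q(t) = y_a + y_prime_a*t + coef*t**2 on the shifted region.
x_a, y_a, y_prime_a = 0.0, 1.0, -2.0
x_b, y_b = 2.0, 1.0
x_range = x_b - x_a
coef = (y_b - y_a - y_prime_a * x_range) / x_range ** 2  # 1.0
minimizer = -y_prime_a / (2 * coef) + x_a                # 1.0

Indeed q(t) = 1 - 2t + t**2 = (t - 1)**2 has its minimum at t = 1, well inside the safety margins [0.2, 1.8] implied by bound_size_ratio=0.1.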
Example 2: init_param_updates
def init_param_updates(self, layer, parameter):
step = self.variables.step
epsilon = self.epsilon
parameter_shape = parameter.get_value().shape
prev_mean_squred_grad = theano.shared(
name="{}/prev-mean-squred-grad".format(parameter.name),
value=asfloat(np.zeros(parameter_shape)),
)
prev_mean_squred_dx = theano.shared(
name="{}/prev-mean-squred-dx".format(parameter.name),
value=asfloat(np.zeros(parameter_shape)),
)
gradient = T.grad(self.variables.error_func, wrt=parameter)
mean_squred_grad = (
self.decay * prev_mean_squred_grad +
(1 - self.decay) * gradient ** 2
)
parameter_delta = gradient * (
T.sqrt(prev_mean_squred_dx + epsilon) /
T.sqrt(mean_squred_grad + epsilon)
)
mean_squred_dx = (
self.decay * prev_mean_squred_dx +
(1 - self.decay) * parameter_delta ** 2
)
return [
(prev_mean_squred_grad, mean_squred_grad),
(prev_mean_squred_dx, mean_squred_dx),
(parameter, parameter - step * parameter_delta),
]
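This update rule matches Adadelta: two exponential moving averages are maintained, one of squared gradients and one of squared parameter updates, and their ratio scales every step. A plain-NumPy sketch of a single update, as a reading aid rather than neupy's actual implementation (decay, epsilon, and the toy gradient are made-up values):

import numpy as np

decay, epsilon, step = 0.95, 1e-5, 1.0
grad = np.array([0.1, -0.2])
mean_sq_grad = np.zeros(2)  # running average of squared gradients
mean_sq_dx = np.zeros(2)    # running average of squared updates

mean_sq_grad = decay * mean_sq_grad + (1 - decay) * grad ** 2
delta = grad * np.sqrt(mean_sq_dx + epsilon) / np.sqrt(mean_sq_grad + epsilon)
mean_sq_dx = decay * mean_sq_dx + (1 - decay) * delta ** 2
# the parameter update itself would be: param -= step * delta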
Example 3: test_upscale_layer
def test_upscale_layer(self):
input_value = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
]).reshape((1, 1, 2, 4))
expected_output = np.array([
[1, 1, 2, 2, 3, 3, 4, 4],
[1, 1, 2, 2, 3, 3, 4, 4],
[1, 1, 2, 2, 3, 3, 4, 4],
[5, 5, 6, 6, 7, 7, 8, 8],
[5, 5, 6, 6, 7, 7, 8, 8],
[5, 5, 6, 6, 7, 7, 8, 8],
]).reshape((1, 1, 6, 8))
upscale_layer = layers.Upscale((3, 2))
connection = layers.Input((1, 2, 4)) > upscale_layer
x = T.tensor4('x')
actual_output = upscale_layer.output(x)
actual_output = actual_output.eval({x: asfloat(input_value)})
np.testing.assert_array_almost_equal(
asfloat(expected_output),
actual_output
)
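The expected output is simply the input with every row repeated three times and every column repeated twice, which is what the Upscale((3, 2)) factors mean. A quick NumPy cross-check (my own, not part of the test):

# Rows live on axis 2 and columns on axis 3 of the 4-D tensor.
upscaled = input_value.repeat(3, axis=2).repeat(2, axis=3)
# upscaled now equals expected_output (up to dtype)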
Example 4: init_param_updates
def init_param_updates(self, layer, parameter):
step = self.variables.step
parameter_shape = T.shape(parameter).eval()
prev_delta = theano.shared(
name="{}/prev-delta".format(parameter.name),
value=asfloat(np.zeros(parameter_shape)),
)
prev_gradient = theano.shared(
name="{}/prev-grad".format(parameter.name),
value=asfloat(np.zeros(parameter_shape)),
)
gradient = T.grad(self.variables.error_func, wrt=parameter)
grad_delta = T.abs_(prev_gradient - gradient)
parameter_delta = ifelse(
T.eq(self.variables.epoch, 1),
gradient,
T.clip(
T.abs_(prev_delta) * gradient / grad_delta,
-self.upper_bound,
self.upper_bound
)
)
return [
(parameter, parameter - step * parameter_delta),
(prev_gradient, gradient),
(prev_delta, parameter_delta),
]
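The delta here resembles Quickprop's secant step: the previous step size is scaled by gradient / |prev_gradient - gradient|, a finite-difference stand-in for Newton's method, and clipped so that nearly equal consecutive gradients cannot produce huge jumps. In scalar form, with made-up numbers:

upper_bound = 1.0
prev_delta, prev_grad, grad = 0.5, 0.4, 0.1

delta = min(max(abs(prev_delta) * grad / abs(prev_grad - grad),
                -upper_bound), upper_bound)
# here: 0.5 * 0.1 / 0.3 ~= 0.167, well inside the clipping bounds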
Example 5: test_mixture_of_experts
def test_mixture_of_experts(self):
dataset = datasets.load_diabetes()
data, target = asfloat(dataset.data), asfloat(dataset.target)
insize, outsize = data.shape[1], 1
    input_scaler = preprocessing.MinMaxScaler((-1, 1))
output_scaler = preprocessing.MinMaxScaler()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
input_scaler.fit_transform(data),
output_scaler.fit_transform(target.reshape(-1, 1)),
train_size=0.8
)
n_epochs = 10
scaled_y_test = output_scaler.inverse_transform(y_test)
scaled_y_test = scaled_y_test.reshape((y_test.size, 1))
# -------------- Train single GradientDescent -------------- #
bpnet = algorithms.GradientDescent(
(insize, 20, outsize),
step=0.1,
verbose=False
)
bpnet.train(x_train, y_train, epochs=n_epochs)
network_output = bpnet.predict(x_test)
network_error = rmsle(output_scaler.inverse_transform(network_output),
scaled_y_test)
    # -------------- Train ensemble -------------- #
moe = algorithms.MixtureOfExperts(
networks=[
algorithms.Momentum(
(insize, 20, outsize),
step=0.1,
batch_size=1,
verbose=False
),
algorithms.Momentum(
(insize, 20, outsize),
step=0.1,
batch_size=1,
verbose=False
),
],
gating_network=algorithms.Momentum(
layers.Softmax(insize) > layers.Output(2),
step=0.1,
verbose=False
)
)
moe.train(x_train, y_train, epochs=n_epochs)
ensemble_output = moe.predict(x_test)
    ensemble_error = rmsle(
        output_scaler.inverse_transform(ensemble_output),
        scaled_y_test
    )
    self.assertGreater(network_error, ensemble_error)
Example 6: init_param_updates
def init_param_updates(self, layer, parameter):
epoch = self.variables.epoch
step = self.variables.step
beta1 = self.beta1
beta2 = self.beta2
parameter_shape = T.shape(parameter).eval()
prev_first_moment = theano.shared(
name="{}/prev-first-moment".format(parameter.name),
value=asfloat(np.zeros(parameter_shape)),
)
prev_weighted_inf_norm = theano.shared(
name="{}/prev-weighted-inf-norm".format(parameter.name),
value=asfloat(np.zeros(parameter_shape)),
)
gradient = T.grad(self.variables.error_func, wrt=parameter)
first_moment = beta1 * prev_first_moment + (1 - beta1) * gradient
weighted_inf_norm = T.maximum(beta2 * prev_weighted_inf_norm,
T.abs_(gradient))
parameter_delta = (
(1 / (1 - beta1 ** epoch)) *
(first_moment / (weighted_inf_norm + self.epsilon))
)
return [
(prev_first_moment, first_moment),
(prev_weighted_inf_norm, weighted_inf_norm),
(parameter, parameter - step * parameter_delta),
]
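This is the Adamax variant of Adam: the raw second moment is replaced by an exponentially weighted infinity norm, so only the first moment needs a bias correction. One update step in plain NumPy (hyperparameter values are made-up defaults, not taken from this class):

import numpy as np

beta1, beta2, epsilon = 0.9, 0.999, 1e-8
grad = np.array([0.1, -0.3])
m = np.zeros(2)  # first moment estimate
u = np.zeros(2)  # weighted infinity norm
epoch = 1

m = beta1 * m + (1 - beta1) * grad
u = np.maximum(beta2 * u, np.abs(grad))
delta = (1 / (1 - beta1 ** epoch)) * m / (u + epsilon)
# the parameter update itself would be: param -= step * delta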
Example 7: init_variables
def init_variables(self):
super(ConjugateGradient, self).init_variables()
n_parameters = count_parameters(self.connection)
    self.variables.update(
        prev_delta=theano.shared(
            name="conj-grad/prev-delta",
            value=asfloat(np.zeros(n_parameters)),
        ),
        prev_gradient=theano.shared(
            name="conj-grad/prev-gradient",
            value=asfloat(np.zeros(n_parameters)),
        ),
    )
Example 8: test_batch_norm_as_shared_variable
def test_batch_norm_as_shared_variable(self):
gamma = theano.shared(value=asfloat(np.ones(2)))
beta = theano.shared(value=asfloat(2 * np.ones(2)))
batch_norm = layers.BatchNorm(gamma=gamma, beta=beta)
layers.Input(10) > batch_norm
self.assertIs(gamma, batch_norm.gamma)
self.assertIs(beta, batch_norm.beta)
Example 9: test_concatenate_basic
def test_concatenate_basic(self):
concat_layer = layers.Concatenate(axis=1)
x1 = T.tensor4()
x2 = T.tensor4()
y = theano.function([x1, x2], concat_layer.output(x1, x2))
x1_tensor4 = asfloat(np.random.random((1, 2, 3, 4)))
x2_tensor4 = asfloat(np.random.random((1, 8, 3, 4)))
output = y(x1_tensor4, x2_tensor4)
self.assertEqual((1, 10, 3, 4), output.shape)
Example 10: init_layers
def init_layers(self):
super(Quickprop, self).init_layers()
for layer in self.layers:
for parameter in layer.parameters:
parameter_shape = T.shape(parameter).eval()
parameter.prev_delta = theano.shared(
name="prev_delta_" + parameter.name,
value=asfloat(np.zeros(parameter_shape)),
)
parameter.prev_gradient = theano.shared(
name="prev_grad_" + parameter.name,
value=asfloat(np.zeros(parameter_shape)),
)
Example 11: init_layers
def init_layers(self):
super(Adadelta, self).init_layers()
for layer in self.layers:
for parameter in layer.parameters:
parameter_shape = T.shape(parameter).eval()
parameter.prev_mean_squred_grad = theano.shared(
name="prev_mean_squred_grad_" + parameter.name,
value=asfloat(np.zeros(parameter_shape)),
)
parameter.prev_mean_squred_dx = theano.shared(
name="prev_mean_squred_dx_" + parameter.name,
value=asfloat(np.zeros(parameter_shape)),
)
Example 12: init_layers
def init_layers(self):
super(Adamax, self).init_layers()
for layer in self.layers:
for parameter in layer.parameters:
parameter_shape = T.shape(parameter).eval()
parameter.prev_first_moment = theano.shared(
name="prev_first_moment_" + parameter.name,
value=asfloat(np.zeros(parameter_shape)),
)
parameter.prev_weighted_inf_norm = theano.shared(
name="prev_weighted_inf_norm_" + parameter.name,
value=asfloat(np.zeros(parameter_shape)),
)
Example 13: test_elementwise_basic
def test_elementwise_basic(self):
elem_layer = layers.Elementwise(merge_function=T.add)
x1 = T.matrix()
x2 = T.matrix()
y = theano.function([x1, x2], elem_layer.output(x1, x2))
x1_matrix = asfloat(np.random.random((10, 2)))
x2_matrix = asfloat(np.random.random((10, 2)))
expected_output = x1_matrix + x2_matrix
actual_output = y(x1_matrix, x2_matrix)
np.testing.assert_array_almost_equal(expected_output, actual_output)
Example 14: test_jacobian_for_levenberg_marquardt
def test_jacobian_for_levenberg_marquardt(self):
w1 = theano.shared(name='w1', value=asfloat(np.array([[1]])))
b1 = theano.shared(name='b1', value=asfloat(np.array([0])))
w2 = theano.shared(name='w2', value=asfloat(np.array([[2]])))
b2 = theano.shared(name='b2', value=asfloat(np.array([1])))
x = T.matrix('x')
y = T.matrix('y')
output = ((x.dot(w1.T) + b1) ** 2).dot(w2.T) + b2
error_func = T.mean((y - output), axis=1)
x_train = asfloat(np.array([[1, 2, 3]]).T)
y_train = asfloat(np.array([[1, 2, 3]]).T)
output_expected = asfloat(np.array([[3, 9, 19]]).T)
np.testing.assert_array_almost_equal(
output.eval({x: x_train}),
output_expected
)
jacobian_expected = asfloat(np.array([
[-4, -4, -1, -1],
[-16, -8, -4, -1],
[-36, -12, -9, -1],
]))
jacobian_actual = compute_jacobian(error_func, [w1, b1, w2, b2])
np.testing.assert_array_almost_equal(
jacobian_expected,
jacobian_actual.eval({x: x_train, y: y_train})
)
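The expected Jacobian can be verified by hand. With u = x*w1 + b1, the per-sample error is y - (u**2 * w2 + b2), so the partial derivatives are -2*u*x*w2, -2*u*w2, -u**2, and -1 with respect to w1, b1, w2, and b2. Plugging in x = 1 (so u = 1) reproduces the first row (my own arithmetic, not part of the test):

u = 1 * 1 + 0           # u = x*w1 + b1
row = [-2 * u * 1 * 2,  # d/dw1 = -2*u*x*w2 = -4
       -2 * u * 2,      # d/db1 = -2*u*w2   = -4
       -u ** 2,         # d/dw2 = -u**2     = -1
       -1]              # d/db2             = -1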
Example 15: test_categorical_hinge_without_one_hot_encoding
def test_categorical_hinge_without_one_hot_encoding(self):
targets = asfloat(np.array([2, 0]))
predictions = asfloat(np.array([
[0.1, 0.2, 0.7],
[0.0, 0.9, 0.1],
]))
expected = asfloat(np.array([0.5, 1.9]).mean())
prediction_var = T.matrix()
target_var = T.vector()
error_output = errors.categorical_hinge(target_var, prediction_var)
actual = error_output.eval({prediction_var: predictions,
target_var: targets})
self.assertAlmostEqual(expected, actual)
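The expected values follow directly from the categorical hinge definition max(0, 1 - s_correct + max(s_incorrect)): the first sample gives 1 - 0.7 + 0.2 = 0.5 and the second gives 1 - 0.0 + 0.9 = 1.9, so the mean is 1.2. The same arithmetic in plain Python (my own check, not part of the test):

sample1 = max(0, 1 - 0.7 + max(0.1, 0.2))  # 0.5
sample2 = max(0, 1 - 0.0 + max(0.9, 0.1))  # 1.9
mean_loss = (sample1 + sample2) / 2        # 1.2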