This article collects typical usage examples of the theano_function function from Python's sympy.printing.theanocode module. If you have been wondering how exactly theano_function is used, how to call it, or what real examples of it look like, the hand-picked code examples below should help.
A total of 15 code examples of theano_function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
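Before the examples, here is a minimal, self-contained sketch of a typical theano_function call. It is an illustration rather than one of the examples below; it assumes SymPy and Theano are installed, and note that recent SymPy releases deprecate this module in favor of the Aesara printer.

import numpy as np
import sympy
from sympy.printing.theanocode import theano_function

x, y = sympy.symbols('x y')

# Compile the SymPy expression x + y**2 into a callable Theano function.
# dims/dtypes request 1-D float64 tensor inputs.
f = theano_function([x, y], [x + y**2],
                    dims={x: 1, y: 1},
                    dtypes={x: 'float64', y: 'float64'})

a = np.array([1.0, 2.0, 3.0])
b = np.array([4.0, 5.0, 6.0])
print(f(a, b))  # expected: [17. 27. 39.]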
Example 1: prepareStatment
def prepareStatment(self):
    A = MatrixSymbol('A', *self.A.shape)
    H = MatrixSymbol('H', *self.H.shape)
    x = MatrixSymbol('x', *self.x.shape)
    P = MatrixSymbol('P', self.H.shape[1], self.H.shape[1])
    Q = MatrixSymbol('Q', *self.Q.shape)
    R = MatrixSymbol('R', *self.R.shape)
    I = MatrixSymbol('I', max(x.shape), max(x.shape))
    measurement = MatrixSymbol('measurement', *(H * x).shape)

    # Update
    y = measurement - H * x
    S = H * P * H.T + R
    K = P * H.T * S.I
    up_x = x + K * y
    up_P = (I - K * H) * P
    inputs = [x, P, H, R, I, measurement]
    outputs = [up_x, up_P]
    dtypes = {inp: 'float64' for inp in inputs}
    self.update = theano_function(inputs, outputs, dtypes=dtypes)

    # Predict
    pre_x = A * x
    pre_P = A * P * A.T + Q
    inputs = [A, x, P, Q]
    outputs = [pre_x, pre_P]
    dtypes = {inp: 'float64' for inp in inputs}
    self.predict = theano_function(inputs, outputs, dtypes=dtypes)
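The compiled self.update and self.predict callables take NumPy arrays in the same order as the corresponding inputs list and return the two outputs. As a rough sketch of the calling pattern, the predict step alone could be compiled and invoked as below; the 2x2 shapes are assumptions made for illustration, not part of the original class.

import numpy as np
from sympy import MatrixSymbol
from sympy.printing.theanocode import theano_function

# Hypothetical 2-state system, just to show the call pattern.
A = MatrixSymbol('A', 2, 2)
x = MatrixSymbol('x', 2, 1)
P = MatrixSymbol('P', 2, 2)
Q = MatrixSymbol('Q', 2, 2)

inputs = [A, x, P, Q]
outputs = [A * x, A * P * A.T + Q]   # pre_x and pre_P from the example above
dtypes = {inp: 'float64' for inp in inputs}
predict = theano_function(inputs, outputs, dtypes=dtypes)

new_x, new_P = predict(np.eye(2), np.array([[0.0], [1.0]]),
                       np.eye(2), 0.1 * np.eye(2))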
Example 2: test_BlockMatrix_Inverse_execution
def test_BlockMatrix_Inverse_execution():
    k, n = 2, 4
    dtype = 'float32'
    A = sympy.MatrixSymbol('A', n, k)
    B = sympy.MatrixSymbol('B', n, n)
    inputs = A, B
    output = B.I*A

    # use integer block sizes so the cuts stay exact under Python 3 division
    cutsizes = {A: [(n//2, n//2), (k//2, k//2)],
                B: [(n//2, n//2), (n//2, n//2)]}
    cutinputs = [sympy.blockcut(i, *cutsizes[i]) for i in inputs]
    cutoutput = output.subs(dict(zip(inputs, cutinputs)))
    dtypes = dict(zip(inputs, [dtype]*len(inputs)))
    f = theano_function(inputs, [output], dtypes=dtypes)
    fblocked = theano_function(inputs, [sympy.block_collapse(cutoutput)],
                               dtypes=dtypes)

    import numpy
    ninputs = [numpy.random.rand(*x.shape).astype(dtype) for x in inputs]
    ninputs = [numpy.arange(n*k).reshape(A.shape).astype(dtype),
               numpy.eye(n).astype(dtype)]
    ninputs[1] += numpy.ones(B.shape)*1e-5

    assert numpy.allclose(f(*ninputs), fblocked(*ninputs), rtol=1e-5)
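For readers unfamiliar with the block-matrix helpers used above: sympy.blockcut partitions a matrix expression into a BlockMatrix with the given row and column block sizes, and sympy.block_collapse simplifies expressions over those blocks; the test compiles both the plain and the blocked form of B.I*A and checks that they agree numerically. A small standalone illustration, separate from the test:

import sympy

A = sympy.MatrixSymbol('A', 4, 2)
blocked = sympy.blockcut(A, (2, 2), (1, 1))      # 2x2 grid of slices of A
print(blocked)
print(sympy.block_collapse(blocked.T * blocked))  # algebra over the slices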
Example 3: test_theano_function_numpy
def test_theano_function_numpy():
    import numpy as np
    f = theano_function([x, y], [x+y], dim=1)
    assert np.linalg.norm(f([1, 2], [3, 4]) - np.asarray([4, 6])) < 1e-9

    f = theano_function([x, y], [x+y], dtypes={x: 'float64', y: 'float64'},
                        dim=1)
    xx = np.arange(3).astype('float64')
    yy = 2*np.arange(3).astype('float64')
    assert np.linalg.norm(f(xx, yy) - 3*np.arange(3)) < 1e-9
Example 4: test_theano_function_kwargs
def test_theano_function_kwargs():
    import numpy as np
    f = theano_function([x, y, z], [x+y], dim=1, on_unused_input='ignore',
                        dtypes={x: 'float64', y: 'float64', z: 'float64'})
    assert np.linalg.norm(f([1, 2], [3, 4], [0, 0]) - np.asarray([4, 6])) < 1e-9

    f = theano_function([x, y, z], [x+y],
                        dtypes={x: 'float64', y: 'float64', z: 'float64'},
                        dim=1, on_unused_input='ignore')
    xx = np.arange(3).astype('float64')
    yy = 2*np.arange(3).astype('float64')
    zz = 2*np.arange(3).astype('float64')
    assert np.linalg.norm(f(xx, yy, zz) - 3*np.arange(3)) < 1e-9
Example 5: _theanoize
def _theanoize(self, outputs):
    self.define_inputs()

    old_check_input = theano.config.check_input
    old_allow_gc = theano.config.allow_gc
    try:
        # This affects compilation and removes the input check at each step.
        theano.config.check_input = False
        # Disable Theano garbage collection to lower the number of allocations.
        theano.config.allow_gc = False
        f_imp = theano_function(self.inputs, outputs,
                                on_unused_input='ignore',
                                mode=theano.Mode(linker='c'))
    finally:
        theano.config.check_input = old_check_input
        theano.config.allow_gc = old_allow_gc

    # Marking the inputs as trusted (f_imp.trust_input = True) already lowers
    # Theano's call overhead; the wrapper below bypasses even more of it by
    # writing to the compiled function's input storage directly.
    def f(*args):
        for i in range(len(args)):
            f_imp.input_storage[i].storage[0] = args[i]
        f_imp.fn()
        return [f_imp.output_storage[i].data for i in range(len(outputs))]

    return f
Example 6: sympy_theanify
def sympy_theanify(sympy_expr, symbols=()):
    if isinstance(sympy_expr, Expr):
        if not symbols:
            symbols = sympy_expr.free_symbols
        return theano_function(symbols, [sympy_expr])
    else:
        return lambda **kwargs: sympy_expr
Example 7: theano_lambdify
def theano_lambdify(args, expr):
    """
    Lambdify expression expr w.r.t. arguments args using theano.
    """
    theano_opts = {'on_unused_input': 'ignore',
                   'allow_input_downcast': False}

    # Detect whether the expression is a vector.
    if isinstance(expr, types.vector_types):
        # Convert the output of 1-D vector functions with length 1 back to a
        # 1-D numpy array, because the default is a 0-D array (scalar).
        if len(expr) == 1:
            def getslice(f):
                return numpy.asarray(f, dtype=DTYPE)[numpy.newaxis]
        else:
            def getslice(f):
                return numpy.asarray(f, dtype=DTYPE)
    else:
        expr = [expr, ]

        def getslice(f):
            return numpy.asarray(f, dtype=DTYPE)

    # theano does not accept sympy numbers as output...
    expr_lambda = theano_function(args, expr, **theano_opts)

    return lambda *args: getslice(expr_lambda(*args))
Example 8: make_theano_fns_of_d1d2xw
def make_theano_fns_of_d1d2xw(self, dg_first, dg_second):
    # list(...) is needed on Python 3, where dict views cannot be concatenated.
    args = list(self.normal_x_s_d.values()) + \
        list(self.normal_w_s_d.values()) + \
        list(self.param_sym_dict.values())
    args_values_x = [x.subs(self.normal_xw_s_ss_values_d)
                     for x in self.normal_x_s_d.values()]
    args_values_w = [w.subs(self.normal_xw_s_ss_values_d)
                     for w in self.normal_w_s_d.values()]
    args_values_p = [p.subs(self.par_to_values_dict)
                     for p in self.param_sym_dict.values()]
    args_values = args_values_x + args_values_w + args_values_p

    dg_first_th = theano_function(args, dg_first, on_unused_input='ignore')
    dg_second_th = theano_function(
        args, dg_second, on_unused_input='ignore')

    return dg_first_th, dg_second_th, args_values
Example 9: _theanoize
def _theanoize(self, outputs):
    self.define_inputs()

    f = theano_function(self.inputs, outputs, on_unused_input='ignore')

    # Theano will run faster if you trust the input. I'm not sure
    # what the implications of this are. See:
    # http://deeplearning.net/software/theano/tutorial/faq.html#faster-small-theano-function
    # Note that map(np.asarray, np.hstack(args)) is required if
    # trust_input is True. If it is False, then it will sanitize the
    # inputs. I'm not sure which one is faster.
    f.trust_input = True

    return f
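Setting trust_input = True tells Theano to skip its per-call input validation, so the caller must already supply arrays of exactly the expected dtype and dimensionality; the map(np.asarray, np.hstack(args)) remark in the comment above refers to that requirement. A small sketch of what this means in practice; the symbol names here are illustrative only.

import numpy as np
from sympy import symbols
from sympy.printing.theanocode import theano_function

a, b = symbols('a b')
f = theano_function([a, b], [a * b],
                    dims={a: 1, b: 1},
                    dtypes={a: 'float64', b: 'float64'})

f.trust_input = True
# With trust_input enabled the arguments must already be float64 ndarrays;
# plain Python lists would no longer be converted automatically.
xs = np.asarray([1.0, 2.0], dtype='float64')
ys = np.asarray([3.0, 4.0], dtype='float64')
print(f(xs, ys))  # expected: [3. 8.]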
Example 10: __init__
def __init__(self, var_names_and_syms={}, dict_or_expr={}):
    if hasattr(dict_or_expr, 'keys'):
        for k, v in dict_or_expr.items():
            dict_or_expr[k] = CompyledFunc(var_names_and_syms=var_names_and_syms, dict_or_expr=v)
        self.Compyled = dict_or_expr
    elif is_non_atomic_sympy_expr(dict_or_expr):
        self.Vars = tuple(var for var, symbol in var_names_and_syms.items()
                          if symbol and not isinstance(symbol, FLOAT_TYPES))
        inputs = (var_names_and_syms[var] for var in self.Vars)
        if use_theano:
            self.Compyled = theano_function(inputs, (dict_or_expr,), allow_input_downcast=True)
        else:
            self.Compyled = ufuncify(inputs, dict_or_expr)
    else:
        self.Compyled = sympy_to_float(dict_or_expr)
Example 11: prepareStatment
def prepareStatment(self):
    measurement = MatrixSymbol('measurement', *(self.H * self.x).shape)

    # Update
    y = measurement - self.H * self.x
    S = self.H * self.P * self.H.T + self.R
    K = self.P * self.H.T * S.I
    upx = self.x + K * y
    upP = self.I - K * self.H

    # Predict
    new_x = self.A * upx
    new_P = self.A * upP * self.A.T + self.Q

    inputs = [self.A, self.x, self.P, self.H, self.R, self.I, self.Q, measurement]
    outputs = [new_x, new_P]
    dtypes = {inp: 'float64' for inp in inputs}
    self.theano_update = theano_function(inputs, outputs, dtypes=dtypes)
Example 12: test_theano_function_simple
def test_theano_function_simple():
    f = theano_function([x, y], [x+y])
    assert f(2, 3) == 5
Example 13: generate_ode_function
def generate_ode_function(mass_matrix, forcing_vector, constants,
                          coordinates, speeds, specified=None,
                          generator='lambdify'):
    """Returns a numerical function which can evaluate the right hand side
    of the first order ordinary differential equations from a system
    described by:

    M(constants, coordinates) x' = F(constants, coordinates, speeds, specified)

    Parameters
    ----------
    mass_matrix : sympy.Matrix, shape(n,n)
        The symbolic mass matrix of the system.
    forcing_vector : sympy.Matrix, shape(n,1)
        The symbolic forcing vector of the system.
    constants : list of sympy.Symbol
        The constants in the equations of motion.
    coordinates : list of sympy.Function
        The generalized coordinates of the system.
    speeds : list of sympy.Function
        The generalized speeds of the system.
    specified : list of sympy.Function
        The specified quantities of the system.
    generator : string, {'lambdify'|'theano'|'cython'}, optional
        The method used for generating the numeric right hand side.

    Returns
    -------
    evaluate_ode_function : function
        A function which evaluates the derivatives of the states.

    """
    if generator == 'lambdify' or generator == 'theano':

        arguments = constants + coordinates + speeds
        if specified is not None:
            arguments += specified

        if generator == 'lambdify':
            mass_matrix_func = lambdify(arguments, mass_matrix)
            forcing_vector_func = lambdify(arguments, forcing_vector)
        elif generator == 'theano':
            mass_matrix_func = theano_function(arguments, [mass_matrix],
                                               on_unused_input='ignore')
            forcing_vector_func = theano_function(arguments,
                                                  [forcing_vector],
                                                  on_unused_input='ignore')
            # Theano will run faster if you trust the input. I'm not sure
            # what the implications of this are. See:
            # http://deeplearning.net/software/theano/tutorial/faq.html#faster-small-theano-function
            mass_matrix_func.trust_input = True
            forcing_vector_func.trust_input = True

        def mass_forcing_func(numerical_constants, numerical_coordinates,
                              numerical_speeds, numerical_specified=None):
            """Returns numerical evaluations of the mass matrix and forcing
            vector."""
            values = [numerical_constants, numerical_coordinates,
                      numerical_speeds]
            if specified is not None:
                values.append(numerical_specified)

            value_array = np.hstack(tuple(values))
            if generator == 'theano':
                value_array = [np.asarray(v) for v in value_array]

            return (mass_matrix_func(*value_array),
                    forcing_vector_func(*value_array))

    elif generator == 'cython':

        filename_prefix = 'multibody_system'

        # TODO : This is a hack to allow you to regenerate cython modules
        # without closing the Python session. It may be best to also force
        # the user to provide a module name when generating the Cython code.
        # Check out the Cython inline code to figure out how to do all this
        # better with distutils:
        # https://github.com/cython/cython/blob/master/Cython/Build/Inline.py

        # The .pyx file has the same prefix as the Cython generated [.dll,
        # .so, .dylib] shared library file, so we should be able to check
        # all files in the directory for matches except the .pyx file.
        prefixes = [os.path.splitext(p)[0] for p in os.listdir('.') if not
                    p.endswith('.pyx')]
        while True:
            if filename_prefix in prefixes:
                filename_prefix += '_' + random.choice(all_letters)
            else:
                break

        cython_generator = CythonGenerator(filename_prefix, mass_matrix,
                                           forcing_vector, constants,
                                           coordinates, speeds,
                                           specified=specified)
#......... (part of the code omitted) .........
Example 14: MatrixSymbol
from numpy import array
from sympy import log
from sympy.matrices import MatrixSymbol, BlockMatrix, Matrix

x = MatrixSymbol('x', 2, 2)
b = BlockMatrix([[x, x]])

a = array([[1, 2],
           [3, 4]])
m = BlockMatrix([i for i in range(5)])

o = log(2) + log(3)
f = theano_function([], o)

from frozendict import frozendict
from MathFunc import MathFunc

d0 = MathFunc(dict.fromkeys(('a', 'b')),
              {frozendict(a=1, b=2): 3,
               frozendict(a=10, b=20): 30})
d = MathFunc(dict.fromkeys(('a', 'b')),
             {frozendict(a=1, b=2): 3,
              frozendict(a=10, b=20): 30})
d1 = MathFunc(dict.fromkeys(('b', 'c')),
              {frozendict(c=3, b=2): 30,
Example 15: build_mixture_loss_and_grad
#......... (part of the code omitted) .........
            return True

        def _eval_is_finite(self):
            return True

        def fdiff(self, argindex):
            r, mu, sigma, lamda = self.args
            # if mu=0 and sigma=1, then this is
            # just the inverse standard erf, so return erfi
            if mu == 0 and sigma == 1:
                return sympy.diff(sympy.erfi(r), self.args[argindex-1])

            tmp = sympy.symbols("tmp", real=True, finite=True)
            z_s = GaussianMixtureCDF(tmp, mu, sigma, lamda)
            inv_diff = sympy.diff(z_s, self.args[argindex-1])
            return sympy.simplify(1/inv_diff.subs(tmp, self))

    # create symbols for the model params
    lamda_s, sigma_s = sympy.symbols(
        "lamda, sigma", positive=True, real=True, finite=True)
    mu_s, rho_s = sympy.symbols(
        "mu, rho", real=True, finite=True)

    # if we are building the pseudo functions then the ranks are what we
    # actually observe, so we need to wrap them in an inverse CDF call
    if build_pseudo_functions:
        r1_s = sympy.symbols("r1_s", real=True, finite=True, positive=True)
        r2_s = sympy.symbols("r2_s", real=True, finite=True, positive=True)

        z1_s = GaussianMixtureCDF_inverse(r1_s, mu_s, sigma_s, lamda_s)
        z2_s = GaussianMixtureCDF_inverse(r2_s, mu_s, sigma_s, lamda_s)
    # otherwise we just use standard symbols for the observed values
    else:
        z1_s, z2_s = sympy.symbols("z1, z2", real=True, finite=True)

    ####### build the marginal densities
    std_z1_s = (z2_s - mu_s)/sigma_s
    std_z2_s = (z1_s - mu_s)/sigma_s

    ####### bivariate normal density
    sym_signal_density = (
        1./(2.*sympy.pi*sigma_s*sigma_s)
    )*(
        1./sympy.sqrt(1.-rho_s**2)
    )*sympy.exp(-(
        std_z1_s**2 + std_z2_s**2 - 2*rho_s*std_z1_s*std_z2_s
    )/(2*(1-rho_s**2)))

    sym_noise_density = (
        1./(2.*sympy.pi)
    )*sympy.exp(-(z1_s**2 + z2_s**2)/2)

    sym_log_lhd = sympy.simplify(sympy.log(lamda_s*sym_signal_density
                                           + (1-lamda_s)*sym_noise_density))

    # we use the following in the theano calls instead of the z's
    # so that theano won't choke
    pv_1, pv_2 = sympy.symbols('pv_1 pv_2', real=True, finite=True)

    # differentiate, replace the inverse mixture CDFs with the pv_ symbols,
    # and then build the theano functions
    sym_gradients = []
    for sym in (mu_s, sigma_s, rho_s, lamda_s):
        sym_grad = sympy.diff(sym_log_lhd, sym)
        pv_sym_grad = sym_grad.subs({z1_s: pv_1, z2_s: pv_2})
        sym_gradients.append(pv_sym_grad)

    theano_gradient = theano_function(
        (mu_s, sigma_s, rho_s, lamda_s, pv_1, pv_2),
        sym_gradients,
        dims={mu_s: 1, sigma_s: 1, rho_s: 1, lamda_s: 1, pv_1: 1, pv_2: 1})
    theano_log_lhd = theano_function(
        (mu_s, sigma_s, rho_s, lamda_s, pv_1, pv_2),
        [sym_log_lhd.subs({z1_s: pv_1, z2_s: pv_2}), ],
        dims={mu_s: 1, sigma_s: 1, rho_s: 1, lamda_s: 1, pv_1: 1, pv_2: 1})

    # wrap the theano functions in python functions, and return them
    def calc_log_lhd(theta, z1, z2):
        mu, sigma, rho, lamda = theta
        return theano_log_lhd(
            numpy.repeat(mu, len(z1)),
            numpy.repeat(sigma, len(z1)),
            numpy.repeat(rho, len(z1)),
            numpy.repeat(lamda, len(z1)),
            z1, z2).sum()

    def calc_log_lhd_gradient(theta, z1, z2, fix_mu, fix_sigma):
        mu, sigma, rho, lamda = theta
        res = theano_gradient(
            numpy.repeat(mu, len(z1)),
            numpy.repeat(sigma, len(z1)),
            numpy.repeat(rho, len(z1)),
            numpy.repeat(lamda, len(z1)),
            z1, z2)
        return numpy.array([x.sum() for x in res])

    return calc_log_lhd, calc_log_lhd_gradient