This article collects typical usage examples of the Python function pymc3.floatX. If you have been wondering what floatX does, how to call it, or where it is used in practice, the curated examples below should help.
The following 15 code examples of floatX are shown, ordered by popularity by default. All of them use the conventional aliases import pymc3 as pm, import numpy as np, import theano, and import theano.tensor as tt.
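Before diving into the examples, a minimal sketch of what floatX itself does may help: it casts numeric input to Theano's configured float dtype (theano.config.floatX), which is 'float64' by default and commonly 'float32' on GPU setups. The snippet below assumes a default Theano configuration.

import numpy as np
import theano
import pymc3 as pm

# Arrays are cast via .astype; plain Python scalars are wrapped
# as 0-d numpy arrays of the configured dtype.
x = pm.floatX(np.arange(3))
assert x.dtype == theano.config.floatX
assert pm.floatX(1.).dtype == theano.config.floatX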
Example 1: test_vae
def test_vae():
    minibatch_size = 10
    data = pm.floatX(np.random.rand(100))
    x_mini = pm.Minibatch(data, minibatch_size)  # random subset, resampled each step
    x_inp = tt.vector()
    x_inp.tag.test_value = data[:minibatch_size]
    # Encoder parameters: a linear map from data to (mu, rho)
    ae = theano.shared(pm.floatX([.1, .1]))
    be = theano.shared(pm.floatX(1.))
    # Decoder parameters
    ad = theano.shared(pm.floatX(1.))
    bd = theano.shared(pm.floatX(1.))
    enc = x_inp.dimshuffle(0, 'x') * ae.dimshuffle('x', 0) + be
    mu, rho = enc[:, 0], enc[:, 1]
    with pm.Model():
        # Hidden variables
        zs = pm.Normal('zs', mu=0, sd=1, shape=minibatch_size)
        dec = zs * ad + bd
        # Observation model
        pm.Normal('xs_', mu=dec, sd=0.1, observed=x_inp)
        pm.fit(1, local_rv={zs: dict(mu=mu, rho=rho)},
               more_replacements={x_inp: x_mini}, more_obj_params=[ae, be, ad, bd])
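A self-contained sketch of the pm.Minibatch piece used above: it is a symbolic tensor that resamples a random subset of the data on every evaluation, which is what lets more_replacements={x_inp: x_mini} stream minibatches into the fit.

import numpy as np
import pymc3 as pm

data = pm.floatX(np.arange(100))
mb = pm.Minibatch(data, batch_size=10)
print(mb.eval().shape)   # -> (10,)
print(mb.eval()[:3])     # different rows on each evaluation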
Example 2: __call__
def __call__(self, x):
    neg_value = np.float64(self.logp_func(pm.floatX(x)))
    value = -1.0 * nan_to_high(neg_value)
    if self.use_gradient:
        neg_grad = self.dlogp_func(pm.floatX(x))
        if np.all(np.isfinite(neg_grad)):
            self.previous_x = x
        # gradient cleaned of NaNs, in plain float64 for the optimizer
        grad = nan_to_num(-1.0 * neg_grad)
        grad = grad.astype(np.float64)
    else:
        self.previous_x = x
        grad = None
    if self.n_eval % 10 == 0:
        self.update_progress_desc(neg_value, grad)
    if self.n_eval > self.maxeval:
        self.update_progress_desc(neg_value, grad)
        self.progress.close()
        raise StopIteration
    self.n_eval += 1
    self.progress.update(1)
    if self.use_gradient:
        return value, grad
    else:
        return value
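A wrapper like this is what gets handed to a scipy-style optimizer: when gradients are used it must return a (value, gradient) pair. A minimal sketch of that contract, using an illustrative quadratic objective rather than pymc3's internals:

import numpy as np
from scipy import optimize

def cost(x):
    value = np.float64(np.sum(x ** 2))   # objective value
    grad = 2.0 * x                       # its gradient
    return value, grad                   # the pair scipy expects with jac=True

res = optimize.minimize(cost, x0=np.ones(3), jac=True, method='L-BFGS-B')
print(np.round(res.x, 6))                # -> roughly [0. 0. 0.]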
Example 3: test_hh_flow
def test_hh_flow():
    cov = pm.floatX([[2, -1], [-1, 3]])
    with pm.Model():
        pm.MvNormal('mvN', mu=pm.floatX([0, 1]), cov=cov, shape=2)
        # Householder flows: two scale-hh layers followed by a location layer
        nf = NFVI('scale-hh*2-loc')
        nf.fit(25000, obj_optimizer=pm.adam(learning_rate=0.001))
        trace = nf.approx.sample(10000)
    cov2 = pm.trace_cov(trace)
    # the fitted flow should recover the target covariance
    np.testing.assert_allclose(cov, cov2, rtol=0.07)
Example 4: test_var_replacement
def test_var_replacement():
    X_mean = pm.floatX(np.linspace(0, 10, 10))
    y = pm.floatX(np.random.normal(X_mean * 4, .05))
    with pm.Model():
        inp = pm.Normal('X', X_mean, shape=X_mean.shape)
        coef = pm.Normal('b', 4.)
        mean = inp * coef
        pm.Normal('y', mean, .1, observed=y)
        advi = pm.fit(100)
        # sampling the deterministic node at the original input...
        assert advi.sample_node(mean).eval().shape == (10, )
        x_new = pm.floatX(np.linspace(0, 10, 11))
        # ...and with the input replaced by new data of a different length
        assert advi.sample_node(mean, more_replacements={inp: x_new}).eval().shape == (11, )
Example 5: test_free_rv
def test_free_rv(self):
    with pm.Model() as model4:
        Normal('n', observed=[[1, 1],
                              [1, 1]], total_size=[2, 2])
        p4 = theano.function([], model4.logpt)
    with pm.Model() as model5:
        Normal('n', total_size=[2, Ellipsis, 2], shape=(1, 1), broadcastable=(False, False))
        p5 = theano.function([model5.n], model5.logpt)
    assert p4() == p5(pm.floatX([[1]]))
    assert p4() == p5(pm.floatX([[1, 1],
                                 [1, 1]]))
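The equality being asserted rests on total_size rescaling the log-likelihood so that a smaller array stands in for the full dataset. A hedged numeric check of that idea on a simpler, observed-only case:

import numpy as np
import theano
import pymc3 as pm

with pm.Model() as m_full:
    pm.Normal('n', observed=np.ones((2, 2)))                      # all four points
with pm.Model() as m_scaled:
    pm.Normal('n', observed=np.ones((1, 1)), total_size=[2, 2])   # one point, scaled by 4

p_full = theano.function([], m_full.logpt)
p_scaled = theano.function([], m_scaled.logpt)
print(np.isclose(p_full(), p_scaled()))  # -> True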
Example 6: create_shared_params
def create_shared_params(self, start=None):
    if start is None:
        start = self.model.test_point
    else:
        start_ = self.model.test_point.copy()
        update_start_vals(start_, start, self.model)
        start = start_
    start = self.gbij.map(start)  # dict of start values -> flat array
    return {'mu': theano.shared(
                pm.floatX(start), 'mu'),
            'rho': theano.shared(
                pm.floatX(np.zeros((self.global_size,))), 'rho')}
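The zero-initialized rho above is not itself a standard deviation: in pymc3's mean-field parameterization the sd is recovered through a softplus mapping (the rho2sd helper), so rho = 0 corresponds to sd of about 0.693. A quick numeric check, assuming that softplus form:

import numpy as np

def rho2sd(rho):
    # softplus keeps the standard deviation positive for any real rho
    return np.log1p(np.exp(rho))

print(rho2sd(0.0))    # -> 0.6931...
print(rho2sd(-5.0))   # small but strictly positive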
Example 7: test_cloning_available
def test_cloning_available(self):
    gop = generator(integers())
    res = gop ** 2
    shared = theano.shared(floatX(10))
    res1 = theano.clone(res, {gop: shared})  # swap the generator for a shared value
    f = theano.function([], res1)
    assert f() == np.float32(100)
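The cloning mechanism is not specific to pymc3's generator op; theano.clone swaps any subgraph for another. A self-contained illustration of the same replacement pattern:

import numpy as np
import theano
import theano.tensor as tt

x = tt.scalar('x')
expr = x ** 2
ten = theano.shared(np.float64(10.0))
cloned = theano.clone(expr, {x: ten})  # replace x with the shared value
f = theano.function([], cloned)
print(f())                             # -> 100.0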
Example 8: apply
def apply(self, f):
    # f: kernel function for KSD f(histogram) -> (k(x,.), \nabla_x k(x,.))
    stein = Stein(
        approx=self.approx,
        kernel=f,
        use_histogram=self.approx.all_histograms,
        temperature=self.temperature)
    # negate in the configured float dtype so the objective keeps a consistent type
    return pm.floatX(-1) * stein.grad
Example 9: __local_mu_rho
def __local_mu_rho(self):
    if not self.local_vars:
        # no local variables: return empty symbolic constants
        mu, rho = (
            tt.constant(pm.floatX(np.asarray([]))),
            tt.constant(pm.floatX(np.asarray([])))
        )
    else:
        mu = []
        rho = []
        for var in self.local_vars:
            mu.append(self.known[var][0].ravel())
            rho.append(self.known[var][1].ravel())
        mu = tt.concatenate(mu)
        rho = tt.concatenate(rho)
    mu.name = self.__class__.__name__ + '_local_mu'
    rho.name = self.__class__.__name__ + '_local_rho'
    return mu, rho
Example 10: randidx
def randidx(self, size=None):
    if size is None:
        size = (1,)
    elif isinstance(size, tt.TensorVariable):
        if size.ndim < 1:
            size = size[None]
        elif size.ndim > 1:
            raise ValueError('size ndim should be no more than 1d')
        else:
            pass
    else:
        size = tuple(np.atleast_1d(size))
    # draw float positions in [0, n) and truncate them to integer indices
    return (self._rng
            .uniform(size=size,
                     low=pm.floatX(0),
                     high=pm.floatX(self.histogram.shape[0]) - pm.floatX(1e-16))
            .astype('int32'))
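The - pm.floatX(1e-16) term is what keeps each draw strictly below the histogram size: uniform samples on [0, n - eps) truncate to the integers 0 .. n-1. A quick numpy check of the same trick:

import numpy as np

n = 5
draws = np.random.uniform(0, n - 1e-16, size=100000).astype('int32')
print(draws.min(), draws.max())   # -> 0 4, never n itself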
Example 11: from_noise
def from_noise(cls, size, jitter=.01, local_rv=None,
               start=None, model=None, random_seed=None, **kwargs):
    """Initialize Histogram with random noise

    Parameters
    ----------
    size : `int`
        number of initial particles
    jitter : `float`
        initial sd
    local_rv : `dict`
        mapping {model_variable -> local_variable};
        local variables are used for Autoencoding Variational Bayes
        (AEVB; Kingma and Welling, 2014)
    start : `Point`
        initial point
    model : :class:`pymc3.Model`
        PyMC3 model for inference
    random_seed : None or `int`
        leave None to use the package-global RandomStream, or pass any
        other valid value to create an instance-specific one
    kwargs : other kwargs passed to init

    Returns
    -------
    :class:`Empirical`
    """
    hist = cls(
        None,
        local_rv=local_rv,
        model=model,
        random_seed=random_seed,
        **kwargs)
    if start is None:
        start = hist.model.test_point
    else:
        start_ = hist.model.test_point.copy()
        update_start_vals(start_, start, hist.model)
        start = start_
    start = pm.floatX(hist.gbij.map(start))
    # Initialize particles: copies of the start point plus Gaussian jitter
    x0 = np.tile(start, (size, 1))
    x0 += pm.floatX(np.random.normal(0, jitter, x0.shape))
    hist.histogram.set_value(x0)
    return hist
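A hedged usage sketch for this classmethod: building an Empirical approximation directly from noise around the model's test point (the model and particle count here are illustrative).

import pymc3 as pm

with pm.Model():
    pm.Normal('x')
    approx = pm.Empirical.from_noise(100, jitter=0.1)  # 100 jittered particles
print(approx.histogram.get_value().shape)              # -> (100, 1)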
Example 12: test_observed_type
def test_observed_type(self):
    X_ = np.random.randn(100, 5)
    X = pm.floatX(theano.shared(X_))
    with pm.Model():
        x1 = pm.Normal('x1', observed=X_)
        x2 = pm.Normal('x2', observed=X)
    assert x1.type == X.type
    assert x2.type == X.type
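Note that floatX is applied here to a shared variable, not a plain array: on symbolic input it returns a cast tensor rather than a new ndarray, which is why the observed types line up. A small check, assuming a default Theano configuration:

import numpy as np
import theano
import pymc3 as pm

s = theano.shared(np.random.randn(3))               # a float64 shared variable
print(pm.floatX(s).dtype == theano.config.floatX)   # -> True (a symbolic cast)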
Example 13: build_model
def build_model():
    data = np.loadtxt(pm.get_data('efron-morris-75-data.tsv'), delimiter="\t",
                      skiprows=1, usecols=(2, 3))
    atbats = pm.floatX(data[:, 0])
    hits = pm.floatX(data[:, 1])
    N = len(hits)
    # we want to bound kappa from below
    BoundedKappa = pm.Bound(pm.Pareto, lower=1.0)
    with pm.Model() as model:
        phi = pm.Uniform('phi', lower=0.0, upper=1.0)
        kappa = BoundedKappa('kappa', alpha=1.0001, m=1.5)
        thetas = pm.Beta('thetas', alpha=phi * kappa, beta=(1.0 - phi) * kappa, shape=N)
        ys = pm.Binomial('ys', n=atbats, p=thetas, observed=hits)
    return model
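A short usage sketch for the model factory above (the sampler settings are illustrative, not part of the original):

model = build_model()
with model:
    trace = pm.sample(1000, tune=1000)
pm.summary(trace, varnames=['phi', 'kappa'])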
Example 14: apply
def apply(self, f):
    # f: kernel function for KSD f(histogram) -> (k(x,.), \nabla_x k(x,.))
    input_matrix = self.get_input()
    stein = Stein(
        approx=self.approx,
        kernel=f,
        input_matrix=input_matrix,
        temperature=self.temperature)
    return pm.floatX(-1) * stein.grad
Example 15: __init__
def __init__(self, local_rv=None, model=None,
             cost_part_grad_scale=1,
             scale_cost_to_minibatch=False,
             random_seed=None, **kwargs):
    model = modelcontext(model)
    self._scale_cost_to_minibatch = theano.shared(np.int8(0))
    self.scale_cost_to_minibatch = scale_cost_to_minibatch
    # wrap plain numbers in a shared variable; symbolic input is only cast
    if not isinstance(cost_part_grad_scale, theano.Variable):
        self.cost_part_grad_scale = theano.shared(pm.floatX(cost_part_grad_scale))
    else:
        self.cost_part_grad_scale = pm.floatX(cost_part_grad_scale)
    self._seed = random_seed
    self._rng = tt_rng(random_seed)
    self.model = model
    self.check_model(model, **kwargs)
    if local_rv is None:
        local_rv = {}

    def get_transformed(v):
        if hasattr(v, 'transformed'):
            return v.transformed
        return v

    known = {get_transformed(k): v for k, v in local_rv.items()}
    self.known = known
    self.local_vars = self.get_local_vars(**kwargs)
    self.global_vars = self.get_global_vars(**kwargs)
    self._g_order = ArrayOrdering(self.global_vars)
    self._l_order = ArrayOrdering(self.local_vars)
    self.gbij = DictToArrayBijection(self._g_order, {})
    self.lbij = DictToArrayBijection(self._l_order, {})
    self.symbolic_initial_local_matrix = tt.matrix(
        self.__class__.__name__ + '_symbolic_initial_local_matrix')
    self.symbolic_initial_global_matrix = tt.matrix(
        self.__class__.__name__ + '_symbolic_initial_global_matrix')
    self.global_flat_view = model.flatten(
        vars=self.global_vars,
        order=self._g_order,
    )
    self.local_flat_view = model.flatten(
        vars=self.local_vars,
        order=self._l_order,
    )
    self.symbolic_n_samples = self.symbolic_initial_global_matrix.shape[0]
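Why cost_part_grad_scale is wrapped in theano.shared when it arrives as a plain number: a shared value can be changed after the graph is compiled, without rebuilding the compiled function. A minimal illustration of that design choice:

import theano
import pymc3 as pm

scale = theano.shared(pm.floatX(1))
f = theano.function([], scale * 2)
print(f())                     # -> 2.0
scale.set_value(pm.floatX(3))
print(f())                     # -> 6.0, same compiled function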