This article collects typical usage examples of the Python function pymc.invlogit. If you have been wondering what invlogit does, how to call it, or what real code that uses it looks like, the curated examples below may help.
The following shows 15 code examples of the invlogit function, drawn from real projects.
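Before the examples, here is a minimal sketch of what invlogit computes (assuming PyMC 2.x, where invlogit and logit are exposed at the top level): it is the elementwise logistic function invlogit(x) = 1 / (1 + exp(-x)), and it inverts logit.

import numpy as np
import pymc as mc

x = np.array([-2.0, 0.0, 2.0])
p = mc.invlogit(x)   # elementwise logistic: 1 / (1 + exp(-x))
print p              # approximately [ 0.119  0.5  0.881 ]
print mc.logit(p)    # recovers x, up to floating point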
Example 1: logit_normal_draw
def logit_normal_draw(cf_mean, std, N, J):
    std = pl.array(std)
    if mc.__version__ == '2.0rc2':  # version on Omak
        X = [mc.invlogit(mc.rnormal(mu=cf_mean, tau=std**-2)) for n in range(N)]
        Y = pl.array(X)
    else:
        X = mc.rnormal(mu=cf_mean, tau=std**-2, size=(N,J))
        Y = mc.invlogit(X)
    return Y
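A hypothetical call (the means and standard deviations below are made up; pl is pylab and mc is pymc, as the example assumes). The draws are taken on the logit scale around mc.logit of the target fractions, so every returned value lies in (0, 1):

cf_mean = mc.logit(pl.array([.3, .5, .2]))          # means on the logit scale
draws = logit_normal_draw(cf_mean, std=[.2, .2, .2], N=1000, J=3)
print draws.shape                                   # (1000, 3)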
Example 2: generate_synthetic_data
def generate_synthetic_data(truth, key, d):
    """ create simulated data"""
    a0 = d['age_start']
    a1 = d['age_end']
    age_weights = d['age_weights']

    # y is a module-level global in the original source
    d.update(condition='type_2_diabetes',
             year_start=y,
             year_end=y)

    p0 = dismod3.utils.rate_for_range(truth[key], range(a0, a1 + 1), np.ones(a1 + 1 - a0)/(a1+1-a0))
    p0 = dismod3.utils.trim(p0, 1.e-6, 1. - 1.e-6)

    # TODO: make beta dispersion study level (instead of datum level)
    # p1 = mc.rbeta(p0 * dispersion, (1 - p0) * dispersion)
    p1 = p0

    # TODO: add additional covariates
    if key.find('prevalence') != -1:
        if random.random() < .1:
            d['self-reported'] = True
            p1 = mc.invlogit(mc.logit(p1) - .2)
        else:
            d['self-reported'] = False

    # p2 = mc.rbinomial(n, p1) / n
    p2 = float(p1)

    d['value'] = p2
    d['standard_error'] = .0001

    return d
Example 3: simdata_postproc
def simdata_postproc(sp_sub, survey_plan):
    """
    This function should take a value for the Gaussian random field in the submodel
    sp_sub, evaluated at the survey plan locations, and return a simulated dataset.
    """
    p = pm.invlogit(sp_sub)
    n = survey_plan.n
    return pm.rbinomial(n, p)
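The same simulate-then-count pattern written standalone (the arrays below are illustrative stand-ins for sp_sub and survey_plan.n, not values from the original code):

import numpy as np
import pymc as pm

latent = np.random.normal(loc=0., scale=1., size=10)    # stand-in for the field values sp_sub
n_exam = np.repeat(100, 10)                             # stand-in for survey_plan.n
simulated = pm.rbinomial(n_exam, pm.invlogit(latent))   # binomial counts with invlogit prevalences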
Example 4: p_wells
def p_wells(base_fx=base_fx,
            batch_fx=batch_fx,
            plate_fx=plate_fx,
            batchrow_fx=batchrow_fx,
            batchcol_fx=batchcol_fx,
            treatment_fx=treatment_fx):
    # use this ordering to make everything turn into an ArrayContainer
    return invlogit(treatment_fx[treatment_idxs] +
                    base_fx +
                    batch_fx[batch_idxs] +
                    plate_fx[plate_idxs] +
                    batchrow_fx[batchrow_idxs] +
                    batchcol_fx[batchcol_idxs])
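To make the indexing pattern concrete, here is a toy version with made-up effect and index arrays (all names are hypothetical): each well's probability is the inverse logit of a sum of additive effects selected by integer index arrays.

import numpy as np
from pymc import invlogit

treatment_fx = np.array([0.5, -0.3])            # one effect per treatment
batch_fx = np.array([0.1, -0.1, 0.0])           # one effect per batch
treatment_idxs = np.array([0, 0, 1, 1])         # treatment of each well
batch_idxs = np.array([0, 1, 2, 0])             # batch of each well
p = invlogit(treatment_fx[treatment_idxs] + batch_fx[batch_idxs] - 1.0)  # -1.0 plays base_fx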
Example 5: PR_samps
def PR_samps(mesh, Ms, Cs, Vs, ind, facs):
    """
    Converts a mean function, covariance function, nugget and array of correction factors
    to a sample for the average of parasite rate over a given spatiotemporal mesh.
    """
    nm = mesh.shape[0]
    samps = np.empty((len(ind), nm))
    for i in ind:
        C = Cs[i](mesh, mesh)
        C[::nm+1] += Vs[i]   # add the nugget to the diagonal
        # A is a module-level global in the original source
        samps[i,:] = pm.invlogit(pm.mv_normal_cov(Ms[i](mesh), C).ravel()) * facs[A[i]]
    return np.mean(samps, axis=1)
Example 6: known_age_corr_likelihoods_f
def known_age_corr_likelihoods_f(pos, A, fac_array, f_mesh, nug, type=None):
    """
    Computes spline representations over P_mesh for the likelihood
    of N_pos | N_exam, A
    """
    # TODO: Optimize large-N case using CLT of some kind.

    # Allocate work and output arrays.
    N_recs = len(A)
    likelihoods = empty((N_recs, len(f_mesh)))
    likes_now = empty((fac_array.shape[1], len(f_mesh)), dtype=float128)
    splreps = []
    p1 = invlogit(f_mesh)

    # For each record
    for i in xrange(N_recs):
        posi = pos[i]
        Ai = A[i]
        spi = np.sum(posi)
        negi = 1. - posi

        if type is None:
            if len(Ai) < 100:
                fn = outer_small
            else:
                fn = outer_large
        elif type == 's':
            fn = outer_small
        else:
            fn = outer_large

        likelihoods[i,:] = fn(p1, fac_array, Ai, spi, posi, negi, likes_now)

        # Clean out occasional infinities on the edges.
        good_indices = where(1 - isinf(likelihoods[i,:]))[0]

        # Compute spline representations.
        this_splrep = interp.splrep(x=f_mesh[good_indices], y=likelihoods[i, good_indices].squeeze())
        def this_fun(x, sp=this_splrep, Pml=f_mesh[good_indices].min(), Pmh=f_mesh[good_indices].max()):
            out = np.atleast_1d(interp.splev(x, sp))
            if np.any(x < Pml) or np.any(x > Pmh):
                out[np.where(x < Pml)] = -np.Inf
                out[np.where(x > Pmh)] = -np.Inf
            return out.reshape(np.shape(x))
        splreps.append(this_fun)

    return splreps
Example 7: mortality
def mortality(self, key="all-cause_mortality", data=None):
    """ Calculate the all-cause mortality rate for the
    region and sex of disease_model, and return it
    in an array corresponding to age_mesh

    Parameters
    ----------
    key : str, optional
      of the form 'all-cause_mortality+gbd_region+year+sex'
    data : list, optional
      the data list to extract all-cause mortality from
    """
    if self.params.get("initial_value", {}).has_key(key):
        return self.get_initial_value(key)

    if not data:
        data = self.filter_data("all-cause_mortality data")

    if len(data) == 0:
        return NEARLY_ZERO * np.ones(len(self.get_estimate_age_mesh()))
    else:
        M, C = uninformative_prior_gp(c=-1.0, scale=300.0)
        age = []
        val = []
        V = []
        for d in data:
            scale = self.extract_units(d)
            a0 = d.get("age_start", MISSING)
            a1 = d.get("age_end", MISSING)
            y = self.value_per_1(d)
            se = self.se_per_1(d)

            if se == MISSING:
                se = 0.01
            if MISSING in [a0, a1, y]:
                continue

            age.append(0.5 * (a0 + a1))
            val.append(y + 0.00001)
            V.append(se ** 2.0)

        if len(data) > 0:
            gp.observe(M, C, age, mc.logit(val), V)

        normal_approx_vals = mc.invlogit(M(self.get_estimate_age_mesh()))
        self.set_initial_value(key, normal_approx_vals)
        return self.get_initial_value(key)
Example 8: sim_data
def sim_data(N, true_cf=[[.3, .6, .1],
                         [.3, .5, .2]],
             true_std=[[.2, .05, .05],
                       [.3, 0.1, 0.1]],
             sum_to_one=True):
    """
    Create an NxTxJ matrix of simulated data (T is determined by the length
    of true_cf, J by the length of the elements of true_cf).

    true_cf - a list of lists of true cause fractions (each must sum to one)
    true_std - a list of lists of the standard deviations corresponding to the true csmf's
        for each time point. Can either be a list of length J inside a list of length
        1 (in this case, the same standard deviation is used for all time points) or
        can be T lists of length J (in this case, a separate standard deviation
        is specified and used for each time point).
    """
    if sum_to_one == True:
        assert pl.allclose(pl.sum(true_cf, 1), 1), 'The sum of elements of true_cf must equal 1'

    T = len(true_cf)
    J = len(true_cf[0])

    ## if only one std provided, duplicate for all time points
    if len(true_std) == 1 and len(true_cf) > 1:
        true_std = [true_std[0] for i in range(len(true_cf))]

    ## transform the mean and std to logit space
    transformed_std = []
    for t in range(T):
        pi_i = pl.array(true_cf[t])
        sigma_pi_i = pl.array(true_std[t])
        transformed_std.append(((1/(pi_i*(pi_i-1)))**2 * sigma_pi_i**2)**0.5)

    ## find minimum standard deviation (by cause across time) and draw from this
    min = pl.array(transformed_std).min(0)
    common_perturbation = [pl.ones([T,J])*mc.rnormal(mu=0, tau=min**-2) for n in range(N)]

    ## draw from remaining variation
    tau = pl.array(transformed_std)**2 - min**2
    tau[tau==0] = 0.000001
    additional_perturbation = [[mc.rnormal(mu=0, tau=tau[t]**-1) for t in range(T)] for n in range(N)]

    result = pl.zeros([N, T, J])
    for n in range(N):
        result[n, :, :] = [mc.invlogit(mc.logit(true_cf[t]) + common_perturbation[n][t] + additional_perturbation[n][t]) for t in range(T)]

    return result
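A hypothetical call using the default arguments (N=100 replicates, T=2 time points, J=3 causes). Note that after the logit-normal perturbations the rows are only approximately cause fractions:

samples = sim_data(100)
print samples.shape            # (100, 2, 3)
print samples.sum(axis=2)[:2]  # close to, but not exactly, 1 after the logit-scale noise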
Example 9: normal_approx
def normal_approx(asrf):
    """
    This 'normal approximation' of the age-specific rate function is
    formed by using each rate to produce an estimate of the
    age-specific rate, and then saying that the logit of the true
    rate function is a gaussian process and these age-specific rates
    are observations of this gaussian process.

    This is less valid and less accurate than using mcmc or map on the
    vars produced by the model_rate_list method below, but maybe it
    will be faster.
    """
    M, C = uninformative_prior_gp()

    # use prior to set rate near zero as requested
    for prior_str in asrf.fit.get('priors', '').split('\n'):
        prior = prior_str.split()
        if len(prior) > 0 and prior[0] == 'zero':
            age_start = int(prior[1])
            age_end = int(prior[2])
            gp.observe(M, C, range(age_start, age_end+1), [-10.], [0.])

    for r in asrf.rates.all():
        mesh, obs, V = logit_rate_from_range(r)

        # make sure that there is something to observe
        if mesh == []:
            continue

        # uncomment the following line to make more inferences than
        # are valid from the data
        #gp.observe(M, C, mesh, obs, V)

        # uncomment the following 2 lines to make fewer inferences than
        # possible: it may be better to waste information than to have
        # false confidence
        ii = len(mesh)/2
        gp.observe(M, C, [mesh[ii]], [obs[ii]], [V[ii]])

    x = asrf.fit['out_age_mesh']
    na_rate = mc.invlogit(M(x))
    asrf.fit['normal_approx'] = list(na_rate)
    asrf.save()

    return M, C
Example 10: mu_age_p
def mu_age_p(logit_C0=logit_C0, i=rate["i"]["mu_age"], r=rate["r"]["mu_age"], f=rate["f"]["mu_age"]):
    # for acute conditions, it is silly to use the ODE solver to
    # derive prevalence, and it can be approximated with a simple
    # transformation of incidence
    # (m_all, N, and fun are globals supplied by the enclosing model)
    if r.min() > 5.99:
        return i / (r + m_all + f)

    C0 = mc.invlogit(logit_C0)

    x = pl.hstack((i, r, f, 1 - C0, C0))
    y = fun.forward(0, x)

    susceptible = y[:N]
    condition = y[N:]

    p = condition / (susceptible + condition)
    p[pl.isnan(p)] = 0.0

    return p
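The early return uses a steady-state approximation: setting dC/dt = i*S - (r + m + f)*C to zero gives prevalence p = i / (i + r + m + f), which is roughly i / (r + m + f) when remission r dominates. A quick numeric check with made-up values (m_all here stands in for the all-cause mortality array of the original model):

i, r, f, m_all = 0.01, 8.0, 0.1, 0.02
print i / (r + m_all + f)   # ~0.0012: prevalence stays small when remission is fast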
Example 11: reduce_realizations
def reduce_realizations(filename, reduce_fns, slices, a_lo, a_hi, n_per):
    """
    Generates n_per * len(filename.root.realizations) realizations,
    on the space-time slice defined by slices (a tuple of three slices)
    and reduces them according to the given functions. reduce_fns should
    be a list of Python functions of the form

        reduce(this_PR_chunk, product_sofar=None)

    that incorporate this_realization into product_sofar in the desired
    way. It should be robust to the product_sofar=None case, of course.
    a_lo and a_hi are the limits of the age range.
    """
    slices = tuple(slices)
    hf = tb.openFile(filename)
    hr = hf.root
    n_realizations = len(hr.realizations)
    products = dict(zip(reduce_fns, [None]*len(reduce_fns)))
    N_facs = int(1e5)

    # Get nugget variance and age-correction factors
    V = hr.PyMCsamples.col('V')[:]
    facs = mbgw.correction_factors.age_corr_factors_from_limits(a_lo, a_hi, N_facs)

    for i in xrange(n_realizations):
        # Pull out parasite rate chunk
        tot_slice = (slice(i, i+1, 1),) + slices
        f_chunk = hr.realizations[tot_slice].squeeze()

        for j in xrange(n_per):
            chunk = f_chunk + np.random.normal(loc=0, scale=np.sqrt(V[i]), size=f_chunk.shape)
            chunk = pm.invlogit(chunk)
            chunk *= facs[np.random.randint(N_facs, size=np.prod(chunk.shape))]
            chunk = chunk.reshape(f_chunk.shape)

            for f in reduce_fns:
                product_sofar = products[f]
                products[f] = f(chunk, product_sofar)

    return products
Example 12: ages_and_data
def ages_and_data(N_exam, f_samp, correction_factor_array, age_lims):
    """Called by pred_samps. Simulates ages of survey participants and data given f."""
    N_samp = len(f_samp)
    N_age_samps = correction_factor_array.shape[1]

    # Get samples for the age distribution at the observation points.
    # (S_trace is a module-level global in the original source)
    age_distribution = []
    for i in xrange(N_samp):
        l = age_lims[i]
        age_distribution.append(S_trace[np.random.randint(S_trace.shape[0]), 0, l[0]:l[1]+1])
        age_distribution[-1] /= np.sum(age_distribution[-1])

    # Draw age for each individual, draw an age-correction profile for each location,
    # compute probability of positive for each individual, see how many individuals are
    # positive.
    A = []
    pos = []
    for s in xrange(N_samp):
        A.append(np.array(pm.rcategorical(age_distribution[s], size=N_exam[s]), dtype=int) + age_lims[s][0])
        P_samp = pm.invlogit(f_samp[s].ravel())*correction_factor_array[:, np.random.randint(N_age_samps)][A[-1]]
        pos.append(pm.rbernoulli(P_samp))

    return A, pos, age_distribution
Example 13: fit_emp_prior
def fit_emp_prior(dm, param_type):
    """ Generate an empirical prior distribution for a single disease parameter

    Parameters
    ----------
    dm : dismod3.DiseaseModel
      The object containing all the data, (hyper)-priors, and additional
      information (like input and output age-mesh).
    param_type : str, one of 'incidence', 'prevalence', 'remission', 'excess-mortality'
      The disease parameter to work with

    Notes
    -----
    The results of this fit are stored in the disease model's params
    hash for use when fitting multiple parameter types together

    Example
    -------
    $ python2.5 gbd_fit.py 175 -t incidence -p 'zero 0 4, zero 41 100, smooth 25'  # takes 7m to run
    """
    data = [d for d in dm.data if clean(d['data_type']).find(param_type) != -1]

    # don't do anything if there is no data for this parameter type
    if len(data) == 0:
        return

    dm.fit_initial_estimate(param_type, data)
    dm.vars = setup(dm, param_type, data)

    # fit the model
    dm.map = mc.MAP(dm.vars)
    try:
        dm.map.fit(method='fmin_powell', iterlim=500, tol=.00001, verbose=1)
    except KeyboardInterrupt:
        print 'User halted optimization routine before optimal value found'

    # save the results in the param_hash
    dm.clear_empirical_prior()
    prior_vals = dict(
        alpha=list(dm.vars['region_coeffs'].value),
        beta=list(dm.vars['study_coeffs'].value),
        gamma=list(dm.vars['age_coeffs'].value),
        sigma=float(dm.vars['dispersion'].value))
    dm.set_empirical_prior(param_type, prior_vals)

    dispersion = prior_vals['sigma']
    for r in dismod3.gbd_regions:
        for y in dismod3.gbd_years:
            for s in dismod3.gbd_sexes:
                key = dismod3.gbd_key_for(param_type, r, y, s)
                logit_mu = predict_logit_rate(regional_covariates(key), **prior_vals)
                mu = mc.invlogit(logit_mu)
                dm.set_initial_value(key, mu)
                dm.set_mcmc('emp_prior_mean', key, mu)
                dm.set_mcmc('emp_prior_lower_ui', key, mc.invlogit(logit_mu - 1.96*dispersion))
                dm.set_mcmc('emp_prior_upper_ui', key, mc.invlogit(logit_mu + 1.96*dispersion))

    key = dismod3.gbd_key_for(param_type, 'world', 1997, 'total')
    logit_mu = predict_logit_rate(regional_covariates(key), **prior_vals)
    mu = mc.invlogit(logit_mu)
    dm.set_initial_value(key, mu)
    dm.set_mcmc('emp_prior_mean', key, mu)
    dm.set_mcmc('emp_prior_lower_ui', key, mc.invlogit(logit_mu - 1.96*dispersion))
    dm.set_mcmc('emp_prior_upper_ui', key, mc.invlogit(logit_mu + 1.96*dispersion))
Example 14: theta
def theta(a=alpha, b=beta):
    return pymc.invlogit(a + b * x)
Example 15: theta
def theta(a=alpha, b=beta):
    """theta = logit^{-1}(a + b*x)"""
    return pymc.invlogit(a + b * x)
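Examples 14 and 15 are the deterministic node of a logistic-regression model in the style of the bioassay example from the PyMC 2 documentation. A minimal self-contained sketch of the surrounding model that such a theta typically lives in (the data values and priors below are illustrative assumptions, not from the original snippets):

import numpy as np
import pymc

x = np.array([-0.86, -0.30, -0.05, 0.73])   # dose levels (made-up data)
n = 5 * np.ones(4, dtype=int)               # subjects per group
y_obs = np.array([0, 1, 3, 5])              # positive outcomes observed

alpha = pymc.Normal('alpha', mu=0.0, tau=0.01)   # vague priors (assumed)
beta = pymc.Normal('beta', mu=0.0, tau=0.01)

@pymc.deterministic
def theta(a=alpha, b=beta):
    """theta = logit^{-1}(a + b*x)"""
    return pymc.invlogit(a + b * x)

y = pymc.Binomial('y', n=n, p=theta, value=y_obs, observed=True)

m = pymc.MCMC([alpha, beta, theta, y])
m.sample(iter=10000, burn=5000)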