This page collects typical usage examples of the rng function from the Python module microscopes.common.rng. If you are wondering what exactly rng does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
A total of 15 code examples of the rng function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
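Before the examples, here is a minimal sketch of the two ways rng is constructed in the snippets below: with no arguments, or with an explicit integer seed. The import path follows the module named above; the variable names are illustrative only.

from microscopes.common.rng import rng

prng = rng()          # default-constructed RNG, as in most of the examples below
seeded = rng(543234)  # explicitly seeded RNG, as in Example 7

The resulting object is then handed to model routines, either positionally or through a keyword argument such as r=prng, exactly as the examples below do.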
Example 1: test_explicit_exceptions
def test_explicit_exceptions():
    """ValueError should be raised for bad assignments
    """
    prng = rng()
    N, V = 3, 7
    defn = model_definition(N, V)
    data = [[0, 1, 2, 3], [0, 1, 4], [0, 1, 5, 6]]
    # We should get an error if we leave out a dish assignment for a given table
    table_assignments = [[1, 2, 1, 2], [1, 1, 1], [3, 3, 3, 1]]
    dish_assignments = [[0, 1, 2], [0, 3], [0, 1, 2]]
    assert_raises(ValueError,
                  initialize,
                  defn, data,
                  table_assignments=table_assignments,
                  dish_assignments=dish_assignments)
    # We should get an error if we leave out a table assignment for a given word
    table_assignments = [[1, 2, 1, 2], [1, 1, 1], [3, 3, 3]]
    dish_assignments = [[0, 1, 2], [0, 3], [0, 1, 2, 1]]
    assert_raises(ValueError,
                  initialize,
                  defn, data,
                  table_assignments=table_assignments,
                  dish_assignments=dish_assignments)
Example 2: test_kernel_gibbs_hp
def test_kernel_gibbs_hp():
    _test_kernel_gibbs_hp(initialize,
                          numpy_dataview,
                          bind,
                          gibbs_hp,
                          'grid_gibbs_hp_samples_pdf',
                          rng())
Example 3: _test_runner_simple
def _test_runner_simple(defn, kc_fn):
    views = map(numpy_dataview, toy_dataset(defn))
    kc = kc_fn(defn)
    prng = rng()
    latent = model.initialize(defn, views, prng)
    r = runner.runner(defn, views, latent, kc)
    r.run(prng, 10)
Example 4: test_slice_theta_mm
def test_slice_theta_mm():
    N = 100
    data = np.array(
        [(np.random.random() < 0.8,) for _ in xrange(N)],
        dtype=[('', bool)])
    defn = model_definition(N, [bbnc])
    r = rng()
    prior = {'alpha': 1.0, 'beta': 9.0}
    view = numpy_dataview(data)
    s = initialize(
        defn,
        view,
        cluster_hp={'alpha': 1., 'beta': 9.},
        feature_hps=[prior],
        r=r,
        assignment=[0] * N)
    heads = len([1 for y in data if y[0]])
    tails = N - heads
    alpha1 = prior['alpha'] + heads
    beta1 = prior['beta'] + tails
    bs = bind(s, view)
    params = {0: {'p': 0.05}}

    def sample_fn():
        theta(bs, r, tparams=params)
        return s.get_suffstats(0, 0)['p']

    rv = beta(alpha1, beta1)
    assert_1d_cont_dist_approx_sps(sample_fn, rv, nsamples=50000)
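The reference distribution beta(alpha1, beta1) above is the standard Beta-Bernoulli conjugate update (a general fact, not specific to this library): with a Beta(alpha, beta) prior on the success probability and the heads/tails counts taken from the data, the posterior is Beta(alpha + heads, beta + tails). For instance, if roughly 80 of the 100 simulated draws come out True, the slice-sampled sufficient statistic is compared against approximately Beta(1.0 + 80, 9.0 + 20) = Beta(81.0, 29.0).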
Example 5: test_multivariate_models_cxx
def test_multivariate_models_cxx():
    _test_multivariate_models(
        initialize,
        numpy_dataview,
        bind,
        gibbs_assign,
        rng())
Example 6: _test_convergence_bb_cxx
def _test_convergence_bb_cxx(N,
                             D,
                             kernel,
                             preprocess_data_fn=None,
                             nonconj=False,
                             burnin_niters=10000,
                             skip=10,
                             ntries=50,
                             nsamples=1000,
                             kl_places=2):
    r = rng()
    cluster_hp = {'alpha': 2.0}
    feature_hps = [{'alpha': 1.0, 'beta': 1.0}] * D
    defn = model_definition(N, [bb] * D)
    nonconj_defn = model_definition(N, [bbnc] * D)
    Y, posterior = data_with_posterior(
        defn, cluster_hp, feature_hps, preprocess_data_fn)
    data = numpy_dataview(Y)
    s = initialize(nonconj_defn if nonconj else defn,
                   data,
                   cluster_hp=cluster_hp,
                   feature_hps=feature_hps,
                   r=r)
    bs = bind(s, data)
    wrapped_kernel = lambda s: kernel(s, r)
    _test_convergence(bs,
                      posterior,
                      wrapped_kernel,
                      burnin_niters,
                      skip,
                      ntries,
                      nsamples,
                      kl_places)
Example 7: test_cxx_sample_post_pred_given_data
def test_cxx_sample_post_pred_given_data():
    assert D == 5
    y_new = ma.masked_array(
        np.array([(True, False, True, True, True)], dtype=[('', np.bool)] * 5),
        mask=[(False, False, True, True, True)])[0]
    _test_sample_post_pred(
        cxx_initialize, cxx_numpy_dataview, y_new, rng(543234))
Example 8: test_cant_serialize
def test_cant_serialize():
    N, V = 10, 20
    defn = model_definition(N, V)
    data = toy_dataset(defn)
    prng = rng()
    s = initialize(defn, data, prng)
    s.serialize()
Example 9: test_slice_theta_irm
def test_slice_theta_irm():
    N = 10
    defn = model_definition([N], [((0, 0), bbnc)])
    data = np.random.random(size=(N, N)) < 0.8
    view = numpy_dataview(data)
    r = rng()
    prior = {'alpha': 1.0, 'beta': 9.0}
    s = initialize(
        defn,
        [view],
        r=r,
        cluster_hps=[{'alpha': 2.0}],
        relation_hps=[prior],
        domain_assignments=[[0] * N])
    bs = bind(s, 0, [view])
    params = {0: {'p': 0.05}}
    heads = len([1 for y in data.flatten() if y])
    tails = len([1 for y in data.flatten() if not y])
    alpha1 = prior['alpha'] + heads
    beta1 = prior['beta'] + tails

    def sample_fn():
        theta(bs, r, tparams=params)
        return s.get_suffstats(0, [0, 0])['p']

    rv = beta(alpha1, beta1)
    assert_1d_cont_dist_approx_sps(sample_fn, rv, nsamples=50000)
Example 10: test_alpha_numeric
def test_alpha_numeric():
    docs = [list('abcd'), list('cdef')]
    defn = model_definition(len(docs), v=6)
    prng = rng()
    s = initialize(defn, docs, prng)
    assert_equals(s.nentities(), len(docs))
    assert_equals(s.nwords(), 6)
Example 11: test_dense_vs_sparse
def test_dense_vs_sparse():
    # XXX: really belongs in irm test cases, but kernels has a nice cluster
    # enumeration iterator
    r = rng()
    n = 5
    raw = ma.array(
        np.random.choice(np.arange(20), size=(n, n)),
        mask=np.random.choice([False, True], size=(n, n)))
    dense = [relation_numpy_dataview(raw)]
    sparse = [sparse_relation_dataview(_tocsr(raw))]
    domains = [n]
    relations = [((0, 0), gp)]
    defn = irm_definition(domains, relations)

    def score_fn(data):
        def f(assignments):
            s = irm_initialize(defn, data, r=r, domain_assignments=assignments)
            assign = sum(s.score_assignment(i)
                         for i in xrange(len(assignments)))
            likelihood = s.score_likelihood(r)
            return assign + likelihood
        return f

    product_assignments = tuple(map(list, map(permutation_iter, domains)))
    dense_posterior = scores_to_probs(
        np.array(map(score_fn(dense), it.product(*product_assignments))))
    sparse_posterior = scores_to_probs(
        np.array(map(score_fn(sparse), it.product(*product_assignments))))
    assert_1d_lists_almost_equals(dense_posterior, sparse_posterior, places=3)
Example 12: test_runner_multiprocessing_convergence
def test_runner_multiprocessing_convergence():
    N, D = 4, 5
    defn = model_definition(N, [bb] * D)
    prng = rng()
    Y, posterior = data_with_posterior(defn, r=prng)
    view = numpy_dataview(Y)
    latents = [model.initialize(defn, view, prng)
               for _ in xrange(mp.cpu_count())]
    runners = [runner.runner(defn, view, latent, ['assign'])
               for latent in latents]
    r = parallel.runner(runners)
    r.run(r=prng, niters=1000)  # burnin
    idmap = {C: i for i, C in enumerate(permutation_iter(N))}

    def sample_iter():
        r.run(r=prng, niters=10)
        for latent in r.get_latents():
            yield idmap[tuple(permutation_canonical(latent.assignments()))]

    ref = [None]

    def sample_fn():
        if ref[0] is None:
            ref[0] = sample_iter()
        try:
            return next(ref[0])
        except StopIteration:
            ref[0] = None
            return sample_fn()

    assert_discrete_dist_approx(sample_fn, posterior, ntries=100, kl_places=2)
Example 13: test_runner_multiprocessing_convergence
def test_runner_multiprocessing_convergence():
    domains = [4]
    defn = model_definition(domains, [((0, 0), bb)])
    prng = rng()
    relations, posterior = data_with_posterior(defn, prng)
    views = map(numpy_dataview, relations)
    latents = [model.initialize(defn, views, prng)
               for _ in xrange(mp.cpu_count())]
    kc = [('assign', range(len(domains)))]
    runners = [runner.runner(defn, views, latent, kc) for latent in latents]
    r = parallel.runner(runners)
    r.run(r=prng, niters=10000)  # burnin
    product_assignments = tuple(map(list, map(permutation_iter, domains)))
    idmap = {C: i for i, C in enumerate(it.product(*product_assignments))}

    def sample_iter():
        r.run(r=prng, niters=10)
        for latent in r.get_latents():
            key = tuple(tuple(permutation_canonical(latent.assignments(i)))
                        for i in xrange(len(domains)))
            yield idmap[key]

    ref = [None]

    def sample_fn():
        if ref[0] is None:
            ref[0] = sample_iter()
        try:
            return next(ref[0])
        except StopIteration:
            ref[0] = None
            return sample_fn()

    assert_discrete_dist_approx(sample_fn, posterior, ntries=100, kl_places=2)
Example 14: test_posterior_predictive_statistic
def test_posterior_predictive_statistic():
    N, D = 10, 4  # D needs to be even
    defn = model_definition(N, [bb] * D)
    Y = toy_dataset(defn)
    prng = rng()
    view = numpy_dataview(Y)
    latents = [model.initialize(defn, view, prng) for _ in xrange(10)]

    q = ma.masked_array(
        np.array([(False,) * D], dtype=[('', bool)] * D),
        mask=[(False,) * (D / 2) + (True,) * (D / 2)])
    statistic = query.posterior_predictive_statistic(q, latents, prng)
    assert_equals(statistic.shape, (1,))
    assert_equals(len(statistic.dtype), D)

    statistic = query.posterior_predictive_statistic(
        q, latents, prng, merge='mode')
    assert_equals(statistic.shape, (1,))
    assert_equals(len(statistic.dtype), D)

    statistic = query.posterior_predictive_statistic(
        q, latents, prng, merge=['mode', 'mode', 'avg', 'avg'])
    assert_equals(statistic.shape, (1,))
    assert_equals(len(statistic.dtype), D)

    q = ma.masked_array(
        np.array([(False,) * D] * 3, dtype=[('', bool)] * D),
        mask=[(False,) * (D / 2) + (True,) * (D / 2)] * 3)
    statistic = query.posterior_predictive_statistic(q, latents, prng)
    assert_equals(statistic.shape, (3,))
    assert_equals(len(statistic.dtype), D)
Example 15: data_with_posterior
def data_with_posterior(defn,
                        cluster_hp=None,
                        feature_hps=None,
                        preprocess_data_fn=None,
                        r=None):
    # XXX(stephentu): should only accept conjugate models
    if r is None:
        r = rng()
    Y_clusters, _ = sample(defn, cluster_hp, feature_hps, r)
    Y = np.hstack(Y_clusters)
    if preprocess_data_fn:
        Y = preprocess_data_fn(Y)
    data = numpy_dataview(Y)

    def score_fn(assignment):
        s = initialize(defn,
                       data,
                       r,
                       cluster_hp=cluster_hp,
                       feature_hps=feature_hps,
                       assignment=assignment)
        return s.score_joint(r)

    posterior = dist_on_all_clusterings(score_fn, defn.n())
    return Y, posterior
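For context, data_with_posterior is the helper that Examples 6 and 12 above call to obtain a dataset together with its exact posterior over clusterings. A minimal sketch of the call pattern, using the same names and model sizes as Example 12 (imports omitted here, as in the rest of this page):

prng = rng()
defn = model_definition(4, [bb] * 5)              # 4 entities, 5 beta-Bernoulli features
Y, posterior = data_with_posterior(defn, r=prng)
view = numpy_dataview(Y)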