

Python models.Variational Class Code Examples

This article collects typical usage examples of the Python class edward.models.Variational. If you have been wondering what the Variational class does, how to use it, or what it looks like in real code, the curated class examples here should help.


The following presents 15 code examples of the Variational class, sorted by popularity by default.
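Before the examples, here is a minimal, self-contained sketch of the core pattern most of them share, written against the historical pre-1.0 Edward API these snippets use (a Variational container of mean-field factors, fit with ed.MFVI); note that later Edward releases changed this API, so this sketch targets the same old versions as the examples.

import edward as ed
from edward.stats import norm
from edward.models import Variational, Normal

class ToyModel:
    """p(z) = Normal(z; 0, 1); no observed data."""
    def log_prob(self, xs, zs):
        return norm.logpdf(zs, 0.0, 1.0)

variational = Variational()  # container for mean-field factors
variational.add(Normal())    # q(z) = Normal(z; mu, sigma), parameters learned

inference = ed.MFVI(ToyModel(), variational)
inference.run(n_iter=1000)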

Example 1: _test

    def _test(self, sess, data, n_minibatch, x=None, is_file=False):
        model = NormalModel()
        variational = Variational()
        variational.add(Normal())

        inference = ed.MFVI(model, variational, data)
        inference.initialize(n_minibatch=n_minibatch)

        if x is not None:
            # Placeholder setting.
            # Check data is same as data fed to it.
            feed_dict = {inference.data['x']: x}
            # avoid directly fetching placeholder
            data_id = {k: tf.identity(v) for k, v in
                       six.iteritems(inference.data)}
            val = sess.run(data_id, feed_dict)
            assert np.all(val['x'] == x)
        elif is_file:
            # File reader setting.
            # Check data varies by session run.
            val = sess.run(inference.data)
            val_1 = sess.run(inference.data)
            assert not np.all(val['x'] == val_1['x'])
        elif n_minibatch is None:
            # Preloaded full setting.
            # Check data is full data.
            val = sess.run(inference.data)
            assert np.all(val['x'] == data['x'])
        elif n_minibatch == 1:
            # Preloaded batch setting, with n_minibatch=1.
            # Check data is randomly shuffled.
            assert not np.all([sess.run(inference.data)['x'] == data['x'][i] for i in range(10)])
        else:
            # Preloaded batch setting.
            # Check data is randomly shuffled.
            val = sess.run(inference.data)
            assert not np.all(val['x'] == data['x'][:n_minibatch])
            # Check data varies by session run.
            val_1 = sess.run(inference.data)
            assert not np.all(val['x'] == val_1['x'])

        inference.finalize()
Developer: TalkingData | Project: edward | Lines: 42 | Source: test_inference_data.py

Example 2: _test

def _test(data, n_data, x=None, is_file=False):
    sess = ed.get_session()
    model = NormalModel()
    variational = Variational()
    variational.add(Normal())

    inference = ed.MFVI(model, variational, data)
    inference.initialize(n_data=n_data)

    if x is not None:
        # Placeholder setting.
        # Check data is same as data fed to it.
        feed_dict = {inference.data['x']: x}
        # avoid directly fetching placeholder
        data_id = {k: tf.identity(v) for k, v in
                   six.iteritems(inference.data)}
        val = sess.run(data_id, feed_dict)
        assert np.all(val['x'] == x)
    elif is_file:
        # File reader setting.
        # Check data varies by session run.
        val = sess.run(inference.data)
        val_1 = sess.run(inference.data)
        assert not np.all(val['x'] == val_1['x'])
    elif n_data is None:
        # Preloaded full setting.
        # Check data is full data.
        val = sess.run(inference.data)
        assert np.all(val['x'] == data['x'])
    else:
        # Preloaded batch setting.
        # Check data is randomly shuffled.
        val = sess.run(inference.data)
        assert not np.all(val['x'] == data['x'][:n_data])
        # Check data varies by session run.
        val_1 = sess.run(inference.data)
        assert not np.all(val['x'] == val_1['x'])

    inference.finalize()
    sess.close()
    del sess
    tf.reset_default_graph()
Developer: appcoreopc | Project: edward | Lines: 42 | Source: test_inference_data.py

Example 3: __init__

    def __init__(self, model, data=Data(), transform=tf.identity):
        variational = Variational()
        if hasattr(model, 'num_vars'):
            variational.add(PointMass(model.num_vars, transform))
        else:
            variational.add(PointMass(0, transform))

        VariationalInference.__init__(self, model, variational, data)
Developer: cbonnett | Project: edward | Lines: 9 | Source: inferences.py

Example 4: __init__

    def __init__(self, model, data=Data(), params=None):
        variational = Variational()
        if hasattr(model, 'num_vars'):
            variational.add(PointMass(model.num_vars, params))
        else:
            variational.add(PointMass(0))

        VariationalInference.__init__(self, model, variational, data)
Developer: diengadji | Project: edward | Lines: 9 | Source: inferences.py

Example 5: __init__

    def __init__(self, model, data=None, params=None):
        with tf.variable_scope("variational"):
            variational = Variational()
            if hasattr(model, 'n_vars'):
                variational.add(PointMass(model.n_vars, params))
            else:
                variational.add(PointMass(0))

        super(MAP, self).__init__(model, variational, data)
Developer: leezqcst | Project: edward | Lines: 10 | Source: inferences.py

Example 6: p

    Posterior: (1-dimensional) Normal
Variational model
    Likelihood: Mean-field Normal
"""
import edward as ed
import tensorflow as tf

from edward.models import Variational, Normal
from edward.stats import norm

class NormalPosterior:
    """
    p(x, z) = p(z) = p(z | x) = Normal(z; mu, std)
    """
    def __init__(self, mu, std):
        self.mu = mu
        self.std = std

    def log_prob(self, xs, zs):
        return norm.logpdf(zs, self.mu, self.std)

ed.set_seed(42)
mu = tf.constant(1.0)
std = tf.constant(1.0)
model = NormalPosterior(mu, std)
variational = Variational()
variational.add(Normal())

inference = ed.MFVI(model, variational)
inference.run(n_iter=10000)
Developer: 313-Ventures | Project: edward | Lines: 30 | Source: normal.py
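As background (standard mean-field variational inference, not commentary from the original project): ed.MFVI fits the variational parameters \lambda by maximizing the evidence lower bound

    \mathcal{L}(\lambda) = \mathbb{E}_{q(z;\lambda)}\big[\log p(x, z) - \log q(z;\lambda)\big],

which is equivalent to minimizing \mathrm{KL}(q(z;\lambda) \,\|\, p(z \mid x)). Since the model above has no observed data and its log_prob is norm.logpdf(zs, mu, std) with mu = std = 1, the optimal q is simply Normal(z; 1, 1).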

Example 7: range

    stds = [[0.1, 0.1], [0.1, 0.1]]
    x = np.zeros((N, 2), dtype=np.float32)
    for n in range(N):
        k = np.argmax(np.random.multinomial(1, pi))
        x[n, :] = np.random.multivariate_normal(mus[k], np.diag(stds[k]))

    return {'x': x}


ed.set_seed(42)
data = build_toy_dataset(500)
plt.scatter(data['x'][:, 0], data['x'][:, 1])
plt.axis([-3, 3, -3, 3])
plt.title("Simulated dataset")
plt.show()

model = MixtureGaussian(K=2, D=2)
variational = Variational()
variational.add(Dirichlet(model.K))
variational.add(Normal(model.K*model.D))
variational.add(InvGamma(model.K*model.D))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=4000, n_samples=50, n_minibatch=10)

clusters = np.argmax(ed.evaluate('log_likelihood', model, variational, data), axis=0)
plt.scatter(data['x'][:, 0], data['x'][:, 1], c=clusters, cmap=cm.bwr)
plt.axis([-3, 3, -3, 3])
plt.title("Predicted cluster assignments")
plt.show()
Developer: TalkingData | Project: edward | Lines: 30 | Source: mixture_gaussian.py
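Reading off the three variational.add calls above (an interpretation of the code, not text from the original project): the mean-field family for this K = 2, D = 2 mixture factorizes as

    q(\pi, \mu, \sigma^2) = \mathrm{Dirichlet}(\pi) \prod_{k=1}^{K} \prod_{d=1}^{D} \mathrm{Normal}(\mu_{kd}) \, \mathrm{InvGamma}(\sigma^2_{kd}),

with the Normal and InvGamma factors flattened into vectors of length model.K*model.D, which is why those two factors are constructed with that size.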

Example 8: x

    ed.set_seed(0)
    D = 1
    x = np.linspace(-3, 3, num=n_data)
    y = np.tanh(x) + norm.rvs(0, noise_std, size=n_data)
    y[y < 0.5] = 0
    y[y >= 0.5] = 1
    x = (x - 4.0) / 4.0
    x = x.reshape((n_data, D))
    y = y.reshape((n_data, 1))
    data = np.concatenate((y, x), axis=1) # n_data x (D+1)
    data = tf.constant(data, dtype=tf.float32)
    return ed.Data(data)

ed.set_seed(42)
model = HierarchicalLogistic(weight_dim=[1,1])
variational = Variational()
variational.add(Normal(model.num_vars))
data = build_toy_dataset()

# Set up figure
fig = plt.figure(figsize=(8,8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

inference = ed.MFVI(model, variational, data)
inference.initialize(n_print=5)
sess = ed.get_session()
for t in range(600):
    loss = inference.update()
    if t % inference.n_print == 0:
Developer: aporia3517 | Project: edward | Lines: 31 | Source: hierarchical_logistic_regression.py

Example 9: NormalBernoulli


ed.set_seed(42)
model = NormalBernoulli(n_vars=10)

# Use the variational model
# q(z | x) = prod_{n=1}^N Normal(z_n | loc, scale = neural_network(x_n))
# It is a distribution of the latent variables z_n for each data
# point x_n. We use neural_network() to globally parameterize the local
# variational factors q(z_n | x).
# We also do data subsampling during inference. Therefore we only need
# to explicitly represent the variational factors for a mini-batch,
# q(z_{batch} | x) = prod_{m=1}^{n_data} Normal(z_m | loc, scale = neural_network(x_m))
x_ph = tf.placeholder(tf.float32, [N_MINIBATCH, 28 * 28])
loc, scale = neural_network(x_ph)
variational = Variational()
variational.add(Normal(model.num_vars * N_MINIBATCH, loc=loc, scale=scale))

# MNIST batches are fed at training time.
if not os.path.exists(DATA_DIR):
    os.makedirs(DATA_DIR)

mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)
x = tf.placeholder(tf.float32, [N_MINIBATCH, 28 * 28])
data = {'x': x}

sess = ed.get_session()
inference = ed.MFVI(model, variational, data)
with tf.variable_scope("model") as scope:
    inference.initialize(optimizer="PrettyTensor")
with tf.variable_scope("model", reuse=True) as scope:
Developer: jf0510310315 | Project: edward | Lines: 29 | Source: convolutional_vae.py
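The neural_network() helper that produces loc and scale is elided from this excerpt. Purely for illustration, a hypothetical stand-in (not the original implementation; the name signature, layer sizes, and initialization are assumptions) could be written in the TensorFlow 1.x style these examples use:

import tensorflow as tf

def neural_network(x, n_vars=10, n_hidden=256):
    # Hypothetical inference network: map a flattened MNIST batch to the
    # mean and positive scale of the amortized posterior q(z | x).
    d_in = int(x.get_shape()[1])
    W0 = tf.Variable(tf.random_normal([d_in, n_hidden], stddev=0.05))
    b0 = tf.Variable(tf.zeros([n_hidden]))
    h = tf.nn.relu(tf.matmul(x, W0) + b0)
    W1 = tf.Variable(tf.random_normal([n_hidden, 2 * n_vars], stddev=0.05))
    b1 = tf.Variable(tf.zeros([2 * n_vars]))
    out = tf.matmul(h, W1) + b1
    loc = out[:, :n_vars]
    scale = tf.nn.softplus(out[:, n_vars:])  # softplus keeps scale > 0
    return loc, scale

The outputs here have shape [N_MINIBATCH, n_vars]; depending on the Edward version, they may need to be reshaped to match the flat model.num_vars * N_MINIBATCH parameterization of the Normal factor.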

Example 10: p

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import tensorflow as tf

from edward.models import Variational, Bernoulli
from edward.stats import bernoulli


class BernoulliPosterior:
    """
    p(x, z) = p(z) = p(z | x) = Bernoulli(z; p)
    """
    def __init__(self, p):
        self.p = p

    def log_prob(self, xs, zs):
        return bernoulli.logpmf(zs, self.p)  # use the instance attribute, not the bare global p


ed.set_seed(42)
p = tf.constant(0.6)
model = BernoulliPosterior(p)
variational = Variational()
variational.add(Bernoulli())

inference = ed.MFVI(model, variational)
inference.run(n_iter=10000)
Developer: appcoreopc | Project: edward | Lines: 30 | Source: bernoulli.py

Example 11: get_dims

        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)

        # Loop over each mini-batch zs[b,:]
        log_lik = []
        n_minibatch = get_dims(zs)[0]
        for s in range(n_minibatch):
            log_lik_z = N*tf.reduce_sum(tf.log(pi), 1)
            for k in range(self.K):
                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs,
                    mus[s, (k*self.D):((k+1)*self.D)],
                    sigmas[s, (k*self.D):((k+1)*self.D)]))

            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)

ed.set_seed(42)
x = np.loadtxt('data/mixture_data.txt', dtype='float32', delimiter=',')
data = ed.Data(tf.constant(x, dtype=tf.float32))

model = MixtureGaussian(K=2, D=2)
variational = Variational()
variational.add(Dirichlet([1, model.K]))
variational.add(Normal(model.K*model.D))
variational.add(InvGamma(model.K*model.D))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=500, n_minibatch=5, n_data=5)
Developer: crack521 | Project: edward | Lines: 30 | Source: mixture_gaussian.py

Example 12: build_toy_dataset

def build_toy_dataset(n_data=40, noise_std=0.1):
    ed.set_seed(0)
    x = np.concatenate([np.linspace(0, 2, num=n_data // 2),
                        np.linspace(6, 8, num=n_data // 2)])
    y = 0.075*x + norm.rvs(0, noise_std, size=n_data)
    x = (x - 4.0) / 4.0
    x = x.reshape((n_data, 1))
    y = y.reshape((n_data, 1))
    data = np.concatenate((y, x), axis=1) # n_data x 2
    data = tf.constant(data, dtype=tf.float32)
    return ed.Data(data)

ed.set_seed(42)
model = LinearModel()
variational = Variational()
variational.add(Normal(model.num_vars))
data = build_toy_dataset()

# Set up figure
fig = plt.figure(figsize=(8,8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

sess = ed.get_session()
inference = ed.MFVI(model, variational, data)
inference.initialize(n_minibatch=5, n_print=5)
for t in range(250):
    loss = inference.update()
    if t % inference.n_print == 0:
Developer: aporia3517 | Project: edward | Lines: 30 | Source: bayesian_linear_regression_plot.py

Example 13: NormalBernoulli

    return [mean, stddev]

ed.set_seed(42)
model = NormalBernoulli(num_vars=10)

# We use the variational model
# q(z | x) = prod_{n=1}^N q(z_n | x)
#          = prod_{n=1}^N Normal(z_n | mu, sigma = phi(x_n))
# It is a distribution of the latent variables z_n for each data
# point x_n. We use mapping() to globally parameterize the local
# variational factors q(z_n | x).
# We also do data subsampling during inference. Therefore we only need
# to explicitly represent the corresponding variational factors for a
# mini-batch,
# q(z_{batch} | x) = prod_{m=1}^{n_data} Normal(z_m | mu, sigma = phi(x))
variational = Variational()
Normal.mapping = mapping
Normal.num_local_vars = model.num_vars
variational.add(Normal(model.num_vars * FLAGS.n_data))

if not os.path.exists(FLAGS.data_directory):
    os.makedirs(FLAGS.data_directory)

mnist = input_data.read_data_sets(FLAGS.data_directory, one_hot=True)

# data uses placeholder in order to build inference's computational
# graph. np.arrays of data are fed in during computation.
x = tf.placeholder(tf.float32, [FLAGS.n_data, 28 * 28])
data = ed.Data(x)

inference = ed.MFVI(model, variational, data)
Developer: 313-Ventures | Project: edward | Lines: 31 | Source: convolutional_vae.py

Example 14: NormalBernoulli


ed.set_seed(42)
model = NormalBernoulli(num_vars=10)

# Use the variational model
# q(z | x) = prod_{n=1}^N Normal(z_n | loc, scale = neural_network(x_n))
# It is a distribution of the latent variables z_n for each data
# point x_n. We use neural_network() to globally parameterize the local
# variational factors q(z_n | x).
# We also do data subsampling during inference. Therefore we only need
# to explicitly represent the variational factors for a mini-batch,
# q(z_{batch} | x) = prod_{m=1}^{n_data} Normal(z_m | loc, scale = neural_network(x_m))
x_ph = tf.placeholder(tf.float32, [N_DATA, 28 * 28])
loc, scale = neural_network(x_ph)
variational = Variational()
variational.add(Normal(model.num_vars * N_DATA, loc=loc, scale=scale))

# MNIST batches are fed at training time.
if not os.path.exists(DATA_DIR):
    os.makedirs(DATA_DIR)

mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)
x = tf.placeholder(tf.float32, [N_DATA, 28 * 28])
data = {'x': x}

sess = ed.get_session()
inference = ed.MFVI(model, variational, data)
with tf.variable_scope("model") as scope:
    inference.initialize(optimizer="PrettyTensor")
with tf.variable_scope("model", reuse=True) as scope:
Developer: appcoreopc | Project: edward | Lines: 29 | Source: convolutional_vae.py

Example 15: NormalBernoulli

ed.set_seed(42)
model = NormalBernoulli(num_vars=10)

# We use the variational model
# q(z | x) = prod_{n=1}^N q(z_n | x)
#          = prod_{n=1}^N Normal(z_n | loc, scale = phi(x_n))
# It is a distribution of the latent variables z_n for each data
# point x_n. We use neural_network() to globally parameterize the local
# variational factors q(z_n | x).
# We also do data subsampling during inference. Therefore we only need
# to explicitly represent the corresponding variational factors for a
# mini-batch,
# q(z_{batch} | x) = prod_{m=1}^{n_data} Normal(z_m | loc, scale = phi(x))
x_ph = tf.placeholder(tf.float32, [FLAGS.n_data, 28 * 28])
loc, scale = neural_network(x_ph)
variational = Variational()
variational.add(Normal(model.num_vars * FLAGS.n_data, loc=loc, scale=scale))

if not os.path.exists(FLAGS.data_directory):
    os.makedirs(FLAGS.data_directory)

mnist = input_data.read_data_sets(FLAGS.data_directory, one_hot=True)

# data uses placeholder in order to build inference's computational
# graph. np.arrays of data are fed in during computation.
x = tf.placeholder(tf.float32, [FLAGS.n_data, 28 * 28])
data = ed.Data(x)

sess = ed.get_session()
inference = ed.MFVI(model, variational, data)
with tf.variable_scope("model") as scope:
Developer: bradleyhb | Project: edward | Lines: 31 | Source: convolutional_vae.py


Note: The edward.models.Variational class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution or use of the code should follow the corresponding project's license. Do not reproduce this article without permission.