

Python RBM.get_cost_updates Method Code Examples

This article collects typical usage examples of the rbm.RBM.get_cost_updates method in Python. If you are wondering what RBM.get_cost_updates does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore other usage examples of the rbm.RBM class.


Three code examples of the RBM.get_cost_updates method are shown below, sorted by popularity by default.
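Before the examples, here is a minimal sketch of the call pattern they all share, assuming the Theano deep-learning-tutorial style rbm.RBM class used in these projects, where get_cost_updates(lr, persistent, k) returns a symbolic cost together with an updates dictionary that can be compiled into a theano.function. The layer sizes and CD step count below are illustrative, not taken from any of the projects.

# Minimal usage sketch (assumes the Theano deep-learning-tutorial style rbm.RBM)
import theano
import theano.tensor as T

from rbm import RBM

x = T.matrix('x')  # a minibatch of visible units (e.g. flattened 28x28 MNIST images)

# illustrative sizes; real code takes them from the dataset
rbm = RBM(input=x, n_visible=28 * 28, n_hidden=500)

# one step of CD-1; pass a shared variable as `persistent` and k > 1 for PCD-k
cost, updates = rbm.get_cost_updates(lr=0.1, persistent=None, k=1)

# compiling the updates into a theano.function is what actually trains the RBM
train_rbm = theano.function([x], cost, updates=updates, name='train_rbm')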

Example 1: test

# Required import: from rbm import RBM [as alias]
# Or: from rbm.RBM import get_cost_updates [as alias]
def test(learning_rate=0.1, k=1, training_epochs=15):
  print '... loading data'

  datasets = load_data('mnist.pkl.gz')
  train_set_x, train_set_y = datasets[0]
  test_set_x, test_set_y = datasets[2]

  print '... modeling'

  rbm = RBM(input=train_set_x, n_visible=28 * 28, n_hidden=500)

  print '... training'

  start_time = time.clock()

  for epoch in xrange(training_epochs):
    cost = rbm.get_cost_updates(lr=learning_rate, k=k)
    print 'Training epoch %d, cost is ' % epoch, cost

  end_time = time.clock()
  pretraining_time = (end_time - start_time)

  print ('Training took %f minutes' % (pretraining_time / 60.))
Developer: belkhir-nacim | Project: rbm-mnist | Lines of code: 25 | Source: test.py

Example 2: test_rbm

# Required import: from rbm import RBM [as alias]
# Or: from rbm.RBM import get_cost_updates [as alias]
def test_rbm(
    datasets,
    learning_rate=0.1,
    training_epochs=15,
    batch_size=20,
    n_chains=20,
    n_samples=10,
    output_folder="rbm_plots",
    n_hidden=500,
):
    """
    Demonstrate how to train an RBM and afterwards sample from it using Theano.

    This is demonstrated on MNIST.

    :param learning_rate: learning rate used for training the RBM

    :param training_epochs: number of epochs used for training

    :param datasets: train/val/test set tensors

    :param batch_size: size of a batch used to train the RBM

    :param n_chains: number of parallel Gibbs chains to be used for sampling

    :param n_samples: number of samples to plot for each chain

    """

    train_set_x, train_set_y = datasets[0]
    test_set_x, test_set_y = datasets[2]

    n_features = train_set_x.get_value(borrow=True).shape[1]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix("x")  # the data is presented as rasterized images

    rng = np.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))

    # initialize storage for the persistent chain (state = hidden
    # layer of chain)
    persistent_chain = theano.shared(np.zeros((batch_size, n_hidden), dtype=theano.config.floatX), borrow=True)

    # construct the RBM class
    rbm = RBM(input=x, n_visible=n_features, n_hidden=n_hidden, numpy_rng=rng, theano_rng=theano_rng)

    # get the cost and the gradient corresponding to one step of CD-15
    cost, updates = rbm.get_cost_updates(lr=learning_rate, persistent=persistent_chain, k=15)

    #################################
    #     Training the RBM          #
    #################################
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
    os.chdir(output_folder)

    train_set_x_batch = sparse.dense_from_sparse(train_set_x[index * batch_size : (index + 1) * batch_size])

    # it is ok for a theano function to have no output
    # the purpose of train_rbm is solely to update the RBM parameters
    train_rbm = theano.function([index], cost, updates=updates, givens={x: train_set_x_batch}, name="train_rbm")

    plotting_time = 0.0
    start_time = time.clock()

    # go through training epochs
    for epoch in xrange(training_epochs):

        # go through the training set
        mean_cost = []
        for batch_index in xrange(n_train_batches):
            mean_cost += [train_rbm(batch_index)]

        print "Training epoch %d, cost is " % epoch, np.mean(mean_cost)

        # Plot filters after each training epoch
        plotting_start = time.clock()
        # Construct image from the weight matrix
        image = PIL.Image.fromarray(rbm.W.get_value(borrow=True).T)
        image.save("filters_at_epoch_%i.png" % epoch)
        plotting_stop = time.clock()
        plotting_time += plotting_stop - plotting_start

    end_time = time.clock()

    pretraining_time = (end_time - start_time) - plotting_time

    print ("Training took %f minutes" % (pretraining_time / 60.0))

    #################################
    #     Sampling from the RBM     #
    #################################
    # find out the number of test samples
    number_of_test_samples = test_set_x.get_value(borrow=True).shape[0]

#......... part of the code omitted here .........
Developer: pdikang | Project: kaggle | Lines of code: 103 | Source: train_dbm.py

Example 3: RBM

# Required import: from rbm import RBM [as alias]
# Or: from rbm.RBM import get_cost_updates [as alias]
batch_size = 100

r = RBM(input = x, num_vis = num_vis, num_hid = num_hid)
#f = theano.function([index], 
#             r.prop_up(x), 
#             givens = [( x, data_sh[ index*batch_size: (index+1)*batch_size] )] 
#             )

#chain_start = r.sample_h_given_v(r.input)
#[pre_sigmoid_nvs, nv_means, nv_samples,
# pre_sigmoid_nhs, nh_means, nh_samples], updates = \
#    theano.scan(r.gibbs_hvh,
#            outputs_info=[None,  None,  None, None, None, chain_start],
#            n_steps=10)
#f = theano.function([],nh_samples[-1], updates = updates, givens =  [( x, data_sh[ 0*batch_size: (0+1)*batch_size] )])
cost, updates = r.get_cost_updates(lr=0.1, persistent=None, k=15)
train_rbm = theano.function([index], cost,
                            updates=updates,
                            givens=[(x, data_sh[index * batch_size: (index + 1) * batch_size])],
                            name='train_rbm')

max_epoch = 100
for ep in xrange(0,max_epoch):
    for i in xrange(0,10):
        print train_rbm(i)





Developer: alexeyche | Project: alexeyche-junk | Lines of code: 28 | Source: rbm_toy.py


Note: The rbm.RBM.get_cost_updates examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use should follow each project's license. Please do not reproduce without permission.