Python rbm.RBM Class Code Examples

This article collects typical usage examples of the Python rbm.RBM class. If you have been wondering what the RBM class does, or how to use it, the curated class examples below should help.


The following presents 15 code examples of the RBM class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
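
The examples below come from different projects whose rbm.RBM implementations differ in constructor signature and training API. For orientation, here is a minimal, hypothetical NumPy sketch of the binary-RBM interface most of them assume; the class and method names are illustrative and do not belong to any of the projects cited below.

import numpy as np

class MinimalRBM(object):
    """Hypothetical binary RBM trained with one step of contrastive divergence (CD-1)."""

    def __init__(self, n_visible, n_hidden, rng=None):
        self.rng = rng if rng is not None else np.random.RandomState(0)
        self.W = 0.01 * self.rng.randn(n_visible, n_hidden)  # weight matrix
        self.b = np.zeros(n_visible)                         # visible bias
        self.c = np.zeros(n_hidden)                          # hidden bias

    @staticmethod
    def _sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def sample_h_given_v(self, v):
        p = self._sigmoid(v.dot(self.W) + self.c)            # P(h=1 | v)
        return p, (self.rng.rand(*p.shape) < p).astype(np.float64)

    def sample_v_given_h(self, h):
        p = self._sigmoid(h.dot(self.W.T) + self.b)          # P(v=1 | h)
        return p, (self.rng.rand(*p.shape) < p).astype(np.float64)

    def train_step(self, v0, lr=0.1):
        ph0, h0 = self.sample_h_given_v(v0)                  # positive phase
        pv1, v1 = self.sample_v_given_h(h0)                  # one Gibbs step down...
        ph1, _ = self.sample_h_given_v(v1)                   # ...and back up (negative phase)
        n = v0.shape[0]
        self.W += lr * (v0.T.dot(ph0) - v1.T.dot(ph1)) / n   # CD-1 gradient estimates
        self.b += lr * (v0 - v1).mean(axis=0)
        self.c += lr * (ph0 - ph1).mean(axis=0)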

Example 1: __init__

    def __init__(
        self,
        input,
        n_in=784,
        n_hidden=500,
        W=None,
        hbias=None,
        vbias=None,
        numpy_rng=None,
        transpose=False,
        activation=T.nnet.sigmoid,
        theano_rng=None,
        name="grbm",
        W_r=None,
        dropout=0,
        dropconnect=0,
    ):

        # initialize parent class (RBM)
        RBM.__init__(
            self,
            input=input,
            n_visible=n_in,
            n_hidden=n_hidden,
            W=W,
            hbias=hbias,
            vbias=vbias,
            numpy_rng=numpy_rng,
            theano_rng=theano_rng,
        )
Developer: pangyuteng | Project: chalearn2014_wudi_lio | Lines: 30 | Source: grbm.py

Example 2: __init__

    def __init__(self, input=None, n_visible=784, n_hidden=500,
                 W=None, h_bias=None, v_bias=None, numpy_rng=None, theano_rng=None):
        """
        GBRBM constructor. Defines the parameters of the model along with
        basic operations for inferring hidden from visible (and vice-versa).
        It initializes the parent class (RBM).

        :param input: None for standalone RBMs or symbolic variable if RBM is part of a larger graph.

        :param n_visible: number of visible units

        :param n_hidden: number of hidden units

        :param W: None for standalone RBMs or symbolic variable pointing to a
        shared weight matrix in case the RBM is part of a DBN; in a DBN,
        the weights are shared between the RBMs and the layers of an MLP

        :param h_bias: None for standalone RBMs or symbolic variable pointing
        to a shared hidden units bias vector in case RBM is part of a
        different network

        :param v_bias: None for standalone RBMs or a symbolic variable
        pointing to a shared visible units bias
        """
        RBM.__init__(
            self,
            input=input,
            n_visible=n_visible,
            n_hidden=n_hidden,
            W=W, h_bias=h_bias,
            v_bias=v_bias,
            numpy_rng=numpy_rng,
            theano_rng=theano_rng)
Developer: gdl-civestav-localization | Project: cinvestav_location_fingerprinting | Lines: 33 | Source: grbm.py
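
A usage sketch for this constructor, following the Theano-tutorial convention of passing a symbolic matrix as input. The class name GBRBM comes from the docstring above; the data setup below is purely illustrative.

import numpy
import theano.tensor as T

x = T.matrix('x')                    # symbolic input, one row per training example
rng = numpy.random.RandomState(123)

# standalone Gaussian-Bernoulli RBM: W, h_bias and v_bias are created internally
gbrbm = GBRBM(input=x, n_visible=784, n_hidden=500, numpy_rng=rng)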

Example 3: __init__

 def __init__(
     self,
     input,
     n_visible=784,
     n_hidden=500,
     W=None,
     hbias=None,
     vbias=None,
     numpy_rng=None,
     transpose=False,
     theano_rng=None,
     weight_decay=0.0002,
 ):
     RBM.__init__(
         self,
         input=input,
         n_visible=n_visible,
         n_hidden=n_hidden,
         W=W,
         hbias=hbias,
         vbias=vbias,
         numpy_rng=numpy_rng,
         theano_rng=theano_rng,
         weight_decay=weight_decay,
     )
Developer: urszula-kaczmar | Project: SpeechDBN | Lines: 25 | Source: grbm.py

Example 4: pretrain_rbm_layers

def pretrain_rbm_layers(v, validation_v=None, n_hidden=[], gibbs_steps=[], batch_size=[], num_epochs=[], learning_rate=[], probe_epochs=[]):
    rbm_layers = []
    n_rbm = len(n_hidden)
    # create rbm layers
    for i in range(n_rbm):
        rbm = RBM(n_hidden=n_hidden[i],
                    gibbs_steps=gibbs_steps[i],
                    batch_size=batch_size[i],
                    num_epochs=num_epochs[i],
                    learning_rate=learning_rate[i],
                    probe_epochs=probe_epochs[i])
        rbm_layers.append(rbm)
    # pretrain rbm layers
    input = v
    validation_input = validation_v
    for i, rbm in enumerate(rbm_layers):
        print('### pretraining RBM Layer {i}'.format(i=i))
        rbm.fit(input, validation_input)
        output = rbm.sample_h_given_v(input, rbm.params['W'], rbm.params['c'])
        if validation_input is not None:
            validation_output = rbm.sample_h_given_v(validation_input, rbm.params['W'], rbm.params['c'])
        else:
            validation_output = None
        input = output
        validation_input = validation_output
    return rbm_layers
Developer: taiqing | Project: tensorflowNN | Lines: 26 | Source: dbn_no_finetune.py
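
A sketch of how this helper might be called. The data shape and hyperparameters are invented for illustration; the per-layer argument lists must all have the same length.

import numpy as np

v = np.random.rand(1000, 784).astype(np.float32)   # hypothetical training matrix
rbm_layers = pretrain_rbm_layers(
    v,
    n_hidden=[256, 64],              # two stacked RBMs
    gibbs_steps=[1, 1],
    batch_size=[100, 100],
    num_epochs=[10, 10],
    learning_rate=[0.1, 0.1],
    probe_epochs=[5, 5],
)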

Example 5: __init__

class SFG:
  
  def __init__(self):
    self.image_width = self.image_height = 28
    self.visible_units = self.image_width * self.image_height
    self.hidden_units = self.visible_units // 10   # integer division: 784 // 10 = 78 hidden units
    self.rbm = RBM(self.visible_units, self.hidden_units)

  #assumes there are only training images in the training_folder
  def train(self, training_folder, epochs = 500):
    data = []
    for training_image in os.listdir(training_folder):
      image = pil.open(training_folder + '/' + training_image)
      image = self.array_for_image(image)
      data.append(image)

    self.rbm.train(data, epochs)
  
  # takes a PIL Image and returns an array of 1s and 0s
  def array_for_image(self, image):
    return np.array(image.convert("L")).flatten() // 255

  def regen_image(self, image, samples):
    data = self.array_for_image(image)
    (v, _) = self.rbm.regenerate([data],samples)
    return self.image_for_array(v[0])

  def image_for_array(self, array):
    img_array = []
    for row in range(0, self.image_height):
      img_array.append(array[row * self.image_width : (row+1) * self.image_width])

    img_array = np.asarray(img_array, np.uint8) * 255
    return pil.fromarray(img_array)
Developer: jbcumming | Project: RBM | Lines: 34 | Source: smiley.py
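
A usage sketch under the assumptions the class itself makes: 28x28 grayscale images on disk, and os, numpy (as np) and PIL available, with the Image module aliased to pil. The folder and file names are illustrative.

from PIL import Image as pil          # matches the pil alias used above

sfg = SFG()
sfg.train('smileys', epochs=500)      # folder containing only 28x28 training images
original = pil.open('smileys/face01.png')
regenerated = sfg.regen_image(original, samples=10)
regenerated.save('face01_regen.png')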

Example 6: pretrainRBM

    def pretrainRBM(self, trainset):
        trainv = np.mat(trainset[1])   # 1 x n sample, used to infer the input width
        vlen = trainv.shape[1]
        trainnum = len(trainset)
        hlen = 500
        weights = []
        print("vlen = %d" % vlen)
        print("Trainnum = %d" % trainnum)
        for i in range(self.nlayers):
            rbm = RBM(vlen,hlen)
            T,e = 3,0.05
            if i == 0:
                traindata = trainset
            else:
                traindata = outdata
            outdata = np.zeros((trainnum,hlen))
            for j in range(trainnum):
                print("layer:%d CD sample %d..." % (i, j))
                trainv = np.mat(traindata[j])
                rbm.train_CD(trainv, T, e)
                outdata[j] = np.mat(rbm.sample(rbm.calc_forward(trainv)))   # 1 x hlen
            self.rbm_layers.append(rbm)
            weights.append(rbm.W)
            vlen = hlen
#            hlen -= 100
        dump_data("data/dbn.pkl", weights)
        print("========= pretrainRBM complete ===========")
Developer: fanfannothing | Project: MyDBN | Lines: 27 | Source: dbn.py
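
This method lives on a DBN class that must provide self.nlayers and self.rbm_layers. A hedged calling sketch; the DBN constructor signature and the data layout (a list of binary row vectors) are assumptions, not something shown in the snippet.

import numpy as np

trainset = [np.random.randint(0, 2, 784) for _ in range(100)]  # hypothetical binary samples
dbn = DBN(nlayers=2)       # constructor arguments assumed
dbn.pretrainRBM(trainset)  # greedy layer-wise CD: each layer trains on the previous layer's samples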

Example 7: __init__

    def __init__(self,
                 input,
                 n_visible=16,
                 n_hidden=20,
                 W=None, hbias=None, vbias=None,
                 numpy_rng=None, theano_rng=None):

        # initialize parent class (RBM)
        RBM.__init__(self,
                     input=input,
                     n_visible=n_visible,
                     n_hidden=n_hidden,
                     W=W, hbias=hbias, vbias=vbias,
                     numpy_rng=numpy_rng, theano_rng=theano_rng)
Developer: Warvito | Project: My-tutorial | Lines: 14 | Source: GB_rbm_CD.py

Example 8: load_dbn_param

    def load_dbn_param(self, dbnpath, softmaxpath):
        weights = pickle.load(open(dbnpath, 'rb'))   # cPickle in the original Python 2 source
        vlen, hlen = 0, 0
        self.nlayers = len(weights)
        for i in range(self.nlayers):
            weight = weights[i]
            vlen, hlen = weight.shape[0], weight.shape[1]
            rbm = RBM(vlen, hlen)
            rbm.W = weight
            self.rbm_layers.append(rbm)
            print("RBM layer%d shape:%s" % (i, str(rbm.W.shape)))
        self.softmax = SoftMax()
        self.softmax.load_theta(softmaxpath)
        print("softmax parameter: " + str(self.softmax.theta.shape))
Developer: fanfannothing | Project: MyDBN | Lines: 14 | Source: dbn.py

Example 9: load_from_matfile

    def load_from_matfile(cls, matfilename):
        data = loadmat(matfilename)
        stack_data = data.get('stack_data')
        numrbms = int(data.get('numrbms'))   # loadmat returns a 1x1 array; convert to int
        rbms = []
        for mac_i in range(numrbms):
            vbias = data.get(str(mac_i) + "_visbias")
            hbias = data.get(str(mac_i) + "_hidbias")
            vishid = data.get(str(mac_i) + "_vishid")
            rbm = RBM(vbias.size, hbias.size)
            rbm.get_vislayer().bias = vbias
            rbm.get_hidlayer().bias = hbias
            rbm.weights[0] = vishid
            rbms.append(rbm)
        return cls(stack_data, rbms)
Developer: Khodeir | Project: neural-networks | Lines: 15 | Source: rbmstack.py
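
Since the method takes cls as its first argument, it is presumably decorated with @classmethod just outside the excerpt, and loadmat is presumably scipy.io.loadmat. A hedged usage sketch, with the class name RBMStack guessed from the source file rbmstack.py:

# rebuild a stack of RBMs saved from MATLAB; class and file names are assumptions
stack = RBMStack.load_from_matfile('pretrained_stack.mat')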

Example 10: RBMTest

class RBMTest(unittest.TestCase):
    def setUp(self):
        self.rbm = RBM(10,10)

    def can_make_rbm_test(self):
        rbm = RBM(10, 10)

    def logistic_function_test(self):
        self.assertEqual(self.rbm.logistic(0), 1)

    def train_throws_error_with_inconsistent_matrix_sizes_test(self):
        with self.assertRaises(TypeError):
            self.rbm.train([[1,0,1,1,1,1,0,1], [1,1,1,1,0], [1,1,1,1,1,1]])

    def regenerate_throws_error_with_inconsistent_matrix_sizes_test(self):
        with self.assertRaises(TypeError):
            self.rbm.regenerate([[1,0,1,1,1,1,0,1], [1,1,1,1,0], [1,1,1,1,1,1]])
Developer: nigggle | Project: RBM | Lines: 17 | Source: test_test.py

Example 11: fit_network

    def fit_network(self, X, labels=None):
        if labels is None:
            labels = numpy.zeros((X.shape[0], 2))
        self.layers = []
        temp_X = X
        for j in range(self.num_layers):

            print("\nTraining Layer %i" % (j + 1))
            print("components: %i" % self.components[j])
            print("batch_size: %i" % self.batch_size[j])
            print("learning_rate: %0.3f" % self.learning_rate[j])
            print("bias_learning_rate: %0.3f" % self.bias_learning_rate[j])
            print("epochs: %i" % self.epochs[j])
            print("Sparsity: %s" % str(self.sparsity_rate[j]))
            print("Sparsity Phi: %s" % str(self.phi))
            if j != 0:
                self.plot_weights = False

            model = RBM(n_components=self.components[j], batch_size=self.batch_size[j],
                        learning_rate=self.learning_rate[j], regularization_mu=self.sparsity_rate[j],
                        n_iter=self.epochs[j], verbose=True, learning_rate_bias=self.bias_learning_rate[j],
                        plot_weights=self.plot_weights, plot_histograms=self.plot_histograms, phi=self.phi)

            if j + 1 == self.num_layers and labels is not None:
                model.fit(numpy.asarray(temp_X), numpy.asarray(labels))
            else:
                model.fit(numpy.asarray(temp_X))

            temp_X = model._mean_hiddens(temp_X)  # hidden activations given the visible units
            print("Trained Layer %i\n" % (j + 1))

            self.layers.append(model)
Developer: tjvandal | Project: deep-learning | Lines: 32 | Source: dbn.py

Example 12: _ulogprob_hid

	def _ulogprob_hid(self, Y, num_is_samples=100):
		"""
		Estimates the unnormalized marginal log-probabilities of hidden states.
		
		Use this method only if you know what you are doing.
		"""

		# approximate this SRBM with an RBM
		rbm = RBM(self.X.shape[0], self.Y.shape[0])
		rbm.W = self.W
		rbm.b = self.b
		rbm.c = self.c

		# allocate memory
		Q = np.asmatrix(np.zeros([num_is_samples, Y.shape[1]]))

		for k in range(num_is_samples):
			# draw importance samples
			X = rbm.backward(Y)

			# store importance weights
			Q[k, :] = self._ulogprob(X, Y) - rbm._clogprob_vis_hid(X, Y)

		# average importance weights to get estimates
		return utils.logmeanexp(Q, 0)
Developer: Paseam | Project: BackgroundSubtraction_by_GBRBM | Lines: 25 | Source: semirbm.py
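
The final line averages the importance weights in log space: with proposals X drawn from the proxy RBM's conditional q(x | y), the unnormalized marginal satisfies log p(y) ≈ logmeanexp_k[log p(x_k, y) - log q(x_k | y)], which is what the loop accumulates. utils.logmeanexp is the project's own helper; a numerically stable NumPy equivalent of what the name and usage suggest might look like this (an assumption, not the project's code):

import numpy as np

def logmeanexp(Q, axis=0):
    """log(mean(exp(Q))) along the given axis, computed without overflow."""
    Q = np.asarray(Q, dtype=np.float64)
    m = Q.max(axis=axis, keepdims=True)    # shift by the max for numerical stability
    return m + np.log(np.mean(np.exp(Q - m), axis=axis, keepdims=True))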

Example 13: pretrain_rbm_layers

def pretrain_rbm_layers(v, validation_v=None, n_hidden=[], gibbs_steps=[], batch_size=[], num_epochs=[], learning_rate=[], probe_epochs=[]):
    """
    Fake pre-training, just randomly initialising the weights of RBM layers
    :param v:
    :param validation_v:
    :param n_hidden:
    :param gibbs_steps:
    :param batch_size:
    :param num_epochs:
    :param learning_rate:
    :param probe_epochs:
    :return:
    """
    rbm_layers = []
    n_rbm = len(n_hidden)
    # create rbm layers
    for i in range(n_rbm):
        rbm = RBM(n_hidden=n_hidden[i],
                    gibbs_steps=gibbs_steps[i],
                    batch_size=batch_size[i],
                    num_epochs=num_epochs[i],
                    learning_rate=learning_rate[i],
                    probe_epochs=probe_epochs[i])
        rbm_layers.append(rbm)
    # pretrain rbm layers
    n_v = v.shape[1]
    for i, rbm in enumerate(rbm_layers):
        print('### pretraining RBM Layer {i}'.format(i=i))
        n_h = n_hidden[i]
        initial_W = np.float32(np.random.uniform(
            low=-4 * np.sqrt(6.0 / (n_h + n_v)),
            high=4 * np.sqrt(6.0 / (n_h + n_v)),
            size=(n_v, n_h)
        ))
        rbm.params['W'] = initial_W
        rbm.params['c'] = np.zeros((n_h, ), np.float32)
        n_v = n_h
    return rbm_layers
Developer: taiqing | Project: tensorflowNN | Lines: 38 | Source: dbn_no_pretrain.py

Example 14: test

def test(learning_rate=0.1, k=1, training_epochs=15):
  print('... loading data')

  datasets = load_data('mnist.pkl.gz')
  train_set_x, train_set_y = datasets[0]
  test_set_x, test_set_y = datasets[2]

  print('... modeling')

  rbm = RBM(input=train_set_x, n_visible=28 * 28, n_hidden=500)

  print('... training')

  start_time = time.time()

  for epoch in range(training_epochs):
    cost = rbm.get_cost_updates(lr=learning_rate, k=k)
    print('Training epoch %d, cost is ' % epoch, cost)

  end_time = time.time()
  pretraining_time = end_time - start_time

  print('Training took %f minutes' % (pretraining_time / 60.))
Developer: belkhir-nacim | Project: rbm-mnist | Lines: 23 | Source: test.py
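
If RBM here is the class from the canonical deeplearning.net tutorial, get_cost_updates builds a symbolic (cost, updates) pair rather than performing a training step, so the loop above would only rebuild the graph each epoch. A hedged sketch of the tutorial-style pattern, reusing this example's variables and treating train_set_x as a Theano shared variable:

import theano
import theano.tensor as T

batch_size = 20
index = T.lscalar('index')           # minibatch index
x = T.matrix('x')

rbm = RBM(input=x, n_visible=28 * 28, n_hidden=500)
cost, updates = rbm.get_cost_updates(lr=0.1, k=1)   # symbolic cost and CD updates

train_rbm = theano.function(
    [index], cost, updates=updates,
    givens={x: train_set_x[index * batch_size:(index + 1) * batch_size]},
)

n_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
for epoch in range(15):
    costs = [train_rbm(i) for i in range(n_batches)]
    print('Training epoch %d, mean cost %f' % (epoch, sum(costs) / len(costs)))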

Example 15: __init__

    def __init__(self, knapsack_file="weing1.pkl"):
        super(ES, self).__init__()
        # GA stuff
        self.generations = 100
        self.knapsack = pickle.load(open(knapsack_file, 'rb'))
        print("k:", self.knapsack)
        self.N = int(self.knapsack.items)
        # RBM stuff
        self.RBM = RBM(n_visible=self.N, n_hidden=50)
        self.sample_RBM()

        # Stats stuff
        self.population_snapshots = []
        self.genotypes_history = Genotypes(min=False)
Developer: alexanderchurchill | Project: dbn_ga2 | Lines: 14 | Source: simple_es.py


Note: the rbm.RBM class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; please follow the corresponding project's license when distributing or using the code, and do not reproduce this compilation without permission.