

Python tensor.zeros_like Function Code Examples

This article collects typical usage examples of the Python function theano.tensor.zeros_like. If you are wondering what zeros_like does, how to call it, or where it is used in practice, the curated examples below should help.


The following presents 15 code examples of the zeros_like function, drawn from open-source projects and ordered by popularity.
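Before the project examples, here is a minimal, self-contained sketch of what tensor.zeros_like does: it builds a symbolic tensor of zeros with the same shape and dtype as its argument (the variable names are illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')    # a symbolic 2-D input
z = T.zeros_like(x)  # symbolic zeros with x's shape and dtype
f = theano.function([x], z)

print(f(np.ones((2, 3), dtype=theano.config.floatX)))
# [[0. 0. 0.]
#  [0. 0. 0.]]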

Example 1: lstm

def lstm(mask, state_in, t_params, n_dim_in, n_dim_out, prefix, one_step=False, init_h=None):
    '''
    Long Short-Term Memory (LSTM) layer
    '''
    def _step(_mask, _state_in, _prev_h, _prev_c):
        _pre_act = tensor.dot(_prev_h, t_params[_concat(prefix, 'U')]) + _state_in

        _gate_i = tensor.nnet.sigmoid(_slice(_pre_act, 0, n_dim_out))
        _gate_f = tensor.nnet.sigmoid(_slice(_pre_act, 1, n_dim_out))
        _gate_o = tensor.nnet.sigmoid(_slice(_pre_act, 2, n_dim_out))

        _next_c = _gate_f * _prev_c + _gate_i * tensor.tanh(_slice(_pre_act, 3, n_dim_out))
        _next_c = _mask[:, None] * _next_c + (1. - _mask)[:, None] * _prev_c
        _next_h = _gate_o * tensor.tanh(_next_c)
        _next_h = _mask[:, None] * _next_h + (1. - _mask)[:, None] * _prev_h

        return _next_h, _next_c

    params = OrderedDict()
    params[_concat(prefix, 'W')] = numpy.concatenate(
        [ortho_weight(n_dim_in, n_dim_out) for _ in range(4)], 1)
    params[_concat(prefix, 'U')] = numpy.concatenate(
        [ortho_weight(n_dim_out, n_dim_out) for _ in range(4)], 1)
    params[_concat(prefix, 'b')] = numpy.zeros((4 * n_dim_out,), config.floatX)
    init_t_params(params, t_params)

    state_in = (tensor.dot(state_in, t_params[_concat(prefix, 'W')]) + t_params[_concat(prefix, 'b')])
    if init_h is None:
        init_h = tensor.alloc(to_floatX(0.), state_in.shape[-2], n_dim_out)
    if one_step:
        state_out, _ = _step(mask, state_in, init_h, tensor.zeros_like(init_h))
        return state_out
    else:
        [state_out, _], _ = theano.scan(_step, [mask, state_in], [init_h, tensor.zeros_like(init_h)])
        return state_out
Author: Mourzoufle, Project: seq-to-seq-rnn, Lines: 33, Source: layers.py

Example 2: reconstruct

    def reconstruct(self, x, n_samples):
        mu, log_sigma = self.encoder(x)
        if n_samples <= 0:
            y = self.decoder(mu)
        else:
            # sample from the posterior
            if self.continuous:
                # hack to find out the size of the variables
                (y_mu, y_log_sigma) = self.decoder(mu)
                (y_mu, y_log_sigma) = (T.zeros_like(y_mu), T.zeros_like(y_log_sigma))
            else:
                y = T.zeros(x.shape)
            for i in range(n_samples):
                z = reparam_trick(mu, log_sigma, self.srng)
                if self.continuous:
                    (new_y_mu, new_y_log_sigma) = self.decoder(z)
                    y_mu = y_mu + new_y_mu
                    y_log_sigma = y_log_sigma + new_y_log_sigma
                else:
                    y = y + self.decoder(z)
            if self.continuous:
                y_mu = y_mu / n_samples
                y_log_sigma = y_log_sigma / n_samples
                y = (y_mu, y_log_sigma)
            else:
                y = (y / n_samples)
        if self.continuous:
            (y_mu, y_log_sigma) = y
            I = T.eye(y_mu.shape[0])
            cov = (T.pow(T.exp(y_log_sigma), 2)) * I
            y = np.random.multivariate_normal(y_mu.eval(), cov.eval())
        else:
            y = y.eval()
        return y
Author: budzianowski, Project: VAEB, Lines: 34, Source: VAEB.py
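Example 2 calls a reparam_trick helper that is not shown on this page. As a hedged sketch only (the project's actual helper may differ, e.g. in whether log_sigma holds a log standard deviation or a log variance, in which case the factor would be T.exp(0.5 * log_sigma)), such a helper typically draws z = mu + sigma * eps with eps ~ N(0, 1) using Theano's shared random streams:

import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

def reparam_trick(mu, log_sigma, srng):
    # one standard-normal sample per element of mu
    eps = srng.normal(mu.shape)
    # z = mu + sigma * eps, assuming log_sigma is a log standard deviation
    return mu + T.exp(log_sigma) * eps

# srng would be created once elsewhere, e.g.: srng = RandomStreams(seed=1234)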

Example 3: compute_cost_log_in_parallel

def compute_cost_log_in_parallel(original_rnn_outputs, labels, func, x_ends, y_ends):
	mask = T.log(1 - T.or_(T.eq(labels, T.zeros_like(labels)), T.eq(labels, shift_matrix(labels, 2))))

	initial_state = T.log(T.zeros_like(labels))
	initial_state = T.set_subtensor(initial_state[:,0], 0)

	def select_probabilities(rnn_outputs, label):
		return rnn_outputs[:, label]

	rnn_outputs, _ = theano.map(select_probabilities, [original_rnn_outputs, labels])
	rnn_outputs = T.log(rnn_outputs.dimshuffle((1,0,2)))

	def forward_step(probabilities, last_probabilities):
		all_forward_probabilities = T.stack(
			last_probabilities + probabilities,
			log_shift_matrix(last_probabilities, 1) + probabilities,
			log_shift_matrix(last_probabilities, 2) + probabilities + mask,
		)

		result = func(all_forward_probabilities, 0)
		return result

	forward_probabilities, _ = theano.scan(fn = forward_step, sequences = rnn_outputs, outputs_info = initial_state)
	forward_probabilities = forward_probabilities.dimshuffle((1,0,2))

	def compute_cost(forward_probabilities, x_end, y_end):
		return -func(forward_probabilities[x_end-1,y_end-2:y_end])

	return theano.map(compute_cost, [forward_probabilities, x_ends, y_ends])[0]
Author: choko, Project: ctc, Lines: 29, Source: ctc.py

Example 4: generic_compute_Lx_batches

def generic_compute_Lx_batches(samples, weights, biases, bs, cbs):
    tsamples = [x.reshape((bs//cbs, cbs, x.shape[1])) for x in samples]
    final_ws = [T.unbroadcast(T.shape_padleft(T.zeros_like(x)), 0)
                for x in weights]
    final_bs = [T.unbroadcast(T.shape_padleft(T.zeros_like(x)), 0)
                for x in biases]
    n_samples = len(samples)
    n_weights = len(weights)
    n_biases = len(biases)
    def comp_step(*args):
        lsamples = args[:n_samples]
        terms1 = generic_compute_Lx_term1(lsamples, weights, biases)
        rval = []
        for (term1, acc) in zip(terms1, args[n_samples:]):
            rval += [acc + term1]
        return rval

    rvals,_ = theano.sandbox.scan.scan(
        comp_step,
        sequences=tsamples,
        states=final_ws + final_bs,
        n_steps=bs // cbs,
        profile=0,
        mode=theano.Mode(linker='cvm_nogc'),
        flags=['no_optimization'] )
    accs1 = [x[0] / numpy.float32(bs // cbs) for x in rvals]
    accs2 = generic_compute_Lx_term2(samples, weights, biases)
    return [x - y for x, y in zip(accs1, accs2)]
Author: gdesjardins, Project: DBM, Lines: 28, Source: natural.py

Example 5: get_aggregator

    def get_aggregator(self):
        initialized = shared_like(0.)
        numerator_acc = shared_like(self.numerator)
        denominator_acc = shared_like(self.denominator)

        conditional_update_num = ifelse(initialized,
                                        self.numerator + numerator_acc,
                                        self.numerator)
        conditional_update_den = ifelse(initialized,
                                        self.denominator + denominator_acc,
                                        self.denominator)

        initialization_updates = [(numerator_acc,
                                   tensor.zeros_like(numerator_acc)),
                                  (denominator_acc,
                                   tensor.zeros_like(denominator_acc)),
                                  (initialized, 0.)]
        accumulation_updates = [(numerator_acc,
                                 conditional_update_num),
                                (denominator_acc,
                                 conditional_update_den),
                                (initialized, 1.)]
        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=(numerator_acc /
                                                  denominator_acc))
        return aggregator
Author: Fdenpc, Project: blocks, Lines: 28, Source: aggregation.py

Example 6: compute_Lx_batches

def compute_Lx_batches(v, g, h, xw_mat, xv_mat, xa, xb, xc, bs, cbs):
    xw = xw_mat.flatten()
    xv = xv_mat.flatten()
    tv = v.reshape((bs // cbs, cbs, v.shape[1]))
    tg = g.reshape((bs // cbs, cbs, g.shape[1]))
    th = h.reshape((bs // cbs, cbs, h.shape[1]))

    final_w1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xw_mat)), 0)
    final_v1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xv_mat)), 0)
    final_a1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xa)), 0)
    final_b1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xb)), 0)
    final_c1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xc)), 0)
    def comp_step(lv, lg, lh,
                  acc_w1, acc_v1, acc_a1, acc_b1, acc_c1):
        terms1 = compute_Lx_term1(lv, lg, lh, xw, xv, xa, xb, xc)
        accs1 = [acc_w1, acc_v1, acc_a1, acc_b1, acc_c1]
        rval = []

        for (term1, acc) in zip(terms1,accs1):
            rval += [acc + term1]
        return rval
    rvals,_ = theano.sandbox.scan.scan(
        comp_step,
        sequences=[tv,tg,th],
        states=[
            final_w1, final_v1, final_a1, final_b1, final_c1],
        n_steps=bs // cbs,
        profile=0,
        mode=theano.Mode(linker='cvm_nogc'),
        flags=['no_optimization'] )
    accs1 = [x[0] / numpy.float32(bs // cbs) for x in rvals]
    accs2 = compute_Lx_term2(v, g, h, xw, xv, xa, xb, xc)
    return [x - y for x, y in zip(accs1, accs2)]
Author: gdesjardins, Project: DBM, Lines: 33, Source: natural.py

Example 7: get_aggregator

    def get_aggregator(self):
        initialized = shared_like(0.)
        total_acc = shared_like(self.variable)

        total_zeros = tensor.as_tensor(self.variable).zeros_like()

        conditional_update_num = self.variable + ifelse(initialized,
                                                         total_acc,
                                                         total_zeros)

        initialization_updates = [(total_acc,
                                   tensor.zeros_like(total_acc)),
                                  (initialized,
                                   tensor.zeros_like(initialized))]

        accumulation_updates = [(total_acc,
                                 conditional_update_num),
                                (initialized, tensor.ones_like(initialized))]

        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=(total_acc))

        return aggregator
Author: davidbau, Project: net-intent, Lines: 25, Source: ablation.py
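Examples 5 and 7 use the same idiom: an accumulator shared variable is reset by pairing it with tensor.zeros_like(acc) in an updates list, so the reset automatically matches the accumulator's shape and dtype. A stripped-down sketch of the pattern (the names are illustrative):

import numpy as np
import theano
import theano.tensor as T

acc = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='acc')
x = T.vector('x')

accumulate = theano.function([x], updates=[(acc, acc + x)])
reset = theano.function([], updates=[(acc, T.zeros_like(acc))])

accumulate(np.ones(3, dtype=theano.config.floatX))
accumulate(np.ones(3, dtype=theano.config.floatX))
print(acc.get_value())  # [2. 2. 2.]
reset()
print(acc.get_value())  # [0. 0. 0.]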

Example 8: grad

    def grad(self, inputs, out_grads):
        batch_mean, rolling_mean, rolling_grad, alpha = inputs
        out_grad, = out_grads

        if self.update_averages:
            assert treeano.utils.is_shared_variable(rolling_mean)
            assert treeano.utils.is_shared_variable(rolling_grad)
            # HACK this is super hacky and won't work for certain
            # computation graphs
            # TODO make assertion again
            if (hasattr(rolling_mean, "default_update") or
                    hasattr(rolling_grad, "default_update")):
                warnings.warn("rolling mean/grad already has updates - "
                              "overwriting; this can be caused by calculating "
                              "the gradient of the backprop-to-the-future mean "
                              "multiple times")

            rolling_mean.default_update = (alpha * rolling_mean +
                                           (1 - alpha) * batch_mean)
            rolling_grad.default_update = (alpha * rolling_grad +
                                           (1 - alpha) * out_grad)
        else:
            # HACK remove default_update
            if hasattr(rolling_mean, "default_update"):
                delattr(rolling_mean, "default_update")
            if hasattr(rolling_grad, "default_update"):
                delattr(rolling_grad, "default_update")

        return [rolling_grad,
                T.zeros_like(rolling_mean),
                T.zeros_like(rolling_grad),
                T.zeros_like(alpha)]
Author: diogo149, Project: treeano, Lines: 32, Source: bttf_mean.py

Example 9: _construct_compute_fe_terms

    def _construct_compute_fe_terms(self):
        """
        Construct theano function to compute the log-likelihood and posterior
        KL-divergence terms for the variational free-energy.
        """
        # set up some symbolic variables for theano to deal with
        Xd = T.matrix()
        Xc = T.zeros_like(Xd)
        Xm = T.zeros_like(Xd)
        # construct values to output
        if self.x_type == 'bernoulli':
            ll_term = log_prob_bernoulli(self.x, self.xg)
        else:
            ll_term = log_prob_gaussian2(self.x, self.xg,
                                         log_vars=self.bounded_logvar)
        all_klds = gaussian_kld(self.q_z_given_x.output_mean,
                                self.q_z_given_x.output_logvar,
                                self.prior_mean, self.prior_logvar)
        kld_term = T.sum(all_klds, axis=1)
        # compile theano function for a one-sample free-energy estimate
        fe_term_sample = theano.function(inputs=[Xd],
                                         outputs=[ll_term, kld_term],
                                         givens={self.Xd: Xd, self.Xc: Xc, self.Xm: Xm})

        # construct a wrapper function for a multi-sample free-energy estimate
        def fe_term_estimator(X, sample_count):
            ll_sum = np.zeros((X.shape[0],))
            kld_sum = np.zeros((X.shape[0],))
            for i in range(sample_count):
                result = fe_term_sample(X)
                ll_sum = ll_sum + result[0].ravel()
                kld_sum = kld_sum + result[1].ravel()
            mean_nll = -ll_sum / float(sample_count)
            mean_kld = kld_sum / float(sample_count)
            return [mean_nll, mean_kld]
        return fe_term_estimator
Author: Philip-Bachman, Project: ICML-2015, Lines: 35, Source: OneStageModel.py
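The Xc = T.zeros_like(Xd) trick in Example 9 builds all-zero symbolic inputs that are wired in through givens, so the caller never has to allocate numeric zero arrays. A self-contained sketch of that givens pattern (the names below are illustrative, not from the project):

import numpy as np
import theano
import theano.tensor as T

data = T.matrix('data')
mask = T.matrix('mask')  # stands in for a model's symbolic mask input
masked_sum = (data * (1 - mask)).sum()

# substitute an all-zeros mask (mask nothing) at compile time:
f = theano.function([data], masked_sum, givens={mask: T.zeros_like(data)})
print(f(np.ones((2, 2), dtype=theano.config.floatX)))  # 4.0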

Example 10: get_celerite_matrices

    def get_celerite_matrices(self, x, diag):
        x = tt.as_tensor_variable(x)
        diag = tt.as_tensor_variable(diag)
        ar, cr, ac, bc, cc, dc = self.coefficients
        a = diag + tt.sum(ar) + tt.sum(ac)
        U = tt.concatenate((
            ar[None, :] + tt.zeros_like(x)[:, None],
            ac[None, :] * tt.cos(dc[None, :] * x[:, None])
            + bc[None, :] * tt.sin(dc[None, :] * x[:, None]),
            ac[None, :] * tt.sin(dc[None, :] * x[:, None])
            - bc[None, :] * tt.cos(dc[None, :] * x[:, None]),
        ), axis=1)

        V = tt.concatenate((
            tt.zeros_like(ar)[None, :] + tt.ones_like(x)[:, None],
            tt.cos(dc[None, :] * x[:, None]),
            tt.sin(dc[None, :] * x[:, None]),
        ), axis=1)

        dx = x[1:] - x[:-1]
        P = tt.concatenate((
            tt.exp(-cr[None, :] * dx[:, None]),
            tt.exp(-cc[None, :] * dx[:, None]),
            tt.exp(-cc[None, :] * dx[:, None]),
        ), axis=1)

        return a, U, V, P
Author: dfm, Project: exoplanet, Lines: 27, Source: terms.py
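The ar[None, :] + tt.zeros_like(x)[:, None] expression in Example 10 is a broadcasting trick: adding a length-N column of zeros tiles the row vector ar into an (N, len(ar)) matrix without an explicit tile/repeat. A minimal demonstration:

import numpy as np
import theano
import theano.tensor as T

ar = T.vector('ar')
x = T.vector('x')
tiled = ar[None, :] + T.zeros_like(x)[:, None]  # shape (len(x), len(ar))

f = theano.function([ar, x], tiled)
floatX = theano.config.floatX
print(f(np.asarray([1., 2.], dtype=floatX), np.zeros(3, dtype=floatX)))
# [[1. 2.]
#  [1. 2.]
#  [1. 2.]]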

Example 11: lstm_layer

def lstm_layer(hidden_inpt, hidden_to_hidden,
               ingate_peephole, outgate_peephole, forgetgate_peephole,
               f):
    n_hidden_out = hidden_to_hidden.shape[0]

    def lstm_step(x_t, s_tm1, h_tm1):
        x_t += T.dot(h_tm1, hidden_to_hidden)

        inpt = T.tanh(x_t[:, :n_hidden_out])
        gates = x_t[:, n_hidden_out:]
        inpeep = s_tm1 * ingate_peephole
        outpeep = s_tm1 * outgate_peephole
        forgetpeep = s_tm1 * forgetgate_peephole

        ingate = f(gates[:, :n_hidden_out] + inpeep)
        forgetgate = f(
            gates[:, n_hidden_out:2 * n_hidden_out] + forgetpeep)
        outgate = f(gates[:, 2 * n_hidden_out:] + outpeep)

        s_t = inpt * ingate + s_tm1 * forgetgate
        h_t = f(s_t) * outgate
        return [s_t, h_t]

    (states, hidden_rec), _ = theano.scan(
        lstm_step,
        sequences=hidden_inpt,
        outputs_info=[T.zeros_like(hidden_inpt[0, :, 0:n_hidden_out]),
                      T.zeros_like(hidden_inpt[0, :, 0:n_hidden_out])
                      ])

    return states, hidden_rec
Author: ddofer, Project: breze, Lines: 31, Source: rnn.py
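Examples 1, 11, and 13 all pass tensor.zeros_like(...) as the initial state (outputs_info) of theano.scan, so the recurrence starts from a tensor whose shape and dtype are derived symbolically from the inputs. A minimal cumulative-sum sketch of the same pattern:

import numpy as np
import theano
import theano.tensor as T

X = T.matrix('X')  # a sequence of rows

# running sum over rows, starting from a zero row shaped like X[0]
sums, _ = theano.scan(lambda row, acc: acc + row,
                      sequences=X,
                      outputs_info=T.zeros_like(X[0]))

f = theano.function([X], sums)
print(f(np.ones((3, 2), dtype=theano.config.floatX)))
# [[1. 1.]
#  [2. 2.]
#  [3. 3.]]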

Example 12: mf

    def mf(self, V, Y=None, return_history=False, niter=None, block_grad=None):

        drop_mask = T.zeros_like(V)

        if Y is not None:
            drop_mask_Y = T.zeros_like(Y)
        else:
            batch_size = V.shape[0]
            num_classes = self.dbm.hidden_layers[-1].n_classes
            assert isinstance(num_classes, int)
            Y = T.alloc(1., V.shape[0], num_classes)
            drop_mask_Y = T.alloc(1., V.shape[0])

        history = self.do_inpainting(X=V,
            Y=Y,
            return_history=True,
            drop_mask=drop_mask,
            drop_mask_Y=drop_mask_Y,
            noise=False,
            niter=niter,
            block_grad=block_grad)

        if return_history:
            return [elem['H_hat'] for elem in history]

        return history[-1]['H_hat']
Author: cc13ny, Project: galatea, Lines: 26, Source: ensemble.py

Example 13: rnade_sym

    def rnade_sym(self, x, W, V_alpha, b_alpha, V_mu, b_mu, V_sigma, b_sigma, activation_rescaling):
        """x is a matrix of column datapoints (VxB); V = n_visible, B = batch size"""
        def density_given_previous_a_and_x(x, w, V_alpha, b_alpha, V_mu, b_mu, V_sigma, b_sigma, activation_factor, p_prev, a_prev, x_prev):
            a = a_prev + T.dot(T.shape_padright(x_prev, 1), T.shape_padleft(w, 1))
            h = self.nonlinearity(a * activation_factor)  # BxH
            # x = theano.printing.Print('x')(x)
            Alpha = T.nnet.softmax(T.dot(h, V_alpha) + T.shape_padleft(b_alpha))  # BxC
            Alpha = theano.printing.Print('Alphas')(Alpha)
            Mu = T.dot(h, V_mu) + T.shape_padleft(b_mu)  # BxC
            Mu = theano.printing.Print('Mu')(Mu)
            Sigma = T.exp((T.dot(h, V_sigma) + T.shape_padleft(b_sigma)))  # BxC
            Sigma = theano.printing.Print('Sigmas')(Sigma)
            arg = (-constantX(0.5) * T.sqr((Mu - T.shape_padright(x, 1)) / Sigma)
                   - T.log(Sigma) - constantX(0.5 * numpy.log(2 * numpy.pi)) + T.log(Alpha))
            arg = theano.printing.Print('printing argument of logsumexp')(arg)
            p_var = log_sum_exp(arg)
            p_var = theano.printing.Print('p_var')(p_var)
            p = p_prev + p_var
            # p = theano.printing.Print('p')(p)
            return (p, a, x)
        # The first element is different (it is predicted from the bias only)
        a0 = T.zeros_like(T.dot(x.T, W))  # BxH
        p0 = T.zeros_like(x[0])
        x0 = T.ones_like(x[0])
        ([ps, _as, _xs], updates) = theano.scan(density_given_previous_a_and_x,
                                                sequences=[x, W, V_alpha, b_alpha, V_mu, b_mu, V_sigma, b_sigma, activation_rescaling],
                                                outputs_info=[p0, a0, x0])
        return (ps[-1], updates)
Author: sidsig, Project: NIPS-2014, Lines: 27, Source: RNN_RNADE_slow.py

Example 14: filter_and_prob

def filter_and_prob(inpt, transition, emission,
                    visible_noise_mean, visible_noise_cov,
                    hidden_noise_mean, hidden_noise_cov,
                    initial_hidden, initial_hidden_cov):
    step = forward_step(
        transition, emission,
        visible_noise_mean, visible_noise_cov,
        hidden_noise_mean, hidden_noise_cov)

    hidden_mean_0 = T.zeros_like(hidden_noise_mean).dimshuffle('x', 0)
    hidden_cov_0 = T.zeros_like(hidden_noise_cov).dimshuffle('x', 0, 1)
    f0, F0, ll0 = step(inpt[0], hidden_mean_0, hidden_cov_0)
    replace = {hidden_noise_mean: initial_hidden, 
               hidden_noise_cov: initial_hidden_cov}
    f0 = theano.clone(f0, replace)
    F0 = theano.clone(F0, replace)
    ll0 = theano.clone(ll0, replace)

    (f, F, ll), _ = theano.scan(
        step,
        sequences=inpt[1:],
        outputs_info=[f0, F0, None])

    ll = ll.sum(axis=0)

    f = T.concatenate([T.shape_padleft(f0), f])
    F = T.concatenate([T.shape_padleft(F0), F])
    ll += ll0

    return f, F, ll
Author: ddofer, Project: breze, Lines: 30, Source: lds.py

Example 15: create_cost_fun

	def create_cost_fun(self):
		# create a cost function that takes each prediction at every
		# timestep and guesses the next timestep's value:
		what_to_predict = self.input_mat[:, 1:]
		# because some sentences are shorter, we place masks where the
		# sentences end (for_how_long is zero-indexed, e.g. an example
		# spanning `[2,3)` has this value set to 0, hence the subtraction of 1):
		for_how_long = self.for_how_long - 1
		# all sentences start at T=0:
		starting_when = T.zeros_like(self.for_how_long)

		self.lstm_cost = masked_loss(self.lstm_predictions,
				what_to_predict,
				for_how_long,
				starting_when).sum()

		zero_entropy = T.zeros_like(self.entropy)
		real_entropy = T.switch(self.mask_matrix, self.entropy, zero_entropy)
		zero_key_entropy = T.zeros_like(self.key_entropy)
		real_key_entropy = T.switch(self.mask_matrix, self.key_entropy, zero_key_entropy)

		self.final_cost = (masked_loss(self.final_predictions,
				what_to_predict,
				for_how_long,
				starting_when).sum()
			+ self.entropy_reg * real_entropy.sum()
			+ self.key_entropy_reg * real_key_entropy.sum())
Author: darongliu, Project: Lstm_Turing_LM, Lines: 28, Source: lm_v4.py
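The T.switch(mask, values, T.zeros_like(values)) idiom in Example 15 zeroes out entries wherever the mask is 0 while keeping everything symbolic. A compact demonstration:

import numpy as np
import theano
import theano.tensor as T

values = T.vector('values')
mask = T.vector('mask')  # 1.0 where valid, 0.0 where padded

masked = T.switch(mask, values, T.zeros_like(values))
f = theano.function([values, mask], masked)

floatX = theano.config.floatX
print(f(np.asarray([5., 7., 9.], dtype=floatX),
        np.asarray([1., 0., 1.], dtype=floatX)))  # [5. 0. 9.]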


Note: The theano.tensor.zeros_like examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, refer to the corresponding project's license; do not reproduce without permission.