

Python tensor.zeros_like Method Code Examples

This article collects typical usage examples of the Python method theano.tensor.zeros_like. If you are wondering what tensor.zeros_like does, or how to use it in practice, the curated code examples below may help. You can also explore further usage examples from the theano.tensor module.


The following presents 15 code examples of tensor.zeros_like, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.

Example 1: ctc_path_probs

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros

    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 23, Source: theano_backend.py

Example 2: reduce_log_sum

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def reduce_log_sum(tensor, axis=None, guaranteed_finite=False):
    """
    Sum probabilities in the log domain, i.e. return
        log(e^vec[0] + e^vec[1] + ...)
        = log(e^x e^(vec[0]-x) + e^x e^(vec[1]-x) + ...)
        = log(e^x [e^(vec[0]-x) + e^(vec[1]-x) + ...])
        = log(e^x) + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
        = x + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
    For numerical stability, we choose x = max(vec)
    Note that if x is -inf, that means all values are -inf,
    so the answer should be -inf. In this case, choose x = 0
    """
    maxval = T.max(tensor, axis)
    maxval_full = T.max(tensor, axis, keepdims=True)
    if not guaranteed_finite:
        maxval = T.switch(T.isfinite(maxval), maxval, T.zeros_like(maxval))
        maxval_full = T.switch(T.isfinite(maxval_full), maxval_full, T.zeros_like(maxval_full))
    reduced_sum = T.sum(T.exp(tensor - maxval_full), axis)
    logsum = maxval + T.log(reduced_sum)
    return logsum 
Author: hexahedria, Project: gated-graph-transformer-network, Lines: 22, Source: util.py
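
As a quick sanity check on the log-sum-exp trick described in the docstring above, here is a minimal plain-NumPy sketch (not part of the original project) that reproduces the same stabilization:

import numpy as np

def reduce_log_sum_np(vec):
    # Choose x = max(vec) for stability; if every value is -inf,
    # fall back to x = 0 so the result is -inf rather than nan.
    x = np.max(vec)
    if not np.isfinite(x):
        x = 0.0
    return x + np.log(np.sum(np.exp(vec - x)))

vec = np.array([-1000.0, -1000.0])
# The naive np.log(np.sum(np.exp(vec))) underflows to -inf,
# while the stabilized version returns roughly -999.31.
print(reduce_log_sum_np(vec))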

Example 3: __init__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def __init__(self):
        def f(x, u, i, terminal):
            if terminal:
                ctrl_cost = T.zeros_like(x[..., 0])
            else:
                ctrl_cost = T.square(u).sum(axis=-1)

            # x: (batch_size, 8)
            # x[..., 0:4]: qpos
            # x[..., 4:8]: qvel, time derivatives of qpos, not used in the cost.
            theta = x[..., 0]  # qpos[0]: angle of joint 0
            phi = x[..., 1]  # qpos[1]: angle of joint 1
            target_xpos = x[..., 2:4]  # qpos[2:4], target x & y coordinate
            body1_xpos = 0.1 * T.stack([T.cos(theta), T.sin(theta)], axis=1)
            tip_xpos_incr = 0.11 * T.stack([T.cos(phi), T.sin(phi)], axis=1)
            tip_xpos = body1_xpos + tip_xpos_incr
            delta = tip_xpos - target_xpos

            state_cost = T.sqrt(T.sum(delta * delta, axis=-1))
            cost = state_cost + ctrl_cost

            return cost

        super().__init__(f, state_size=8, action_size=2) 
Author: HumanCompatibleAI, Project: adversarial-policies, Lines: 26, Source: mujoco_costs.py

Example 4: sp_zeros_like

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def sp_zeros_like(x):
    """
    Construct a sparse matrix of zeros.

    Parameters
    ----------
    x
        Sparse matrix to take the shape.

    Returns
    -------
    A sparse matrix
        The same as `x` with zero entries for all elements.

    """

    # TODO: don't restrict to CSM formats
    _, _, indptr, shape = csm_properties(x)
    return CSM(format=x.format)(data=numpy.array([], dtype=x.type.dtype),
                                indices=numpy.array([], dtype='int32'),
                                indptr=tensor.zeros_like(indptr),
                                shape=shape) 
Author: muhanzhang, Project: D-VAE, Lines: 24, Source: basic.py
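
For intuition, a rough SciPy analogue of this helper (an illustration assumed here, not code from the Theano source) would build an empty CSR matrix with the same shape and dtype:

import numpy as np
import scipy.sparse as sp

def sp_zeros_like_np(x):
    # Same shape and dtype as x, but with no stored entries.
    return sp.csr_matrix(x.shape, dtype=x.dtype)

m = sp.csr_matrix(np.eye(3, dtype='float32'))
z = sp_zeros_like_np(m)
print(z.shape, z.nnz)  # (3, 3) 0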

Example 5: grad

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def grad(self, inputs, g):

        # g[1:] is all integers, so their Jacobian in this op
        # is 0. We thus don't need to worry about what their values
        # are.

        # if g[0] is disconnected, then this op doesn't contribute
        # any gradient anywhere. but we know that at least one of
        # g[1:] is connected, or this grad method wouldn't have been
        # called, so we should report zeros
        (csm,) = inputs
        if isinstance(g[0].type, DisconnectedType):
            return [csm.zeros_like()]

        data, indices, indptr, shape = csm_properties(csm)
        return [CSM(csm.format)(g[0], indices, indptr, shape)]

# don't make this a function or it breaks some optimizations below 
Author: muhanzhang, Project: D-VAE, Lines: 20, Source: basic.py

Example 6: test_gpujoin_gpualloc

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),
                                           T.ones_like(b)) + 4,
                             mode=mode_with_gpu)

    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) 
Author: muhanzhang, Project: D-VAE, Lines: 27, Source: test_basic_ops.py

Example 7: test_gpujoin_gpualloc

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),
                                            T.ones_like(b)) + 4,
                             mode=mode_with_gpu)
    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) 
Author: muhanzhang, Project: D-VAE, Lines: 26, Source: test_basic_ops.py

Example 8: build_mf_reset_function

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def build_mf_reset_function(self):
        if not hasattr(self, 'mf_reset_fn'):
            # Compile functions
            logger.debug("Building mean field reset function")

            mf_reset_update = []

            if self.add_latent_gaussian_per_utterance:
                mf_reset_update.append((self.latent_gaussian_utterance_variable_approx_posterior_mean_mfbias, T.zeros_like(self.latent_gaussian_utterance_variable_approx_posterior_mean_mfbias)))
                mf_reset_update.append((self.latent_gaussian_utterance_variable_approx_posterior_var_mfbias, T.zeros_like(self.latent_gaussian_utterance_variable_approx_posterior_var_mfbias)))

            if self.add_latent_piecewise_per_utterance:
                mf_reset_update.append((self.latent_piecewise_utterance_variable_approx_posterior_alpha_mfbias, T.zeros_like(self.latent_piecewise_utterance_variable_approx_posterior_alpha_mfbias)))

            self.mf_reset_fn = theano.function(inputs=[],
                                                outputs=[],
                                                updates=mf_reset_update, 
                                                on_unused_input='warn', 
                                                name="mf_reset_fn")

        return self.mf_reset_fn

    # Batch saliency evaluation function. 
Author: julianser, Project: hred-latent-piecewise, Lines: 27, Source: dialog_encdec.py

Example 9: AdaMax

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def AdaMax(w, objective, alpha=.01, beta1=.1, beta2=.001):
    print('AdaMax', 'alpha:', alpha, 'beta1:', beta1, 'beta2:', beta2)
    g = T.grad(objective.sum(), w, disconnected_inputs='warn')
    
    new = OrderedDict()
    
    for i in range(len(w)):
        # g[i] = T.switch(T.isnan(g[i]), T.zeros_like(g[i]), g[i])  # remove NaNs
        mom1 = G.sharedf(w[i].get_value() * 0.)
        _max = G.sharedf(w[i].get_value() * 0.)
        new[mom1] = (1-beta1) * mom1 + beta1 * g[i]
        new[_max] = T.maximum((1-beta2)*_max, abs(g[i]) + 1e-8)
        new[w[i]] = w[i] + alpha *  new[mom1] / new[_max]
                
    return new

# AdaMax that averages over multiple minibatches 
Author: openai, Project: iaf, Lines: 19, Source: optim.py
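
Note that this variant uses beta1 and beta2 as update fractions, the reverse of the decay-rate convention in the original AdaMax paper. A minimal NumPy sketch of a single update step under that convention (for illustration only, not the project's code):

import numpy as np

def adamax_step(w, g, mom1, mx, alpha=0.01, beta1=0.1, beta2=0.001):
    # Exponential moving average of the gradient.
    mom1 = (1 - beta1) * mom1 + beta1 * g
    # Exponentially decayed infinity norm of past gradients.
    mx = np.maximum((1 - beta2) * mx, np.abs(g) + 1e-8)
    # Parameter update normalized by the infinity norm.
    w = w + alpha * mom1 / mx
    return w, mom1, mx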

Example 10: normalize_batch_in_training

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2) 
Author: Relph1119, Project: GraphicDesignPatternByPython, Lines: 25, Source: theano_backend.py

Example 11: batch_normalization

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated 
Author: Relph1119, Project: GraphicDesignPatternByPython, Lines: 27, Source: theano_backend.py
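
The transformation this inference-time path computes reduces to the standard batch-norm formula; a minimal NumPy sketch (an illustration, not the backend implementation):

import numpy as np

def batch_normalization_np(x, mean, var, beta, gamma, epsilon=1e-3):
    # Normalize with precomputed statistics, then scale and shift.
    return gamma * (x - mean) / np.sqrt(var + epsilon) + beta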

Example 12: get_aggregator

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def get_aggregator(self):
        initialized = shared_like(0.)
        expression_acc = shared_like(self.expression)

        # Dummy default expression to use as the previously-accumulated
        # value, that has the same shape as the new result
        expression_zeros = tensor.as_tensor(self.expression).zeros_like()

        conditional_update_expr = self.expression + ifelse(initialized,
                                                           expression_acc,
                                                           expression_zeros)

        initialization_updates = [(expression_acc,
                                   tensor.zeros_like(expression_acc)),
                                  (initialized, 0.)]
        accumulation_updates = [(expression_acc,
                                 conditional_update_expr),
                                (initialized, 1.)]
        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=expression_acc)
        return aggregator 
Author: rizar, Project: attention-lvcsr, Lines: 25, Source: aggregation.py
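
The updates above implement an accumulate-with-lazy-initialization pattern: the first accumulation replaces the zeros placeholder, and subsequent ones add to the running value. A plain-Python sketch of the same control flow (hypothetical, purely for illustration):

class RunningSum:
    def __init__(self):
        self.initialized = False
        self.acc = None

    def accumulate(self, value):
        # First call stores the value; later calls add to the accumulator.
        self.acc = value if not self.initialized else self.acc + value
        self.initialized = True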

Example 13: batch_normalization

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 27, Source: theano_backend.py

Example 14: zeros_like

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def zeros_like(x, name=None):
    return T.zeros_like(x) 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 4, Source: theano_backend.py

Example 15: categorical_best

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def categorical_best(tensor):
    """
    tensor should be a tensor of shape (..., categories)
    Return a new tensor of the same shape but one-hot at position of best category
    """
    flat_tensor = tensor.reshape([-1, tensor.shape[-1]])
    argmax_posns = T.argmax(flat_tensor, 1)
    flat_snapped = T.zeros_like(flat_tensor)
    flat_snapped = T.set_subtensor(flat_snapped[T.arange(flat_tensor.shape[0]), argmax_posns], 1.0)
    snapped = flat_snapped.reshape(tensor.shape)
    return snapped 
Author: hexahedria, Project: gated-graph-transformer-network, Lines: 13, Source: util.py
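
A minimal NumPy sketch of the same one-hot snapping (for illustration; the Theano version above builds a symbolic graph instead):

import numpy as np

def categorical_best_np(t):
    # Flatten to (rows, categories), one-hot the argmax of each row,
    # then restore the original shape.
    flat = t.reshape(-1, t.shape[-1])
    out = np.zeros_like(flat)
    out[np.arange(flat.shape[0]), flat.argmax(axis=1)] = 1.0
    return out.reshape(t.shape)

print(categorical_best_np(np.array([[0.1, 0.7, 0.2]])))  # [[0. 1. 0.]]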


Note: The theano.tensor.zeros_like method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.