

Python tensor.zeros_like Method Code Examples

This article collects typical usage examples of the theano.tensor.zeros_like method in Python. If you are wondering how tensor.zeros_like works, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples from the theano.tensor module.


Fifteen code examples of tensor.zeros_like are shown below, sorted by popularity by default. You can upvote the ones you like or find useful; your votes help the system recommend better Python code examples.
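
Before the project examples, here is a minimal usage sketch of T.zeros_like itself (a hypothetical standalone demo, not drawn from any of the projects below): it builds a symbolic tensor of zeros with the same shape and dtype as its argument.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
f = theano.function([x], T.zeros_like(x))               # zeros with x's shape and dtype
print(f(np.ones((2, 3), dtype=theano.config.floatX)))  # a 2x3 matrix of zeros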

Example 1: ctc_path_probs

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros

    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask 
Developer: lingluodlut | Project: Att-ChemdNER | Lines: 23 | Source: theano_backend.py
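
The example above is dense; the core zeros_like pattern in it is using T.zeros_like(L[0]) as the initial value of a theano.scan recurrence. Below is a minimal self-contained sketch of just that pattern (a hypothetical demo, not code from Att-ChemdNER):

import numpy as np
import theano
import theano.tensor as T

L = T.matrix('L')                        # (time, labels)
log_first = T.zeros_like(L[0])           # initial state, shape (labels,)
outputs, _ = theano.scan(lambda row, prev: prev + row,
                         sequences=[L], outputs_info=[log_first])
f = theano.function([L], outputs[-1])    # running sum over time steps
print(f(np.arange(6, dtype=theano.config.floatX).reshape(3, 2)))  # [6. 9.]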

Example 2: reduce_log_sum

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def reduce_log_sum(tensor, axis=None, guaranteed_finite=False):
    """
    Sum probabilities in the log domain, i.e. return
        log(e^vec[0] + e^vec[1] + ...)
        = log(e^x e^(vec[0]-x) + e^x e^(vec[1]-x) + ...)
        = log(e^x [e^(vec[0]-x) + e^(vec[1]-x) + ...])
        = log(e^x) + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
        = x + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
    For numerical stability, we choose x = max(vec)
    Note that if x is -inf, that means all values are -inf,
    so the answer should be -inf. In this case, choose x = 0
    """
    maxval = T.max(tensor, axis)
    maxval_full = T.max(tensor, axis, keepdims=True)
    if not guaranteed_finite:
        maxval = T.switch(T.isfinite(maxval), maxval, T.zeros_like(maxval))
        maxval_full = T.switch(T.isfinite(maxval_full), maxval_full, T.zeros_like(maxval_full))
    reduced_sum = T.sum(T.exp(tensor - maxval_full), axis)
    logsum = maxval + T.log(reduced_sum)
    return logsum 
Developer: hexahedria | Project: gated-graph-transformer-network | Lines: 22 | Source: util.py
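
The docstring's derivation is easy to verify numerically. This NumPy-only check (a hypothetical demo, independent of util.py) shows why subtracting the max matters:

import numpy as np

vec = np.array([-1000.0, -1000.5, -999.7])
x = vec.max()
stable = x + np.log(np.exp(vec - x).sum())  # the docstring's identity
naive = np.log(np.exp(vec).sum())           # exp underflows to 0, so this is -inf
print(stable, naive)                        # ~-998.9 vs. -inf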

Example 3: __init__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def __init__(self):
        def f(x, u, i, terminal):
            if terminal:
                ctrl_cost = T.zeros_like(x[..., 0])
            else:
                ctrl_cost = T.square(u).sum(axis=-1)

            # x: (batch_size, 8)
            # x[..., 0:4]: qpos
            # x[..., 4:8]: qvel, time derivatives of qpos, not used in the cost.
            theta = x[..., 0]  # qpos[0]: angle of joint 0
            phi = x[..., 1]  # qpos[1]: angle of joint 1
            target_xpos = x[..., 2:4]  # qpos[2:4], target x & y coordinate
            body1_xpos = 0.1 * T.stack([T.cos(theta), T.sin(theta)], axis=1)
            tip_xpos_incr = 0.11 * T.stack([T.cos(phi), T.sin(phi)], axis=1)
            tip_xpos = body1_xpos + tip_xpos_incr
            delta = tip_xpos - target_xpos

            state_cost = T.sqrt(T.sum(delta * delta, axis=-1))
            cost = state_cost + ctrl_cost

            return cost

        super().__init__(f, state_size=8, action_size=2) 
Developer: HumanCompatibleAI | Project: adversarial-policies | Lines: 26 | Source: mujoco_costs.py
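
To isolate the zeros_like usage above: at a terminal time step the control cost is a batch of zeros shaped like x[..., 0], i.e. one scalar per batch element. A hypothetical standalone sketch:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')   # (batch_size, 8) state batch
u = T.matrix('u')   # (batch_size, 2) action batch
ctrl_terminal = T.zeros_like(x[:, 0])    # same as x[..., 0] in the snippet
ctrl_running = T.square(u).sum(axis=-1)  # squared-control cost per sample
f = theano.function([x, u], [ctrl_terminal, ctrl_running])
xs = np.zeros((3, 8), dtype=theano.config.floatX)
us = np.ones((3, 2), dtype=theano.config.floatX)
print(f(xs, us))    # [array([0., 0., 0.]), array([2., 2., 2.])]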

Example 4: sp_zeros_like

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def sp_zeros_like(x):
    """
    Construct a sparse matrix of zeros.

    Parameters
    ----------
    x
        Sparse matrix to take the shape.

    Returns
    -------
    A sparse matrix
        The same as `x`, but with zero entries for all elements.

    """

    # TODO: don't restrict to CSM formats
    _, _, indptr, shape = csm_properties(x)
    return CSM(format=x.format)(data=numpy.array([], dtype=x.type.dtype),
                                indices=numpy.array([], dtype='int32'),
                                indptr=tensor.zeros_like(indptr),
                                shape=shape) 
Developer: muhanzhang | Project: D-VAE | Lines: 24 | Source: basic.py
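
For intuition, here is a SciPy analogue of the same construction (an assumed illustration, not Theano code): empty data and indices plus an all-zero indptr yield a zero matrix of the original shape.

import numpy as np
import scipy.sparse as sp

x = sp.csr_matrix(np.array([[1., 0.], [0., 2.]]))
z = sp.csr_matrix((np.array([], dtype=x.dtype),
                   np.array([], dtype='int32'),
                   np.zeros_like(x.indptr)),
                  shape=x.shape)
print(z.toarray())  # [[0. 0.], [0. 0.]]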

Example 5: grad

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def grad(self, inputs, g):

        # g[1:] is all integers, so their Jacobian in this op
        # is 0. We thus don't need to worry about what their values
        # are.

        # if g[0] is disconnected, then this op doesn't contribute
        # any gradient anywhere. but we know that at least one of
        # g[1:] is connected, or this grad method wouldn't have been
        # called, so we should report zeros
        (csm,) = inputs
        if isinstance(g[0].type, DisconnectedType):
            return [csm.zeros_like()]

        data, indices, indptr, shape = csm_properties(csm)
        return [CSM(csm.format)(g[0], indices, indptr, shape)]

# don't make this a function or it breaks some optimizations below 
Developer: muhanzhang | Project: D-VAE | Lines: 20 | Source: basic.py

Example 6: test_gpujoin_gpualloc

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),
                                            T.ones_like(b)) + 4,
                             mode=mode_with_gpu)

    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) 
Developer: muhanzhang | Project: D-VAE | Lines: 27 | Source: test_basic_ops.py

Example 7: test_gpujoin_gpualloc

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),
                                            T.ones_like(b)) + 4,
                             mode=mode_with_gpu)
    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) 
Developer: muhanzhang | Project: D-VAE | Lines: 26 | Source: test_basic_ops.py

Example 8: build_mf_reset_function

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def build_mf_reset_function(self):
        if not hasattr(self, 'mf_reset_fn'):
            # Compile functions
            logger.debug("Building mean field reset function")

            mf_reset_update = []

            if self.add_latent_gaussian_per_utterance:
                mf_reset_update.append((self.latent_gaussian_utterance_variable_approx_posterior_mean_mfbias, T.zeros_like(self.latent_gaussian_utterance_variable_approx_posterior_mean_mfbias)))
                mf_reset_update.append((self.latent_gaussian_utterance_variable_approx_posterior_var_mfbias, T.zeros_like(self.latent_gaussian_utterance_variable_approx_posterior_var_mfbias)))

            if self.add_latent_piecewise_per_utterance:
                mf_reset_update.append((self.latent_piecewise_utterance_variable_approx_posterior_alpha_mfbias, T.zeros_like(self.latent_piecewise_utterance_variable_approx_posterior_alpha_mfbias)))

            self.mf_reset_fn = theano.function(inputs=[],
                                                outputs=[],
                                                updates=mf_reset_update, 
                                                on_unused_input='warn', 
                                                name="mf_reset_fn")

        return self.mf_reset_fn

    # Batch saliency evaluation function. 
Developer: julianser | Project: hred-latent-piecewise | Lines: 27 | Source: dialog_encdec.py
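
The reset idiom above, pairing each shared variable with T.zeros_like(itself) in an updates list, can be seen in isolation in this minimal hypothetical demo:

import numpy as np
import theano
import theano.tensor as T

bias = theano.shared(np.ones(3, dtype='float32'), name='mfbias')
reset_fn = theano.function([], [], updates=[(bias, T.zeros_like(bias))])
reset_fn()
print(bias.get_value())  # [0. 0. 0.]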

Example 9: AdaMax

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def AdaMax(w, objective, alpha=.01, beta1=.1, beta2=.001):
    print('AdaMax', 'alpha:', alpha, 'beta1:', beta1, 'beta2:', beta2)
    g = T.grad(objective.sum(), w, disconnected_inputs='warn')
    
    new = OrderedDict()
    
    for i in range(len(w)):
        #gi = T.switch(T.isnan(gi),T.zeros_like(gi),gi) #remove NaN's
        mom1 = G.sharedf(w[i].get_value() * 0.)  # first-moment estimate, init to zeros
        _max = G.sharedf(w[i].get_value() * 0.)  # running max of |grad|, init to zeros
        new[mom1] = (1-beta1) * mom1 + beta1 * g[i]
        new[_max] = T.maximum((1-beta2)*_max, abs(g[i]) + 1e-8)
        new[w[i]] = w[i] + alpha *  new[mom1] / new[_max]
                
    return new

# AdaMax that averages over multiple minibatches 
Developer: openai | Project: iaf | Lines: 19 | Source: optim.py
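
As a sanity check, here is one AdaMax step of the update rule above in plain NumPy (a hypothetical trace; note this codebase weights the new gradient by beta1 and the old moment by 1 - beta1, the reverse of the paper's convention):

import numpy as np

alpha, beta1, beta2 = .01, .1, .001
w = np.zeros(3); mom1 = np.zeros(3); _max = np.zeros(3)
g = np.array([0.5, -1.0, 2.0])                           # a dummy gradient

mom1 = (1 - beta1) * mom1 + beta1 * g                    # biased first moment
_max = np.maximum((1 - beta2) * _max, np.abs(g) + 1e-8)  # infinity-norm term
w = w + alpha * mom1 / _max
print(w)  # ~alpha * beta1 * sign(g) on the first step: [0.001 -0.001 0.001]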

Example 10: normalize_batch_in_training

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2) 
Developer: Relph1119 | Project: GraphicDesignPatternByPython | Lines: 25 | Source: theano_backend.py
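
On the returned statistics: batch_normalization_train yields an inverse standard deviation, and T.inv(stdinv ** 2) converts it back to a variance. A NumPy sketch of that relationship (an assumed demo):

import numpy as np

x = np.random.randn(4, 3).astype('float32')
stdinv = 1.0 / np.sqrt(x.var(axis=0) + 1e-3)   # what the train op returns
var = 1.0 / stdinv ** 2                        # what T.inv(stdinv ** 2) recovers
print(np.allclose(var, x.var(axis=0) + 1e-3))  # True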

Example 11: batch_normalization

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated 
Developer: Relph1119 | Project: GraphicDesignPatternByPython | Lines: 27 | Source: theano_backend.py
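
At inference time the underlying computation is the familiar affine rescaling. A NumPy sketch of what batch_normalization_test evaluates (an assumed equivalence, for illustration only):

import numpy as np

x = np.random.randn(4, 3).astype('float32')
mean, var = x.mean(axis=0), x.var(axis=0)
gamma, beta, eps = np.ones(3), np.zeros(3), 1e-3
normed = gamma * (x - mean) / np.sqrt(var + eps) + beta
print(normed.mean(axis=0), normed.std(axis=0))  # ~0 and ~1 per column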

Example 12: get_aggregator

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def get_aggregator(self):
        initialized = shared_like(0.)
        expression_acc = shared_like(self.expression)

        # Dummy default expression to use as the previously-accumulated
        # value, that has the same shape as the new result
        expression_zeros = tensor.as_tensor(self.expression).zeros_like()

        conditional_update_expr = self.expression + ifelse(initialized,
                                                           expression_acc,
                                                           expression_zeros)

        initialization_updates = [(expression_acc,
                                   tensor.zeros_like(expression_acc)),
                                  (initialized, 0.)]
        accumulation_updates = [(expression_acc,
                                 conditional_update_expr),
                                (initialized, 1.)]
        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=(expression_acc))
        return aggregator 
Developer: rizar | Project: attention-lvcsr | Lines: 25 | Source: aggregation.py
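
The conditional accumulation above reduces to a simple pattern: on the first batch, add the new value to zeros; afterwards, add it to the running total. A plain-Python analogue (hypothetical; the real version runs as Theano updates on shared variables):

import numpy as np

initialized, acc = False, None
for batch_value in (np.array([1., 2.]), np.array([3., 4.])):
    acc = batch_value + (acc if initialized else np.zeros_like(batch_value))
    initialized = True
print(acc)  # [4. 6.]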

Example 13: batch_normalization

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated 
Developer: hello-sea | Project: DeepLearning_Wavelet-LSTM | Lines: 27 | Source: theano_backend.py

Example 14: zeros_like

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def zeros_like(x, name=None):
    return T.zeros_like(x) 
Developer: lingluodlut | Project: Att-ChemdNER | Lines: 4 | Source: theano_backend.py

Example 15: categorical_best

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import zeros_like [as alias]
def categorical_best(tensor):
    """
    tensor should be a tensor of shape (..., categories)
    Return a new tensor of the same shape but one-hot at position of best category
    """
    flat_tensor = tensor.reshape([-1, tensor.shape[-1]])
    argmax_posns = T.argmax(flat_tensor, 1)
    flat_snapped = T.zeros_like(flat_tensor)
    flat_snapped = T.set_subtensor(flat_snapped[T.arange(flat_tensor.shape[0]), argmax_posns], 1.0)
    snapped = flat_snapped.reshape(tensor.shape)
    return snapped 
Developer: hexahedria | Project: gated-graph-transformer-network | Lines: 13 | Source: util.py
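
The same snap-to-one-hot operation in NumPy, for reference (a hypothetical demo):

import numpy as np

t = np.array([[0.1, 0.7, 0.2],
              [0.5, 0.3, 0.2]])
flat = t.reshape(-1, t.shape[-1])
snapped = np.zeros_like(flat)
snapped[np.arange(flat.shape[0]), flat.argmax(axis=1)] = 1.0
print(snapped.reshape(t.shape))  # rows become [0,1,0] and [1,0,0]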


Note: The theano.tensor.zeros_like examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.