

Python tensor.switch Method Code Examples

This article collects representative usage examples of the Python method theano.tensor.switch. If you are wondering what tensor.switch does, how to call it, or what it is typically used for, the curated code examples below should help. You can also explore further usage examples from theano.tensor, the module this method belongs to.


The following presents 15 code examples of the tensor.switch method, ordered by popularity.
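
Before the examples, here is a minimal sketch of the core semantics, assuming only that Theano and NumPy are installed: T.switch(cond, a, b) selects elementwise from a where cond is true and from b elsewhere.

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
# elementwise select: keep positive entries, zero out the rest (a ReLU)
f = theano.function([x], T.switch(x > 0, x, 0.))
print(f(np.array([-1., 2., -3.])))   # [ 0.  2.  0.]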

Example 1: reduce_log_sum

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def reduce_log_sum(tensor, axis=None, guaranteed_finite=False):
    """
    Sum probabilities in the log domain, i.e return
        log(e^vec[0] + e^vec[1] + ...)
        = log(e^x e^(vec[0]-x) + e^x e^(vec[1]-x) + ...)
        = log(e^x [e^(vec[0]-x) + e^(vec[1]-x) + ...])
        = log(e^x) + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
        = x + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
    For numerical stability, we choose x = max(vec)
    Note that if x is -inf, that means all values are -inf,
    so the answer should be -inf. In this case, choose x = 0
    """
    maxval = T.max(tensor, axis)
    maxval_full = T.max(tensor, axis, keepdims=True)
    if not guaranteed_finite:
        maxval = T.switch(T.isfinite(maxval), maxval, T.zeros_like(maxval))
        maxval_full = T.switch(T.isfinite(maxval_full), maxval_full, T.zeros_like(maxval_full))
    reduced_sum = T.sum(T.exp(tensor - maxval_full), axis)
    logsum = maxval + T.log(reduced_sum)
    return logsum 
Author: hexahedria | Project: gated-graph-transformer-network | Lines: 22 | Source: util.py
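
A hypothetical usage sketch (not part of the quoted project), assuming reduce_log_sum above is in scope together with import theano.tensor as T:

import numpy as np
import theano
import theano.tensor as T

v = T.vector('v')
f = theano.function([v], reduce_log_sum(v))

vals = np.array([0.1, 0.5, 1.2])
print(f(vals))                        # stable log-sum-exp
print(np.log(np.sum(np.exp(vals))))   # naive reference, same value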

Example 2: time_distributed_nonzero_max_pooling

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def time_distributed_nonzero_max_pooling(x):
    """
    Computes the maximum along the time dimension (axis 1),
    treating entries equal to 0.0 as masked out.

    In:
        x - input; a 3D tensor
    """

    import numpy
    import theano.tensor as T

    mask_value = 0.0
    # push masked entries to -inf so they can never win the max
    x = T.switch(T.eq(x, mask_value), -numpy.inf, x)
    masked_max_x = x.max(axis=1)
    # positions that were fully masked come out as -inf; restore mask_value
    masked_max_x = T.switch(T.eq(masked_max_x, -numpy.inf), 0, masked_max_x)
    return masked_max_x
Author: mateuszmalinowski | Project: visual_turing_test-tutorial | Lines: 21 | Source: keras_extensions.py
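
A hypothetical check (not from the project) of the masked pooling above; zeros lose the max unless a whole time column is zero:

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')
f = theano.function([x], time_distributed_nonzero_max_pooling(x))

# batch of 1, 2 time steps, 3 features; 0.0 entries are treated as masked
data = np.array([[[0., 2., 0.],
                  [1., 0., 3.]]])
print(f(data))   # [[ 1.  2.  3.]]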

Example 3: time_distributed_masked_max

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def time_distributed_masked_max(x, m):
    """
    Computes the maximum along the time dimension (axis 1),
    honouring the mask m.

    In:
        x - input; a 3D tensor
        m - mask; a 2D tensor whose zeros mark positions to ignore
    """
    m_value = 0.0
    # place -inf where the mask is off so those positions never win the max
    tmp = K.switch(K.equal(m, 0.0), -numpy.inf, 0.0)
    x_with_inf = x + K.expand_dims(tmp)
    x_max = K.max(x_with_inf, axis=1)
    # fully masked rows would yield -inf; replace them with m_value
    r = K.switch(K.equal(x_max, -numpy.inf), m_value, x_max)
    return r


## classes  ##

# Transforms existing layers to masked layers 
Author: mateuszmalinowski | Project: visual_turing_test-tutorial | Lines: 23 | Source: keras_extensions.py

Example 4: __init__

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def __init__(self, scale_grads=1, target_scale=.1,
            discriminator_default_input_include_prob=1.,
            discriminator_input_include_probs=None,
            discriminator_default_input_scale=1.,
            discriminator_input_scales=None,
            generator_default_input_include_prob=1.,
            generator_default_input_scale=1.,
            inference_default_input_include_prob=None,
            inference_input_include_probs=None,
            inference_default_input_scale=1.,
            inference_input_scales=None,
            init_now_train_generator=True,
            ever_train_discriminator=True,
            ever_train_generator=True,
            ever_train_inference=True,
            no_drop_in_d_for_g=False,
            alternate_g=False):
        self.__dict__.update(locals())
        del self.self
        # These allow you to dynamically switch off training parts.
        # If the corresponding ever_train_* is False, these have
        # no effect.
        self.now_train_generator = sharedX(init_now_train_generator)
        self.now_train_discriminator = sharedX(numpy.array(1., dtype='float32'))
        self.now_train_inference = sharedX(numpy.array(1., dtype='float32')) 
Author: goodfeli | Project: adversarial | Lines: 27 | Source: __init__.py
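
The now_train_* shared flags are later combined with T.switch so that training of a sub-model can be toggled without recompiling; a simplified, hypothetical sketch of that pattern (not the project's actual update code):

import numpy as np
import theano
import theano.tensor as T

now_train = theano.shared(np.float32(1.))   # 1. = train, 0. = freeze
w = theano.shared(np.float32(0.))
step = np.float32(0.1)

# apply the update only while the flag is on; otherwise keep w unchanged
train = theano.function([], w,
                        updates=[(w, T.switch(now_train > 0, w + step, w))])
train(); train()
print(w.get_value())                 # 0.2
now_train.set_value(np.float32(0.))
train()
print(w.get_value())                 # still 0.2: updates are switched off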

Example 5: shared_dropout_layer

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def shared_dropout_layer(shape, use_noise, trng, value, scaled=True):
    # re-scale dropout at training time, so we don't need to at test time
    if scaled:
        proj = tensor.switch(
            use_noise,
            trng.binomial(shape, p=value, n=1, dtype='float32') / value,
            theano.shared(numpy.float32(1.)))
    else:
        proj = tensor.switch(
            use_noise,
            trng.binomial(shape, p=value, n=1, dtype='float32'),
            theano.shared(numpy.float32(value)))
    return proj


# feedforward layer: affine transformation + point-wise nonlinearity 
Author: thompsonb | Project: DL4MT | Lines: 20 | Source: layers.py
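
A hypothetical demo (not from DL4MT) of the scaled branch, assuming the snippet above is in scope together with its module imports; shared_randomstreams is used here just to keep the sketch self-contained:

import numpy
import theano
from theano import tensor
from theano.tensor.shared_randomstreams import RandomStreams

trng = RandomStreams(seed=1234)
use_noise = theano.shared(numpy.float32(1.))

# keep probability 0.8 over a length-5 vector, rescaled while training
mask = shared_dropout_layer((5,), use_noise, trng, 0.8)

f = theano.function([], mask)
print(f())                              # entries are 0. or 1.25 while training
use_noise.set_value(numpy.float32(0.))
print(f())                              # all 1.0 at test time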

Example 6: AdaMax

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def AdaMax(w, objective, alpha=.01, beta1=.1, beta2=.001):
    print('AdaMax', 'alpha:', alpha, 'beta1:', beta1, 'beta2:', beta2)
    g = T.grad(objective.sum(), w, disconnected_inputs='warn')

    new = OrderedDict()

    for i in range(len(w)):
        #g[i] = T.switch(T.isnan(g[i]), T.zeros_like(g[i]), g[i])  # remove NaNs
        mom1 = G.sharedf(w[i].get_value() * 0.)
        _max = G.sharedf(w[i].get_value() * 0.)
        new[mom1] = (1 - beta1) * mom1 + beta1 * g[i]
        new[_max] = T.maximum((1 - beta2) * _max, abs(g[i]) + 1e-8)
        new[w[i]] = w[i] + alpha * new[mom1] / new[_max]

    return new

# AdaMax that averages over multiple minibatches 
Author: openai | Project: iaf | Lines: 19 | Source: optim.py
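
A self-contained, hypothetical driver for AdaMax above; it assumes G.sharedf simply wraps theano.shared around a float32 array (the helper module is not shown in the snippet):

import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict

class G:   # hypothetical stand-in for the project's helper module
    @staticmethod
    def sharedf(x):
        return theano.shared(np.asarray(x, dtype='float32'))

w = [G.sharedf(np.zeros(3))]
target = np.array([1., 2., 3.], dtype='float32')
objective = -T.sum((w[0] - target) ** 2)   # maximised when w[0] == target

step = theano.function([], objective, updates=AdaMax(w, objective, alpha=.1))
for _ in range(200):
    step()
print(w[0].get_value())   # approximately [ 1.  2.  3.]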

Example 7: sgdmgc

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def sgdmgc(cost, params, lr=1.0, alpha=0.1, max_magnitude=5.0, infDecay=0.1):
    """SGD with momentum and gradient clipping"""
    grads = T.grad(cost=cost, wrt=params)
    updates = []

    norm = norm_gs(params, grads)
    sqrtnorm = T.sqrt(norm)
    not_finite = T.or_(T.isnan(sqrtnorm), T.isinf(sqrtnorm))
    adj_norm_gs = T.switch(T.ge(sqrtnorm, max_magnitude), max_magnitude / sqrtnorm, 1.)

    for p, g in zip(params, grads):
        v = shared(p.get_value() * 0.)
        g = T.switch(not_finite, infDecay * p, g * adj_norm_gs)
        v_new = v * (1.0 - alpha) - alpha * lr * g
        updates.append((v, v_new))
        updates.append((p, p + v_new))
    
    return updates, norm 
Author: Ivaylo-Popov | Project: Theano-Lights | Lines: 20 | Source: toolbox.py
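
The two T.switch calls do the actual clipping; a hypothetical, self-contained illustration of just that logic (the infDecay * p fallback is stood in by a constant vector here):

import numpy as np
import theano
import theano.tensor as T

g = T.vector('g')
max_magnitude = 5.0
sqrtnorm = T.sqrt(T.sum(g ** 2))
not_finite = T.or_(T.isnan(sqrtnorm), T.isinf(sqrtnorm))
adj = T.switch(T.ge(sqrtnorm, max_magnitude), max_magnitude / sqrtnorm, 1.)
clipped = T.switch(not_finite, 0.1 * T.ones_like(g), g * adj)

f = theano.function([g], clipped)
print(f(np.array([30., 40.])))    # rescaled to norm 5: [ 3.  4.]
print(f(np.array([1., 2.])))      # below the threshold: unchanged
print(f(np.array([np.nan, 1.])))  # non-finite: falls back to the decay branch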

Example 8: switch

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def switch(condition, then_expression, else_expression):
    """Switches between two operations depending on a scalar value.

    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    # Arguments
        condition: scalar tensor (`int` or `bool`).
        then_expression: either a tensor, or a callable that returns a tensor.
        else_expression: either a tensor, or a callable that returns a tensor.

    # Returns
        The selected tensor.
    """
    if callable(then_expression):
        then_expression = then_expression()
    if callable(else_expression):
        else_expression = else_expression()
    cond_ndim = ndim(condition)
    expr_ndim = ndim(then_expression)
    if cond_ndim < expr_ndim:
        ndim_diff = expr_ndim - cond_ndim
        for _ in range(ndim_diff):
            condition = expand_dims(condition)
    return T.switch(condition, then_expression, else_expression) 
Author: Relph1119 | Project: GraphicDesignPatternByPython | Lines: 27 | Source: theano_backend.py
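
The ndim-matching loop is what lets a low-dimensional condition select whole rows; a Theano-only sketch of the same broadcasting idea (hypothetical names, not Keras code):

import numpy as np
import theano
import theano.tensor as T

cond = T.vector('cond')   # one flag per row
a = T.matrix('a')
b = T.matrix('b')

# pad the condition with a broadcastable axis, like the expand_dims loop above
out = T.switch(T.shape_padright(cond), a, b)

f = theano.function([cond, a, b], out)
print(f(np.array([1., 0.]), np.ones((2, 3)), np.zeros((2, 3))))
# row 0 comes from a, row 1 from b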

Example 9: bound

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def bound(expression, out, *predicates):
    """Bound a theano expression.

    Parameters
    ----------
    * `expression` [theano expression]:
        The expression to bound.

    * `out` [theano expression]:
        The out-of-bounds value.

    * `*predicates` [list of theano expressions]:
        The list of predicates defining the boundaries of `expression`.

    Returns
    -------
    * `value` [theano expression]:
         The bounded expression.
    """
    guard = 1

    for p in predicates:
        guard *= p

    return T.switch(guard, expression, out) 
Author: diana-hep | Project: carl | Lines: 27 | Source: base.py
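
A hypothetical usage sketch, assuming bound above is in scope: a density that is only valid on [0, 1] and is forced to the out-of-bounds value elsewhere:

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
pdf = bound(2.0 * x, 0.0, x >= 0, x <= 1)   # triangular density on [0, 1]

f = theano.function([x], pdf)
print(f(np.array([-0.5, 0.25, 2.0])))   # [ 0.   0.5  0. ]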

Example 10: reScale

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def reScale(self, energy, threshold, replaceValue=1e-7):
    assert energy.ndim == 1
    maxValue = energy.max()

    def checkThreshold(value, threshold, replaceValue):
        return T.switch(T.lt(value, threshold), replaceValue, value)

    result, update = theano.scan(fn=checkThreshold,
                                 outputs_info=None,
                                 sequences=[energy],
                                 non_sequences=[threshold, replaceValue])
    return T.switch(T.lt(maxValue, threshold), energy, result)
Author: lingluodlut | Project: Att-ChemdNER | Lines: 14 | Source: nn.py
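
Since checkThreshold is elementwise, the scan can be collapsed into a single T.switch over the whole vector; a behaviour-equivalent, simplified sketch (not the project's code):

import numpy as np
import theano
import theano.tensor as T

def rescale(energy, threshold, replace_value=1e-7):
    replaced = T.switch(T.lt(energy, threshold), replace_value, energy)
    # only rescale when at least one entry clears the threshold
    return T.switch(T.lt(energy.max(), threshold), energy, replaced)

e = T.vector('e')
f = theano.function([e], rescale(e, 0.5))
print(f(np.array([0.1, 0.7, 0.3])))   # sub-threshold entries replaced by 1e-7
print(f(np.array([0.1, 0.2, 0.3])))   # unchanged: the max is below the threshold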

Example 11: switch

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def switch(condition, then_expression, else_expression):
    '''condition: scalar tensor.
    '''
    return T.switch(condition, then_expression, else_expression) 
Author: lingluodlut | Project: Att-ChemdNER | Lines: 6 | Source: theano_backend.py

Example 12: local_gpua_row_switch

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def local_gpua_row_switch(node):
    """
    Detects eligible Switch instances and replaces them with a GPU
    row switch.
    """

    # bail out unless this is an elementwise Switch
    if not (isinstance(node.op, T.Elemwise)
            and isinstance(node.op.scalar_op, theano.scalar.Switch)):
        return False

    cond, ift, iff = node.inputs
    out, = node.outputs

    # Only applies to Switch instances where a vector mask broadcasts over
    # matrices.
    bcast = cond.broadcastable
    if not bcast or not (not bcast[0] and all(bcast[1:])
                         and ift.ndim in [2, 3]):
        return False

    if not (ift.dtype == iff.dtype == "float32"):
        return False

    if cond.owner and isinstance(cond.owner.op, HostFromGpu):
        gpu_cond, = cond.owner.inputs
    else:
        gpu_cond = as_cuda_ndarray_variable(
                T.cast(cond.flatten(), "float32"))

    if ift.owner and isinstance(ift.owner.op, HostFromGpu):
        gpu_ift, = ift.owner.inputs
    else:
        gpu_ift = as_cuda_ndarray_variable(ift)

    if iff.owner and isinstance(iff.owner.op, HostFromGpu):
        gpu_iff, = iff.owner.inputs
    else:
        gpu_iff = as_cuda_ndarray_variable(iff)

    gpu_op = GpuRowSwitch()
    # use the GPU-resident condition prepared above
    return [HostFromGpu()(gpu_op(gpu_cond, gpu_ift, gpu_iff))]
Author: stanfordnlp | Project: spinn | Lines: 43 | Source: cuda.py

Example 13: fprop

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def fprop(self, state_below):
        p = state_below
        p = T.switch(p > 0., p, self.left_slope * p)
        return p 
Author: goodfeli | Project: adversarial | Lines: 6 | Source: __init__.py
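
fprop is a leaky rectifier; a hypothetical numeric check with left_slope = 0.2 (the attribute's actual value depends on how the layer is configured):

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
left_slope = 0.2   # hypothetical value
f = theano.function([x], T.switch(x > 0., x, left_slope * x))
print(f(np.array([-2., -0.5, 1., 3.])))   # [-0.4 -0.1  1.   3. ]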

Example 14: switch

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def switch(condition, then_expression, else_expression):
    '''condition: scalar tensor.
    '''
    return T.switch(condition, then_expression, else_expression)


# NN OPERATIONS 
Author: mathDR | Project: reading-text-in-the-wild | Lines: 9 | Source: theano_backend.py

Example 15: clip_norm

# Required import: from theano import tensor [as alias]
# Or equivalently: from theano.tensor import switch [as alias]
def clip_norm(g, c, n):
    if c > 0:
        g = T.switch(T.ge(n, c), g * c / n, g)
    return g 
Author: junyanz | Project: iGAN | Lines: 6 | Source: updates.py
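
A hypothetical usage sketch, assuming clip_norm above is in scope; n is typically the gradient norm computed beforehand:

import numpy as np
import theano
import theano.tensor as T

g = T.vector('g')
n = T.sqrt(T.sum(g ** 2))        # gradient norm
f = theano.function([g], clip_norm(g, 5.0, n))

print(f(np.array([30., 40.])))   # rescaled to norm 5: [ 3.  4.]
print(f(np.array([1., 2.])))     # already within the limit: unchanged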


Note: the theano.tensor.switch examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from various open-source projects, and copyright remains with their original authors; consult each project's license before using or redistributing the code. Do not republish without permission.