

Python tensor.switch Method Code Examples

This article collects typical usage examples of the Python method theano.tensor.switch. If you are wondering what tensor.switch does, how to call it, or what idiomatic usage looks like, the curated examples below should help. You can also explore further usage examples from the theano.tensor module.


The following presents 15 code examples of the tensor.switch method, sorted by popularity by default.

Example 1: reduce_log_sum

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def reduce_log_sum(tensor, axis=None, guaranteed_finite=False):
    """
    Sum probabilities in the log domain, i.e. return
        log(e^vec[0] + e^vec[1] + ...)
        = log(e^x e^(vec[0]-x) + e^x e^(vec[1]-x) + ...)
        = log(e^x [e^(vec[0]-x) + e^(vec[1]-x) + ...])
        = log(e^x) + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
        = x + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
    For numerical stability, we choose x = max(vec)
    Note that if x is -inf, that means all values are -inf,
    so the answer should be -inf. In this case, choose x = 0
    """
    maxval = T.max(tensor, axis)
    maxval_full = T.max(tensor, axis, keepdims=True)
    if not guaranteed_finite:
        maxval = T.switch(T.isfinite(maxval), maxval, T.zeros_like(maxval))
        maxval_full = T.switch(T.isfinite(maxval_full), maxval_full, T.zeros_like(maxval_full))
    reduced_sum = T.sum(T.exp(tensor - maxval_full), axis)
    logsum = maxval + T.log(reduced_sum)
    return logsum 
Author: hexahedria, Project: gated-graph-transformer-network, Source: util.py
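
For intuition, a minimal usage sketch (assuming Theano is installed and reduce_log_sum is in scope), comparing the stabilized sum against the naive one:

import numpy
import theano
import theano.tensor as T

vec = T.dvector('vec')
f = theano.function([vec], reduce_log_sum(vec))

v = numpy.array([-1000.0, -1000.5])
print(f(v))                              # ~ -999.526: stable even for tiny probabilities
print(numpy.log(numpy.sum(numpy.exp(v))))  # naive version underflows to -inf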

Example 2: time_distributed_nonzero_max_pooling

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def time_distributed_nonzero_max_pooling(x):
    """
    Computes the maximum along the first (time) dimension,
    treating entries equal to the mask value (0.0) as masked out.

    In:
        x - input; a 3D tensor
    """

    import numpy
    import theano.tensor as T

    mask_value = 0.0
    x = T.switch(T.eq(x, mask_value), -numpy.inf, x)
    masked_max_x = x.max(axis=1)
    # positions that were entirely masked come out as -inf;
    # replace those infinities with the mask value
    masked_max_x = T.switch(T.eq(masked_max_x, -numpy.inf), 0, masked_max_x)
    return masked_max_x
Author: mateuszmalinowski, Project: visual_turing_test-tutorial, Source: keras_extensions.py
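
A quick numeric check (a sketch; assumes Theano is installed and the function above is in scope):

import numpy
import theano
import theano.tensor as T

x = T.dtensor3('x')
f = theano.function([x], time_distributed_nonzero_max_pooling(x))

# batch of 1, three timesteps, two features; zeros are treated as masked
data = numpy.array([[[0., 5.], [3., 0.], [0., 0.]]])
print(f(data))    # [[3., 5.]] -- zeros never win the max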

Example 3: time_distributed_masked_max

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def time_distributed_masked_max(x, m):
    """
    Computes the max along the first (time) dimension,
    considering only the timesteps where the mask is on.

    In:
        x - input; a 3D tensor
        m - 2D mask (batch x time); 0 marks masked-out timesteps
    """
    # place -inf where the mask is off, so those timesteps never win the max
    m_value = 0.0
    tmp = K.switch(K.equal(m, 0.0), -numpy.inf, 0.0)
    x_with_inf = x + K.expand_dims(tmp)
    x_max = K.max(x_with_inf, axis=1)
    # rows that were entirely masked come out as -inf; fall back to m_value
    r = K.switch(K.equal(x_max, -numpy.inf), m_value, x_max)
    return r


## classes  ##

# Transforms existing layers to masked layers 
Author: mateuszmalinowski, Project: visual_turing_test-tutorial, Source: keras_extensions.py
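
A hypothetical usage sketch, assuming the Theano backend of the (older) Keras version this file targets, with K imported as `from keras import backend as K`:

import numpy
from keras import backend as K

# batch of 1, three timesteps, two features; the last timestep is masked out
x = K.variable(numpy.array([[[1., 5.], [3., 2.], [9., 9.]]]))
m = K.variable(numpy.array([[1., 1., 0.]]))
print(K.eval(time_distributed_masked_max(x, m)))   # [[3., 5.]] -- the 9s are ignored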

Example 4: __init__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def __init__(self, scale_grads=1, target_scale=.1,
            discriminator_default_input_include_prob=1.,
            discriminator_input_include_probs=None,
            discriminator_default_input_scale=1.,
            discriminator_input_scales=None,
            generator_default_input_include_prob=1.,
            generator_default_input_scale=1.,
            inference_default_input_include_prob=None,
            inference_input_include_probs=None,
            inference_default_input_scale=1.,
            inference_input_scales=None,
            init_now_train_generator=True,
            ever_train_discriminator=True,
            ever_train_generator=True,
            ever_train_inference=True,
            no_drop_in_d_for_g=False,
            alternate_g=False):
        self.__dict__.update(locals())
        del self.self
        # These allow you to dynamically switch off training parts.
        # If the corresponding ever_train_* is False, these have
        # no effect.
        self.now_train_generator = sharedX(init_now_train_generator)
        self.now_train_discriminator = sharedX(numpy.array(1., dtype='float32'))
        self.now_train_inference = sharedX(numpy.array(1., dtype='float32')) 
Author: goodfeli, Project: adversarial, Source: __init__.py
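
The shared flags matter because they let you toggle training of each component at runtime without recompiling. An illustrative sketch (not from the project) of gating an update with a shared flag and tensor.switch:

import numpy
import theano
import theano.tensor as T

w = theano.shared(0.0)
now_train = theano.shared(1.0)       # analogous to now_train_generator
step = theano.function([], [], updates=[(w, w - T.switch(now_train, 0.2, 0.0))])

step(); print(w.get_value())         # -0.2
now_train.set_value(0.0)
step(); print(w.get_value())         # still -0.2: the update is switched off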

Example 5: shared_dropout_layer

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def shared_dropout_layer(shape, use_noise, trng, value, scaled=True):
    # re-scale dropout at training time, so we don't need to at test time
    if scaled:
        proj = tensor.switch(
            use_noise,
            trng.binomial(shape, p=value, n=1, dtype='float32') / value,
            theano.shared(numpy.float32(1.)))
    else:
        proj = tensor.switch(
            use_noise,
            trng.binomial(shape, p=value, n=1, dtype='float32'),
            theano.shared(numpy.float32(value)))
    return proj


# feedforward layer: affine transformation + point-wise nonlinearity 
Author: thompsonb, Project: DL4MT, Source: layers.py
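
A usage sketch, assuming the snippet's module-level imports (theano, theano.tensor as tensor, numpy) plus an MRG RandomStreams instance for trng:

import numpy
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams

trng = MRG_RandomStreams(seed=1234)
use_noise = theano.shared(numpy.float32(1.))    # 1. while training, 0. at test time
mask = shared_dropout_layer((8,), use_noise, trng, value=0.7)
f = theano.function([], mask)

print(f())                        # roughly 70% of entries are 1/0.7, the rest 0
use_noise.set_value(numpy.float32(0.))
print(f())                        # all ones: dropout disabled, no re-scaling needed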

Example 6: AdaMax

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def AdaMax(w, objective, alpha=.01, beta1=.1, beta2=.001):
    print('AdaMax', 'alpha:', alpha, 'beta1:', beta1, 'beta2:', beta2)
    g = T.grad(objective.sum(), w, disconnected_inputs='warn')

    new = OrderedDict()

    for i in range(len(w)):
        #g[i] = T.switch(T.isnan(g[i]), T.zeros_like(g[i]), g[i])  # remove NaNs
        mom1 = G.sharedf(w[i].get_value() * 0.)
        _max = G.sharedf(w[i].get_value() * 0.)
        new[mom1] = (1 - beta1) * mom1 + beta1 * g[i]
        new[_max] = T.maximum((1 - beta2) * _max, abs(g[i]) + 1e-8)
        new[w[i]] = w[i] + alpha * new[mom1] / new[_max]

    return new

# AdaMax that averages over multiple minibatches 
Author: openai, Project: iaf, Source: optim.py
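
A hypothetical usage sketch. G is the project's own utility module; here we assume only that G.sharedf(x) wraps theano.shared with a floatX cast, and provide a stand-in so the sketch is self-contained:

import numpy
import theano
import theano.tensor as T
from collections import OrderedDict

class G:                                  # stand-in for the project's utility module
    @staticmethod
    def sharedf(x):
        return theano.shared(numpy.asarray(x, dtype=theano.config.floatX))

w = [G.sharedf(numpy.zeros(3))]
x = T.vector('x')
objective = -T.sum((x - w[0]) ** 2)       # AdaMax *ascends* the objective
train = theano.function([x], objective, updates=AdaMax(w, objective))
for _ in range(3):
    print(train(numpy.ones(3, dtype=theano.config.floatX)))   # climbs toward 0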

Example 7: sgdmgc

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def sgdmgc(cost, params, lr=1.0, alpha=0.1, max_magnitude=5.0, infDecay=0.1):
    """SGD with momentum and gradient clipping"""
    grads = T.grad(cost=cost, wrt=params)
    updates = []

    norm = norm_gs(params, grads)
    sqrtnorm = T.sqrt(norm)
    not_finite = T.or_(T.isnan(sqrtnorm), T.isinf(sqrtnorm))
    adj_norm_gs = T.switch(T.ge(sqrtnorm, max_magnitude), max_magnitude / sqrtnorm, 1.)

    for p, g in zip(params, grads):
        v = shared(p.get_value() * 0.)
        # decay the parameter instead of following a non-finite gradient
        g = T.switch(not_finite, infDecay * p, g * adj_norm_gs)
        v_new = v * (1.0 - alpha) - alpha * lr * g
        updates.append((v, v_new))
        updates.append((p, p + v_new))

    return updates, norm
Author: Ivaylo-Popov, Project: Theano-Lights, Source: toolbox.py
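
A usage sketch; `shared` and `norm_gs` come from the project's toolbox. We assume shared is theano.shared and norm_gs returns the total squared gradient norm, and define stand-ins accordingly:

import numpy
import theano
import theano.tensor as T
from theano import shared

def norm_gs(params, grads):               # stand-in: total squared gradient norm
    return sum((g ** 2).sum() for g in grads)

x = T.vector('x')
w = shared(numpy.ones(3, dtype=theano.config.floatX))
cost = T.sum((x - w) ** 2)
updates, norm = sgdmgc(cost, [w], lr=0.1)
train = theano.function([x], [cost, norm], updates=updates)
print(train(numpy.zeros(3, dtype=theano.config.floatX)))   # cost shrinks step by step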

Example 8: switch

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def switch(condition, then_expression, else_expression):
    """Switches between two operations depending on a scalar value.

    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    # Arguments
        condition: scalar tensor (`int` or `bool`).
        then_expression: either a tensor, or a callable that returns a tensor.
        else_expression: either a tensor, or a callable that returns a tensor.

    # Returns
        The selected tensor.
    """
    if callable(then_expression):
        then_expression = then_expression()
    if callable(else_expression):
        else_expression = else_expression()
    cond_ndim = ndim(condition)
    expr_ndim = ndim(then_expression)
    if cond_ndim < expr_ndim:
        ndim_diff = expr_ndim - cond_ndim
        for _ in range(ndim_diff):
            condition = expand_dims(condition)
    return T.switch(condition, then_expression, else_expression) 
Author: Relph1119, Project: GraphicDesignPatternByPython, Source: theano_backend.py
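
A quick check of the broadcasting path above (a sketch; assumes this Theano backend of Keras, with K.variable and K.eval available):

import numpy
from keras import backend as K

cond = K.variable(numpy.array([1., 0.]))            # shape (2,)
a = K.variable(numpy.ones((2, 2)))
b = K.variable(numpy.zeros((2, 2)))
# cond has one dimension fewer than the branches, so it is expanded to (2, 1)
# and broadcast row-wise
print(K.eval(K.switch(cond, a, b)))                 # [[1. 1.], [0. 0.]]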

Example 9: bound

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def bound(expression, out, *predicates):
    """Bound a theano expression.

    Parameters
    ----------
    * `expression` [theano expression]:
        The expression to bound.

    * `out` [theano expression]:
        The out-of-bounds value.

    * `*predicates` [list of theano expressions]:
        The list of predicates defining the boundaries of `expression`.

    Returns
    -------
    * `value` [theano expression]:
         The bounded expression.
    """
    guard = 1

    for p in predicates:
        guard *= p

    return T.switch(guard, expression, out) 
Author: diana-hep, Project: carl, Source: base.py
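
A usage sketch (hypothetical values; assumes Theano and the function above in scope), clamping an expression to -inf outside the unit interval:

import numpy
import theano
import theano.tensor as T

x = T.vector('x')
logpdf = bound(-x ** 2, -numpy.inf, x >= 0, x <= 1)
f = theano.function([x], logpdf)
print(f(numpy.array([-0.5, 0.5, 2.0], dtype=theano.config.floatX)))
# -> [-inf, -0.25, -inf]: out-of-bounds inputs get the `out` value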

Example 10: reScale

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def reScale(self, energy, threshold, replaceValue=1e-7):
    assert energy.ndim == 1
    maxValue = energy.max()

    def checkThreshold(value, threshold, replaceValue):
        return T.switch(T.lt(value, threshold), replaceValue, value)

    result, update = theano.scan(fn=checkThreshold,
                                 outputs_info=None,
                                 sequences=[energy],
                                 non_sequences=[threshold, replaceValue])
    # rescale only when at least one entry reaches the threshold
    return T.switch(T.lt(maxValue, threshold), energy, result)
Author: lingluodlut, Project: Att-ChemdNER, Source: nn.py
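
Since T.switch is already element-wise, the scan above can be expressed more directly; a behaviour-equivalent sketch (not from the project) for intuition:

import theano.tensor as T

def rescale_vectorised(energy, threshold, replaceValue=1e-7):
    # replace sub-threshold entries, but only if some entry reaches the threshold
    clipped = T.switch(T.lt(energy, threshold), replaceValue, energy)
    return T.switch(T.lt(energy.max(), threshold), energy, clipped)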

Example 11: switch

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def switch(condition, then_expression, else_expression):
    '''condition: scalar tensor.
    '''
    return T.switch(condition, then_expression, else_expression) 
Author: lingluodlut, Project: Att-ChemdNER, Source: theano_backend.py

Example 12: local_gpua_row_switch

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def local_gpua_row_switch(node):
    """
    Detects eligible Switch instances and replaces them with a GPU
    row switch.
    """

    # Only rewrite Elemwise nodes whose scalar op is Switch.
    if not (node.op.__class__ == T.Elemwise
            and node.op.scalar_op.__class__ == theano.scalar.Switch):
        return False

    cond, ift, iff = node.inputs
    out, = node.outputs

    # Only applies to Switch instances where a vector mask broadcasts over
    # matrices.
    bcast = cond.broadcastable
    if not bcast or not (not bcast[0] and all(bcast[1:])
                         and ift.ndim in [2, 3]):
        return False

    if not (ift.dtype == iff.dtype == "float32"):
        return False

    if cond.owner and isinstance(cond.owner.op, HostFromGpu):
        gpu_cond, = cond.owner.inputs
    else:
        gpu_cond = as_cuda_ndarray_variable(
                T.cast(cond.flatten(), "float32"))

    if ift.owner and isinstance(ift.owner.op, HostFromGpu):
        gpu_ift, = ift.owner.inputs
    else:
        gpu_ift = as_cuda_ndarray_variable(ift)

    if iff.owner and isinstance(iff.owner.op, HostFromGpu):
        gpu_iff, = iff.owner.inputs
    else:
        gpu_iff = as_cuda_ndarray_variable(iff)

    gpu_op = GpuRowSwitch()
    # use the GPU-resident condition (gpu_cond), not the host-side cond
    return [HostFromGpu()(gpu_op(gpu_cond, gpu_ift, gpu_iff))]
Author: stanfordnlp, Project: spinn, Source: cuda.py

Example 13: fprop

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def fprop(self, state_below):
    # leaky rectifier: pass positives through, scale negatives by left_slope
    p = state_below
    p = T.switch(p > 0., p, self.left_slope * p)
    return p
Author: goodfeli, Project: adversarial, Source: __init__.py
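
A quick numeric check of the leaky rectifier above (a sketch; the left_slope value is hypothetical):

import numpy
import theano
import theano.tensor as T

p = T.vector('p')
left_slope = 0.2                       # hypothetical value
f = theano.function([p], T.switch(p > 0., p, left_slope * p))
print(f(numpy.array([-1., 2.], dtype=theano.config.floatX)))   # [-0.2, 2.]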

Example 14: switch

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def switch(condition, then_expression, else_expression):
    '''condition: scalar tensor.
    '''
    return T.switch(condition, then_expression, else_expression)


# NN OPERATIONS 
Author: mathDR, Project: reading-text-in-the-wild, Source: theano_backend.py

Example 15: clip_norm

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import switch [as alias]
def clip_norm(g, c, n):
    # rescale g to norm c whenever its norm n exceeds c; no-op if c <= 0
    if c > 0:
        g = T.switch(T.ge(n, c), g * c / n, g)
    return g
Author: junyanz, Project: iGAN, Source: updates.py
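
A usage sketch (assumes Theano and the function above in scope), clipping a gradient to norm c = 3 when its norm exceeds that:

import numpy
import theano
import theano.tensor as T

g = T.vector('g')
n = T.sqrt(T.sum(g ** 2))
f = theano.function([g], [clip_norm(g, 3.0, n), n])
clipped, norm = f(numpy.array([3., 4.], dtype=theano.config.floatX))
print(norm, clipped)                  # norm 5.0 -> rescaled to norm 3.0: [1.8, 2.4]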


Note: The theano.tensor.switch examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors. Consult each project's License before using or redistributing the code; do not repost without permission.