

Python tensor.maximum Method Code Examples

This article collects typical usage examples of the Python method theano.tensor.maximum. If you are wondering what tensor.maximum does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the theano.tensor module.


The following presents 15 code examples of the tensor.maximum method, ordered by popularity.
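
Before diving into the extracted examples, here is a minimal sketch (written for this article; the variable names are illustrative) of what tensor.maximum itself does: it takes the element-wise, broadcasting maximum of two symbolic tensors, which is also the idiomatic way to spell a ReLU in Theano code.

import numpy as np
import theano
import theano.tensor as T

x = T.dmatrix('x')
y = T.dmatrix('y')

# Element-wise maximum of two tensors with broadcastable shapes.
pairwise_max = theano.function([x, y], T.maximum(x, y))

# Broadcasting against a scalar gives the classic ReLU, max(x, 0).
relu = theano.function([x], T.maximum(x, 0.0))

a = np.array([[1.0, -2.0], [3.0, -4.0]])
b = np.array([[0.5, 5.0], [2.0, -1.0]])
print(pairwise_max(a, b))  # [[ 1.  5.] [ 3. -1.]]
print(relu(a))             # [[ 1.  0.] [ 3.  0.]]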

Example 1: ctc_update_log_p

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Developer: lingluodlut, Project: Att-ChemdNER, Lines of code: 26, Source file: theano_backend.py
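
The bookkeeping above leans on T.set_subtensor and T.inc_subtensor, which return new tensors rather than mutating in place. A minimal sketch of their semantics (names invented here):

import numpy as np
import theano
import theano.tensor as T

v = T.dvector('v')
# set_subtensor returns a copy of v with the first two entries overwritten,
set_head = T.set_subtensor(v[:2], 0.0)
# while inc_subtensor returns a copy with a slice incremented -- the
# shifted-accumulate pattern used for the "previous transitions" step above.
shift_add = T.inc_subtensor(v[1:], v[:-1])

f = theano.function([v], [set_head, shift_add])
print(f(np.array([1.0, 2.0, 3.0, 4.0])))
# [array([0., 0., 3., 4.]), array([1., 3., 5., 7.])]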

Example 2: define_cost

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def define_cost(self, pred, y0, m0):
        bsize = self.bsize
        npix = int(np.prod(test_shape(y0)[1:]))
        y0_target = y0.reshape((self.bsize, npix))
        y0_mask = m0.reshape((self.bsize, npix))
        pred = pred.reshape((self.bsize, npix))

        p = pred * y0_mask
        t = y0_target * y0_mask

        d = (p - t)

        nvalid_pix = T.sum(y0_mask, axis=1)
        depth_cost = (T.sum(nvalid_pix * T.sum(d**2, axis=1))
                         - 0.5*T.sum(T.sum(d, axis=1)**2)) \
                     / T.maximum(T.sum(nvalid_pix**2), 1)

        return depth_cost 
Developer: hjimce, Project: Depth-Map-Prediction, Lines of code: 20, Source file: depth.py
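
The T.maximum(..., 1) in the denominator is a guard: a batch whose mask is all zeros would otherwise divide by zero. The same pattern in isolation, as a small sketch (names invented):

import numpy as np
import theano
import theano.tensor as T

total = T.dvector('total')
count = T.dvector('count')
# Clamp the denominator at 1 so empty masks yield 0 instead of NaN or inf.
safe_mean = theano.function([total, count], total / T.maximum(count, 1.0))
print(safe_mean(np.array([6.0, 0.0]), np.array([3.0, 0.0])))  # [2. 0.]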

Example 3: get_noise

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def get_noise(self, size):

        # Allow just requesting batch size
        if isinstance(size, int):
            size = (size, self.get_input_space().get_total_dimension())

        if not hasattr(self, 'noise'):
            self.noise = "gaussian"
        if self.noise == "uniform":
            return self.theano_rng.uniform(low=-np.sqrt(3), high=np.sqrt(3), size=size, dtype='float32')
        elif self.noise == "gaussian":
            return self.theano_rng.normal(size=size, dtype='float32')
        elif self.noise == "spherical":
            noise = self.theano_rng.normal(size=size, dtype='float32')
            noise = noise / T.maximum(1e-7, T.sqrt(T.sqr(noise).sum(axis=1))).dimshuffle(0, 'x')
            return noise
        else:
            raise NotImplementedError(self.noise) 
Developer: goodfeli, Project: adversarial, Lines of code: 20, Source file: __init__.py
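
In the "spherical" branch, T.maximum(1e-7, norm) keeps the row norms away from zero before dividing. The normalization step on its own (a sketch with invented names):

import numpy as np
import theano
import theano.tensor as T

noise = T.dmatrix('noise')
norms = T.sqrt(T.sqr(noise).sum(axis=1))
# dimshuffle(0, 'x') turns the (batch,) vector of norms into a (batch, 1)
# column so it broadcasts across each row.
unit = noise / T.maximum(1e-7, norms).dimshuffle(0, 'x')

f = theano.function([noise], unit)
print(f(np.array([[3.0, 4.0], [0.0, 0.0]])))  # [[0.6 0.8] [0.  0. ]]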

Example 4: max_pool_2d_same_size

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def max_pool_2d_same_size(input, patch_size):
    """
    Takes as input a 4-D tensor. It sets all non maximum values
    of non-overlapping patches of size (patch_size[0],patch_size[1]) to zero,
    keeping only the maximum values. The output has the same dimensions as
    the input.

    Parameters
    ----------
    input : 4-D theano tensor of input images
        Input images. Max pooling will be done over the 2 last dimensions.
    patch_size : tuple of length 2
        Size of the patch (patch height, patch width).
        (2,2) will retain only one non-zero value per patch of 4 values.

    """
    output = Pool(patch_size, True)(input)
    outs = MaxPoolGrad(patch_size, True)(input, output, output)
    return outs 
Developer: muhanzhang, Project: D-VAE, Lines of code: 21, Source file: pool.py
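
A possible way to exercise this function (assuming it is importable, e.g. as the copy shipped in theano.tensor.signal.pool): non-maximum entries of each non-overlapping 2x2 patch come back as zeros, and the output shape matches the input.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal.pool import max_pool_2d_same_size

x = T.dtensor4('x')
f = theano.function([x], max_pool_2d_same_size(x, (2, 2)))

img = np.arange(16.0).reshape(1, 1, 4, 4)
print(f(img)[0, 0])
# Only the maximum of each 2x2 patch survives:
# [[ 0.  0.  0.  0.]
#  [ 0.  5.  0.  7.]
#  [ 0.  0.  0.  0.]
#  [ 0. 13.  0. 15.]]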

Example 5: compute_emb

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def compute_emb(x, W):

    def _step(xi, emb, W):
        if prm.att_doc:
            new_shape = (xi.shape[0], xi.shape[1], xi.shape[2], prm.dim_emb)
        else:
            new_shape = (xi.shape[0], xi.shape[1], prm.dim_emb)

        out = W[xi.flatten()].reshape(new_shape).sum(-2)
        return out / tensor.maximum(1., tensor.neq(xi,-1).astype('float32').sum(-1, keepdims=True))

    if prm.att_doc:
        emb_init = tensor.alloc(0., x.shape[1], x.shape[2], prm.dim_emb)
    else:
        emb_init = tensor.alloc(0., x.shape[1], prm.dim_emb)

    (embs), scan_updates = theano.scan(_step,
                                sequences=[x],
                                outputs_info=[emb_init],
                                non_sequences=[W],
                                name='emb_scan',
                                n_steps=x.shape[0])

    return embs 
Developer: nyu-dl, Project: dl4ir-webnav, Lines of code: 26, Source file: neuagent.py
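
Here tensor.maximum(1., ...) is again a divide-by-zero guard: token id -1 marks padding, and the embedding sum is divided by the number of real tokens, clamped at 1 so fully padded rows stay finite. The averaging step in isolation (a sketch; the names and -1 padding convention are illustrative, and -1 indexes the last row of W, assumed here to be a zero padding vector):

import numpy as np
import theano
import theano.tensor as T

ids = T.imatrix('ids')  # (batch, seq_len), padded with -1
W = T.dmatrix('W')      # (vocab, dim) embedding table; last row is padding

summed = W[ids.flatten()].reshape((ids.shape[0], ids.shape[1], W.shape[1])).sum(1)
count = T.maximum(1., T.neq(ids, -1).astype('float64').sum(1, keepdims=True))
mean_emb = theano.function([ids, W], summed / count)

vocab = np.vstack([np.eye(3), np.zeros((1, 3))])  # 3 words + padding row
batch = np.array([[0, 2, -1]], dtype='int32')
print(mean_emb(batch, vocab))  # [[0.5 0.  0.5]]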

Example 6: adamax_updates

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def adamax_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    updates = []
    grads = T.grad(cost, params)
    for p, g in zip(params, grads):
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        if mom1>0:
            v_t = mom1*v + (1. - mom1)*g
            updates.append((v,v_t))
        else:
            v_t = g
        mg_t = T.maximum(mom2*mg, abs(g))
        g_t = v_t / (mg_t + 1e-6)
        p_t = p - lr * g_t
        updates.append((mg, mg_t))
        updates.append((p, p_t))
    return updates 
Developer: hendrycks, Project: GELUs, Lines of code: 19, Source file: nn.py
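
A possible way to wire these updates into a training function (a minimal sketch; the quadratic toy cost and names are invented, and adamax_updates is assumed to be in scope from the example above):

import numpy as np
import theano as th
import theano.tensor as T

# Toy problem: drive a single weight vector toward zero.
w = th.shared(np.ones(3, dtype=th.config.floatX), name='w')
cost = T.sum(w ** 2)

train = th.function([], cost, updates=adamax_updates([w], cost, lr=0.1))
for _ in range(100):
    train()
print(w.get_value())  # approaches [0. 0. 0.] (up to an lr-sized final step)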

Example 7: AdaMax

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def AdaMax(w, objective, alpha=.01, beta1=.1, beta2=.001):
    print('AdaMax', 'alpha:', alpha, 'beta1:', beta1, 'beta2:', beta2)
    g = T.grad(objective.sum(), w, disconnected_inputs='warn')
    
    new = OrderedDict()
    
    for i in range(len(w)):
        #g[i] = T.switch(T.isnan(g[i]), T.zeros_like(g[i]), g[i])  # remove NaNs
        mom1 = G.sharedf(w[i].get_value() * 0.)
        _max = G.sharedf(w[i].get_value() * 0.)
        new[mom1] = (1-beta1) * mom1 + beta1 * g[i]
        new[_max] = T.maximum((1-beta2)*_max, abs(g[i]) + 1e-8)
        new[w[i]] = w[i] + alpha *  new[mom1] / new[_max]
                
    return new

# AdaMax that averages over multiple minibatches 
Developer: openai, Project: iaf, Lines of code: 19, Source file: optim.py

Example 8: my_max_pool_2d

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def my_max_pool_2d(sym_input, pool_shape = (2,2)):
    """ this one is pure theano. Hence all gradient-related stuff is working! No dimshuffling"""

    s = None
    for i in range(pool_shape[1]):
        t = sym_input[:,:,:,i::pool_shape[1]]
        if s is None:
            s = t
        else:
            s = T.maximum(s, t)

    temp = s
    s = None
    for i in range(pool_shape[0]):
        t = temp[:,:,i::pool_shape[0],:]
        if s is None:
            s = t
        else:
            s = T.maximum(s, t)

    sym_ret = s

    return sym_ret 
Developer: GUR9000, Project: Deep_MRI_brain_extraction, Lines of code: 25, Source file: NN_ConvLayer_2D.py
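
A quick numeric check of the slicing trick (illustrative; my_max_pool_2d as defined above is assumed to be in scope):

import numpy as np
import theano
import theano.tensor as T

x = T.dtensor4('x')
f = theano.function([x], my_max_pool_2d(x, (2, 2)))

img = np.arange(16.0).reshape(1, 1, 4, 4)
out = f(img)
# Same result as non-overlapping 2x2 max pooling written in plain NumPy:
ref = img.reshape(1, 1, 2, 2, 2, 2).max(axis=(3, 5))
print(out[0, 0])              # [[ 5.  7.] [13. 15.]]
print(np.allclose(out, ref))  # True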

Example 9: max_pool_along_channel_axis

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def max_pool_along_channel_axis(sym_input, pool_factor):
    """ for 3D conv."""
    s = None
    for i in range(pool_factor):
        t = sym_input[:,:,i::pool_factor]
        if s is None:
            s = t
        else:
            s = T.maximum(s, t)
    return s
#    Ns, Ts, C, Hs, Ws = 1, 70, 1, 70, 70  -> 70^3
#    Nf, Tf, C, Hf, Wf = 32, 5 , 1, 5 , 5  -> 32 filters of shape 5^3
#    signals = numpy.arange(Ns*Ts*C*Hs*Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
#    filters = numpy.arange(Nf*Tf*C*Hf*Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
#
# in 3D
#        input:  (1, 70,  3, 70, 70)
#       filters: (32, 5 , 3,  5 , 5)
#    --> output: (1, 66, 32, 66, 66) 
Developer: GUR9000, Project: Deep_MRI_brain_extraction, Lines of code: 21, Source file: NN_ConvLayer_3D.py
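
This is the maxout-style reduction over groups of pool_factor adjacent channels: output channel m is the maximum of input channels m*pool_factor through m*pool_factor + pool_factor - 1. A small check against a reshape-and-max in NumPy (illustrative; the function above is assumed to be in scope, and a 3D tensor stands in for the project's 5D (Ns, Ts, C, Hs, Ws) case, where the sliced axis 2 is the channel axis):

import numpy as np
import theano
import theano.tensor as T

x = T.dtensor3('x')
f = theano.function([x], max_pool_along_channel_axis(x, 2))

data = np.arange(12.0).reshape(1, 2, 6)
out = f(data)
# Each output channel is the max over a group of 2 adjacent input channels:
ref = data.reshape(1, 2, 3, 2).max(axis=3)
print(out)                    # [[[ 1.  3.  5.] [ 7.  9. 11.]]]
print(np.allclose(out, ref))  # True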

Example 10: compute_weights

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def compute_weights(self, energies, attended_mask):
        if self.energy_normalizer == 'softmax':
            logger.debug("Using softmax attention weights normalization")
            energies = energies - energies.max(axis=0)
            unnormalized_weights = tensor.exp(energies)
        elif self.energy_normalizer == 'logistic':
            logger.debug("Using smoothfocus (logistic sigm) "
                        "attention weights normalization")
            unnormalized_weights = tensor.nnet.sigmoid(energies)
        elif self.energy_normalizer == 'relu':
            logger.debug("Using ReLU attention weights normalization")
            unnormalized_weights = tensor.maximum(energies/1000., 0.0)
        else:
            raise Exception("Unknown energey_normalizer: {}"
                            .format(self.energy_computer))
        if attended_mask:
            unnormalized_weights *= attended_mask

        # If mask consists of all zeros use 1 as the normalization coefficient
        normalization = (unnormalized_weights.sum(axis=0) +
                         tensor.all(1 - attended_mask, axis=0))
        return unnormalized_weights / normalization 
Developer: rizar, Project: attention-lvcsr, Lines of code: 24, Source file: attention.py
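
The closing lines hide a subtle guard: when a column of attended_mask is entirely zero, the masked weights sum to zero, and tensor.all(1 - attended_mask, axis=0) adds a 1 to the denominator to avoid 0/0. That guard in isolation (a sketch with invented names):

import numpy as np
import theano
import theano.tensor as T

weights = T.dmatrix('weights')  # (time, batch), already multiplied by the mask
mask = T.dmatrix('mask')        # (time, batch), 1 marks attended positions

denom = weights.sum(axis=0) + T.all(1 - mask, axis=0)
f = theano.function([weights, mask], weights / denom)

w = np.array([[2.0, 0.0], [2.0, 0.0]])
m = np.array([[1.0, 0.0], [1.0, 0.0]])
print(f(w, m))  # [[0.5 0. ] [0.5 0. ]] -- the fully masked column stays 0, no NaN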

Example 11: maximum

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def maximum(x, y):
    return T.maximum(x, y) 
Developer: lingluodlut, Project: Att-ChemdNER, Lines of code: 4, Source file: theano_backend.py

Example 12: maximum

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def maximum(x, y):
    # With checkgrad enabled, the kinked max is swapped for a smooth
    # surrogate so finite-difference gradient checks stay well-behaved.
    if checkgrad:
        return x + y
    return T.maximum(x, y)
Developer: hjimce, Project: Depth-Map-Prediction, Lines of code: 6, Source file: thutil.py

Example 13: HeKaimingResidualLayerSet

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def HeKaimingResidualLayerSet(inp, inp_dim, vs, training_mode, name="resnet_stack", dropout_keep_rate=1.0, depth=2, initializer=None):
    # From http://arxiv.org/pdf/1603.05027v2.pdf
    addin = inp
    for i in range(depth):
        addin = BatchNorm(addin, inp_dim, vs, name + "/" + str(i), training_mode)
        if dropout_keep_rate < 1.0:
            addin = Dropout(addin, dropout_keep_rate, training_mode) 
        addin = T.maximum(addin, 0) # ReLU
        addin = Linear(addin, inp_dim, inp_dim, vs, name=name + "/" + str(i), initializer=initializer)
    return inp + addin 
Developer: stanfordnlp, Project: spinn, Lines of code: 12, Source file: blocks.py

Example 14: ReLULayer

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def ReLULayer(inp, inp_dim, outp_dim, vs, name="relu_layer", use_bias=True, initializer=None):
    pre_nl = Linear(inp, inp_dim, outp_dim, vs, name, use_bias, initializer)
    # ReLU isn't present in this version of Theano.
    outp = T.maximum(pre_nl, 0)

    return outp 
Developer: stanfordnlp, Project: spinn, Lines of code: 8, Source file: blocks.py

Example 15: relu

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import maximum [as alias]
def relu(self, X):
        return T.maximum(X, 0) 
Developer: mquad, Project: hgru4rec, Lines of code: 4, Source file: hgru4rec.py


Note: The theano.tensor.maximum examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; the source code copyright belongs to the original authors. Consult the corresponding project's license before distributing or reusing this code, and do not republish without permission.