

Python tensor.exp Method Code Examples

This article collects typical usage examples of the theano.tensor.exp method in Python. If you are wondering how tensor.exp is used in practice, or looking for concrete examples of it, the code samples selected here may help. You can also explore further usage examples from the theano.tensor module.


Fifteen code examples of the tensor.exp method are presented below, sorted by popularity by default.
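
Before the collected examples, here is a minimal, self-contained sketch of the pattern they all share (assuming a working Theano installation): build a symbolic expression with tensor.exp, then compile it with theano.function.

# Minimal sketch: elementwise exponential of a symbolic vector.
import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')             # symbolic double-precision vector
y = T.exp(x)                   # elementwise exponential
f = theano.function([x], y)    # compile the symbolic graph into a callable

print(f(np.array([0.0, 1.0, 2.0])))   # -> [ 1.          2.71828183  7.3890561 ]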

Example 1: get_output_for

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def get_output_for(self, input, init=False, **kwargs):
        if input.ndim > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = input.flatten(2)
        
        activation = T.tensordot(input, self.W, [[1], [0]])
        abs_dif = (T.sum(abs(activation.dimshuffle(0,1,2,'x') - activation.dimshuffle('x',1,2,0)),axis=2)
                    + 1e6 * T.eye(input.shape[0]).dimshuffle(0,'x',1))

        if init:
            mean_min_abs_dif = 0.5 * T.mean(T.min(abs_dif, axis=2),axis=0)
            abs_dif /= mean_min_abs_dif.dimshuffle('x',0,'x')
            self.init_updates = [(self.log_weight_scale, self.log_weight_scale-T.log(mean_min_abs_dif).dimshuffle(0,'x'))]
        
        f = T.sum(T.exp(-abs_dif),axis=2)

        if init:
            mf = T.mean(f,axis=0)
            f -= mf.dimshuffle('x',0)
            self.init_updates.append((self.b, -mf))
        else:
            f += self.b.dimshuffle('x',0)

        return T.concatenate([input, f], axis=1) 
Developer ID: djsutherland, Project: opt-mmd, Lines of code: 27, Source file: nn.py

Example 2: rbf_mmd2_streaming_and_ratio

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def rbf_mmd2_streaming_and_ratio(X, Y, sigma=0):
    # n = (T.smallest(X.shape[0], Y.shape[0]) // 2) * 2
    n = (X.shape[0] // 2) * 2
    gamma = 1 / (2 * sigma**2)
    rbf = lambda A, B: T.exp(-gamma * ((A - B) ** 2).sum(axis=1))
    h_bits = (rbf(X[:n:2], X[1:n:2]) + rbf(Y[:n:2], Y[1:n:2])
            - rbf(X[:n:2], Y[1:n:2]) - rbf(X[1:n:2], Y[:n:2]))

    mmd2 = h_bits.mean()

    # variance is 1/2 E_{v, v'} (h(v) - h(v'))^2
    # estimate with even, odd diffs
    m = (n // 2) * 2
    approx_var = 1/2 * ((h_bits[:m:2] - h_bits[1:m:2]) ** 2).mean()
    ratio = mmd2 / T.sqrt(T.largest(approx_var, _eps))
    return mmd2, ratio


################################################################################
### MMD with linear kernel

# Hotelling test statistic is from:
#    Jitkrittum, Szabo, Chwialkowski, and Gretton.
#    Interpretable Distribution Features with Maximum Testing Power.
#    NIPS 2016. 
Developer ID: djsutherland, Project: opt-mmd, Lines of code: 27, Source file: mmd.py
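
For reference, the h_bits quantity in Example 2 follows the standard streaming (linear-time) MMD estimator of Gretton et al. (JMLR 2012); as a sketch, with kernel $k(a, b) = \exp(-\gamma \lVert a - b \rVert^2)$ and $\gamma = 1/(2\sigma^2)$:

$$
h_i = k(x_{2i-1}, x_{2i}) + k(y_{2i-1}, y_{2i}) - k(x_{2i-1}, y_{2i}) - k(x_{2i}, y_{2i-1}),
\qquad
\widehat{\mathrm{MMD}}^2_{\mathrm{lin}} = \frac{2}{n} \sum_{i=1}^{n/2} h_i,
$$

which the code obtains by averaging the n/2 entries of h_bits. Note that _eps is presumably a small module-level constant defined elsewhere in mmd.py, and the sigma=0 default must be overridden with a positive bandwidth, since gamma = 1 / (2 * sigma**2) would otherwise divide by zero.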

Example 3: __init__

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def __init__(self,classes,hidden_layers,features,nodes_per_hidden_layer,learning_rate,regularization):
        self.hidden_layers = []
        self.hidden_layers.append(layer(features,nodes_per_hidden_layer))
        for i in range(hidden_layers-1):
            self.hidden_layers.append(layer(nodes_per_hidden_layer,nodes_per_hidden_layer))
        self.output_layer = layer(nodes_per_hidden_layer,classes)
        self.params = []
        for l in self.hidden_layers:
            self.params.extend(l.get_params())
        self.params.extend(self.output_layer.get_params())
        self.A = T.matrix()
        self.t = T.matrix()
        self.s = 1/(1+T.exp(-T.dot(self.A,self.params[0])-self.params[1]))
        for i in range(hidden_layers):
            self.s = 1/(1+T.exp(-T.dot(self.s,self.params[2*(i+1)])-self.params[2*(i+1)+1]))
        self.cost = -self.t*T.log(self.s)-(1-self.t)*T.log(1-self.s)
        self.cost = self.cost.mean()
        for i in range(hidden_layers+1):
            self.cost += regularization*(self.params[2*i]**2).mean()
        self.gparams = [T.grad(self.cost, param) for param in self.params]
        self.propogate = theano.function([self.A,self.t],self.cost,updates=[(param,param-learning_rate*gparam) for param,gparam in zip(self.params,self.gparams)],allow_input_downcast=True)
        self.classify = theano.function([self.A],self.s,allow_input_downcast=True) 
Developer ID: iamshang1, Project: Projects, Lines of code: 24, Source file: theano_nn.py

Example 4: timeit_2vector_theano

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def timeit_2vector_theano(init, nb_element=1e6, nb_repeat=3, nb_call=int(1e2), expr="a**2 + b**2 + 2*a*b"):
    t3 = timeit.Timer("tf(av,bv)",
                      """
import theano
import theano.tensor as T
import numexpr as ne
from theano.tensor import exp
%(init)s
av=a
bv=b
a=T.dvector()
b=T.dvector()
tf= theano.function([a,b],%(expr)s)
"""%locals()
)
    ret=t3.repeat(nb_repeat,nb_call)
    return np.asarray(ret) 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 19, Source file: gen_graph.py

Example 5: test_basic

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def test_basic(self):
        c = T.matrix()
        p_y = T.exp(c) / T.exp(c).sum(axis=1).dimshuffle(0, 'x')

        # test that function contains softmax and no div.
        f = theano.function([c], p_y, mode=self.mode)

        assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')

        f_ops = [n.op for n in f.maker.fgraph.toposort()]
        # print '--- f ='
        # printing.debugprint(f)
        # print '==='
        assert len(f_ops) == 1
        assert softmax_op in f_ops
        f(self.rng.rand(3, 4).astype(config.floatX)) 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 18, Source file: test_nnet.py

Example 6: test_basic_keepdims

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def test_basic_keepdims(self):
        c = T.matrix()
        p_y = T.exp(c) / T.exp(c).sum(axis=1, keepdims=True)

        # test that function contains softmax and no div.
        f = theano.function([c], p_y, mode=self.mode)

        assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')

        f_ops = [n.op for n in f.maker.fgraph.toposort()]
        # print '--- f ='
        # printing.debugprint(f)
        # print '==='
        assert len(f_ops) == 1
        assert softmax_op in f_ops
        f(self.rng.rand(3, 4).astype(config.floatX)) 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 18, Source file: test_nnet.py
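
The expression tested in Examples 5 and 6, T.exp(c) / T.exp(c).sum(axis=1, ...), is the hand-written form of a row-wise softmax, which is exactly what the optimizer is expected to collapse into a single softmax op. A small numerical sanity check against the built-in op (a sketch, assuming a working Theano installation):

# Sketch: the exp/sum pattern matches theano.tensor.nnet.softmax numerically.
import numpy as np
import theano
import theano.tensor as T
import theano.tensor.nnet as nnet

c = T.matrix('c')
p_manual = T.exp(c) / T.exp(c).sum(axis=1, keepdims=True)
p_builtin = nnet.softmax(c)
f = theano.function([c], [p_manual, p_builtin])

a, b = f(np.random.rand(3, 4).astype(theano.config.floatX))
assert np.allclose(a, b)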

Example 7: test_grad

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def test_grad(self):
        c = T.matrix()
        p_y = T.exp(c) / T.exp(c).sum(axis=1).dimshuffle(0, 'x')

        # test that function contains softmax and softmaxgrad
        w = T.matrix()
        backup = config.warn.sum_div_dimshuffle_bug
        config.warn.sum_div_dimshuffle_bug = False
        try:
            g = theano.function([c, w], T.grad((p_y * w).sum(), c))
            hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
        finally:
            config.warn.sum_div_dimshuffle_bug = backup
        g_ops = [n.op for n in g.maker.fgraph.toposort()]
        # print '--- g ='
        # printing.debugprint(g)
        # print '==='

        raise SkipTest('Optimization not enabled for the moment')
        assert len(g_ops) == 2
        assert softmax_op in g_ops
        assert softmax_grad in g_ops
        g(self.rng.rand(3, 4), self.rng.uniform(.5, 1, (3, 4))) 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 25, Source file: test_nnet.py

Example 8: test_transpose_basic

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def test_transpose_basic(self):
        # this should be a transposed softmax
        c = T.matrix()
        p_y = T.exp(c) / T.exp(c).sum(axis=0)

        # test that function contains softmax and no div.
        f = theano.function([c], p_y)
        # printing.debugprint(f)

        # test that function contains softmax and no div.
        backup = config.warn.sum_div_dimshuffle_bug
        config.warn.sum_div_dimshuffle_bug = False
        try:
            g = theano.function([c], T.grad(p_y.sum(), c))
            hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
        finally:
            config.warn.sum_div_dimshuffle_bug = backup
        # printing.debugprint(g)
        raise SkipTest('Optimization not enabled for the moment') 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 21, Source file: test_nnet.py

Example 9: test_broadcast_grad

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def test_broadcast_grad():
    rng = numpy.random.RandomState(utt.fetch_seed())
    x1 = T.tensor4('x')
    x1_data = rng.randn(1, 1, 300, 300)
    sigma = T.scalar('sigma')
    sigma_data = 20
    window_radius = 3

    filter_1d = T.arange(-window_radius, window_radius+1)
    filter_1d = filter_1d.astype(theano.config.floatX)
    filter_1d = T.exp(-0.5*filter_1d**2/sigma**2)
    filter_1d = filter_1d / filter_1d.sum()

    filter_W = filter_1d.dimshuffle(['x', 'x', 0, 'x'])

    y = theano.tensor.nnet.conv2d(x1, filter_W, border_mode='full',
                                  filter_shape=[1, 1, None, None])
    theano.grad(y.sum(), sigma) 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 20, Source file: test_conv.py

Example 10: test_1msigmoid

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def test_1msigmoid(self):
        if not register_local_1msigmoid:
            return

        m = self.get_mode()
        x = T.fmatrix()

        # tests exp_over_1_plus_exp
        f = theano.function([x], 1 - T.exp(x) / (1 + T.exp(x)), mode=m)
        assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
        assert [node.op for node in f.maker.fgraph.toposort()] == [
            tensor.neg, sigmoid_inplace]

        # tests inv_1_plus_exp
        f = theano.function([x], 1 - T.fill(x, 1.0) / (1 + T.exp(-x)), mode=m)
        assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
        assert [node.op for node in f.maker.fgraph.toposort()] == [tensor.neg,
                sigmoid_inplace] 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 20, Source file: test_sigm.py
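
The exp_over_1_plus_exp pattern exercised in Example 10 rests on the identity exp(x) / (1 + exp(x)) = sigmoid(x), which the rewrite replaces with the sigmoid op. A quick numerical check (a sketch, assuming a working Theano installation):

# Sketch: exp(x) / (1 + exp(x)) equals the logistic sigmoid targeted by the rewrite.
import numpy as np
import theano
import theano.tensor as T
import theano.tensor.nnet as nnet

x = T.fmatrix('x')
f = theano.function([x], [T.exp(x) / (1 + T.exp(x)), nnet.sigmoid(x)])

a, b = f(np.random.rand(2, 3).astype('float32'))
assert np.allclose(a, b)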

Example 11: test_is_1pexp

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def test_is_1pexp(self):
        backup = config.warn.identify_1pexp_bug
        config.warn.identify_1pexp_bug = False
        try:
            x = tensor.vector('x')
            exp = tensor.exp
            assert is_1pexp(1 + exp(x)) == (False, x)
            assert is_1pexp(exp(x) + 1) == (False, x)
            for neg, exp_arg in imap(is_1pexp, [(1 + exp(-x)), (exp(-x) + 1)]):
                assert not neg and theano.gof.graph.is_same_graph(exp_arg, -x)
            assert is_1pexp(1 - exp(x)) is None
            assert is_1pexp(2 + exp(x)) is None
            assert is_1pexp(exp(x) + 2) is None
            assert is_1pexp(exp(x) - 1) is None
            assert is_1pexp(-1 + exp(x)) is None
            assert is_1pexp(1 + 2 * exp(x)) is None
        finally:
            config.warn.identify_1pexp_bug = backup 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 20, Source file: test_sigm.py

Example 12: test_DownsampleFactorMax_hessian

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def test_DownsampleFactorMax_hessian(self):
        # Example provided by Frans Cronje, see
        # https://groups.google.com/d/msg/theano-users/qpqUy_3glhw/JMwIvlN5wX4J
        x_vec = tensor.vector('x')
        z = tensor.dot(x_vec.dimshuffle(0, 'x'),
                       x_vec.dimshuffle('x', 0))
        y = pool_2d(input=z, ds=(2, 2), ignore_border=True)
        C = tensor.exp(tensor.sum(y))

        grad_hess = tensor.hessian(cost=C, wrt=x_vec)
        fn_hess = function(inputs=[x_vec], outputs=grad_hess)

        # The value has been manually computed from the theoretical gradient,
        # and confirmed by the implementation.

        assert numpy.allclose(fn_hess([1, 2]), [[0., 0.], [0., 982.7667]]) 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 18, Source file: test_pool.py

Example 13: test_elemwise1

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def test_elemwise1():
    """ Several kinds of elemwise expressions with no broadcasting,
    non power-of-two shape """

    shape = (3, 4)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32') + 0.5, 'a')
    b = tensor.fmatrix()

    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)

    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)

    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],
              mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3) 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 23, Source file: test_basic_ops.py

Example 14: test_elemwise3

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def test_elemwise3():
    """ Several kinds of elemwise expressions with dimension
    permutations and broadcasting"""

    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    new_val = (a + b).dimshuffle([2, 0, 3, 1])
    new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])
    f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(6), dtype='float32')) 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 19, Source file: test_basic_ops.py

Example 15: speed_elemwise_collapse

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import exp [as alias]
def speed_elemwise_collapse():
    """ used to time if the collapse of ccontiguous dims are useful """

    shape = (30, 40, 50, 600)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2[:, ::2, :, :]
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b * tensor.exp(1 + b ** a3)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    v = v[:, ::2, :, :]
    v = cuda_ndarray.CudaNdarray(v)
    t1 = time.time()
    for i in range(100):
        # let debugmode catch errors
        f(v)
    t2 = time.time() 
Developer ID: muhanzhang, Project: D-VAE, Lines of code: 23, Source file: test_basic_ops.py


Note: The theano.tensor.exp method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not repost without permission.