

Python tensor.dscalar Function Code Examples

This article collects typical usage examples of the theano.tensor.dscalar function in Python. If you are wondering how dscalar is used in practice, what it does, or what concrete examples look like, the selected code examples below may help.


The following presents 15 code examples of the dscalar function, sorted by popularity by default.
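Before the project examples, here is a minimal editorial sketch (not taken from any of the projects below) of the pattern they all share: T.dscalar declares a symbolic 0-dimensional float64 variable that can be combined into expressions and compiled into a callable with theano.function.

import theano
import theano.tensor as T

# dscalar creates a symbolic float64 scalar (a 0-dimensional tensor).
x = T.dscalar('x')
y = T.dscalar('y')

# Build a symbolic expression and compile it into a callable Python function.
z = x * y + x
f = theano.function([x, y], z)

print(f(2.0, 3.0))  # 8.0
print(z.dtype)      # float64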

Example 1: early_stop

    def early_stop(self, x_validate, y_validate):
        '''
        Evaluates each node on this Node's path against the given
        validation set and selects the w (with its b and a) that
        minimizes the validation loss.
        '''
        x = T.matrix("x")
        y = T.vector("y")
        w = T.vector("w")
        b = T.dscalar("b")
        a = T.dscalar("a")
        p_1 = -0.5 + a / (1 + T.exp(-T.dot(x, w) - b))
        xent = 0.5 * (y - p_1)**2
        cost = xent.mean()
        loss = theano.function(inputs=[x, y, w, b, a], outputs=cost)

        # Sort the node indices numerically (path keys are stored as strings).
        Path = sorted(int(k) for k in self.path.keys())
        best_node = {}
        best_node_ind = 0
        best_loss = numpy.mean(y_validate**2)
        losses = []
        for ind in Path:
            node = self.path[str(ind)]
            l = loss(x_validate, y_validate, node['w'], node['b'], node['a'])
            losses.append(l)
            if l < best_loss:
                best_node = node
                best_node_ind = ind
                best_loss = l

        self.w = best_node['w']
        self.b = best_node['b']
        self.a = best_node['a']
Author: jcreus, Project: NNBuilder, Lines: 35, Source: node.py

Example 2: __init__

    def __init__(self,retina=None,config=None,name=None,input_variable=None): 
        self.retina = retina 
        self.config = config
        self.state = None
        if name is None:
            name = str(uuid.uuid4())
        self.name = self.config.get('name',name)
        # 3d version
        self._I = T.dtensor3(self.name+"_I")
        self._preceding_V = T.dmatrix(self.name+"_preceding_V") # initial condition for sequence
        self._b_0 = T.dscalar(self.name+"_b_0")
        self._a_0 = T.dscalar(self.name+"_a_0")
        self._a_1 = T.dscalar(self.name+"_a_1")
        self._k = T.iscalar(self.name+"_k_bip") # number of iteration steps
        def bipolar_step(input_image,
                        preceding_V,b_0, a_0, a_1):
            V = (input_image * b_0 - preceding_V * a_1) / a_0
            return V

        # The order in theano.scan has to match the order of arguments in the function bipolar_step
        self._result, self._updates = theano.scan(fn=bipolar_step,
                                      outputs_info=[self._preceding_V],
                                      sequences = [self._I],
                                      non_sequences=[self._b_0, self._a_0, self._a_1],
                                      n_steps=self._k)
        self.output_variable = self._result[0]
        # The order of arguments presented here is arbitrary (will be inferred by the symbols provided),
        #  but function calls to compute_V_bip have to match this order!
        self.compute_V = theano.function(inputs=[self._I,self._preceding_V,
                                                      self._b_0, self._a_0, self._a_1,
                                                      self._k], 
                                              outputs=self._result, 
                                              updates=self._updates)
Author: jahuth, Project: retina, Lines: 33, Source: vision.py

Example 3: add_scalars

def add_scalars():
  x = T.dscalar('x')
  y = T.dscalar('y')
  z = x + y
  f = function([x, y], z)
  print(f(2, 4))
  print(f(5, 4))
Author: bin3, Project: learnpy, Lines: 7, Source: theano_demo.py

Example 4: LQLEP_wBarrier

def LQLEP_wBarrier( LQLEP    = Th.dscalar(), ldet = Th.dscalar(), v1 = Th.dvector(), 
                    N_spike  = Th.dscalar(), ImM  = Th.dmatrix(),  U = Th.dmatrix(),
                    V2       = Th.dvector(),    u = Th.dvector(),  C = Th.dmatrix(),
                    **other):
    '''
    The actual Linear-Quadratic-Exponential-Poisson log-likelihood, 
    as a function of theta and M, 
    with a barrier on the log-det term and a prior.
    '''
    sq_nonlinearity = V2**2.*Th.sum( Th.dot(U,C)*U, axis=[1])  #Th.sum(U**2,axis=[1])
    nonlinearity = V2 * Th.sqrt( Th.sum( Th.dot(U,C)*U, axis=[1])) #Th.sum(U**2,axis=[1]) )
    if 'uc' in other:  # dict.has_key() was removed in Python 3
        LQLEP_wPrior = LQLEP + 0.5 * N_spike * ( 1./(ldet+250.)**2. \
                     - 0.000001 * Th.sum(Th.log(1.-4*sq_nonlinearity))) \
                     + 10. * Th.sum( (u[2:]+u[:-2]-2*u[1:-1])**2. ) \
                     + 10. * Th.sum( (other['uc'][2:]+other['uc'][:-2]-2*other['uc'][1:-1])**2. ) \
                     + 0.000000001 * Th.sum( v1**2. )
#                     + 100. * Th.sum( v1 )
    #                 + 0.0001*Th.sum( V2**2 )
    else:
        LQLEP_wPrior = LQLEP + 0.5 * N_spike * ( 1./(ldet+250.)**2. \
                     - 0.000001 * Th.sum(Th.log(1.-4*sq_nonlinearity))) \
                     + 10. * Th.sum( (u[2:]+u[:-2]-2*u[1:-1])**2. ) \
                     + 0.000000001 * Th.sum( v1**2. )
#                     + 100. * Th.sum( v1 )
    #                 + 0.0001*Th.sum( V2**2 )
    eigsImM,barrier = eig( ImM )
    barrier   = 1-(Th.sum(Th.log(eigsImM))>-250) * \
                  (Th.min(eigsImM)>0) * (Th.max(4*sq_nonlinearity)<1)
    other.update(locals())
    return named( **other )
Author: kolia, Project: subunits, Lines: 31, Source: QuadPoiss.py

Example 5: sample_gradient

def sample_gradient():
    print("differentiation")
    x, y = T.dscalars("x", "y")
    z = (x + 2*y)**2
    # dz/dx
    gx = T.grad(z, x)
    fgx = theano.function([x, y], gx)
    print(fgx(1.0, 1.0))
    # dz/dy
    gy = T.grad(z, y)
    fgy = theano.function([x, y], gy)
    print(fgy(1.0, 1.0))
    # d{sigmoid(x)}/dx  (sigmoid is defined elsewhere in the source file)
    x = T.dscalar("x")
    sig = sigmoid(x)
    dsig = T.grad(sig, x)
    f = theano.function([x], dsig)
    print(f(0.0))
    print(f(1.0))
    # d{sigmoid(<x,w>)}/dx
    w = T.dscalar("w")
    sig = sigmoid(T.dot(x, w))
    dsig = T.grad(sig, x)
    f = theano.function([x, w], dsig)
    print(f(1.0, 2.0))
    print(f(3.0, 4.0))
    print()
Author: norikinishida, Project: snippets, Lines: 27, Source: sample.py

Example 6: leapfrog1_dE

def leapfrog1_dE(H, q, profile):
    """Computes a theano function that computes one leapfrog step and the energy difference between the beginning and end of the trajectory.
    Parameters
    ----------
    H : Hamiltonian
    q : theano.tensor
    profile : Boolean

    Returns
    -------
    theano function which returns
    q_new, p_new, dE
    """
    p = tt.dvector('p')
    p.tag.test_value = q.tag.test_value

    e = tt.dscalar('e')
    e.tag.test_value = 1

    q1, p1 = leapfrog(H, q, p, 1, e)
    E = energy(H, q1, p1)

    E0 = tt.dscalar('E0')
    E0.tag.test_value = 1

    dE = E - E0

    f = theano.function([q, p, e, E0], [q1, p1, dE], profile=profile)
    f.trust_input = True
    return f
Author: hstm, Project: pymc3, Lines: 30, Source: nuts.py

Example 7: make_minimizer

def make_minimizer(Model):
    L, y = T.ivector('L'), T.dvector('y')
    mu, eps = T.dscalar('mu'), T.dscalar('eps')
    R, eta = T.dtensor3('R'),  T.dvector('eta')

    model = Model(L, y, mu, R, eta, eps)
    return theano.function([L, y, mu, R, eta, eps], model.minimize())
Author: pminervini, Project: knowledge-propagation, Lines: 7, Source: momentum.py

Example 8: train_minibatch_fn

    def train_minibatch_fn(self, evaluate=False):
        """
        Initialize this Theano function once
        """
        X = T.lmatrix('X_train')
        L_x = T.lvector('L_X_train')

        Y = T.lmatrix('Y_train')
        L_y = T.lvector('L_y_train')

        learning_rate = T.dscalar('learning_rate')
        momentum = T.dscalar('momentum')
        weight_decay = T.dscalar('weight_decay')

        loss, accuracy = self.loss(X, L_x, Y, L_y, weight_decay)
        updates = self.get_sgd_updates(loss, learning_rate, momentum)

        outputs = [loss, accuracy]

        if evaluate:
            precision, recall = self.evaluate(X, L_x, Y, L_y)
            outputs = outputs + [precision, recall]

        return theano.function(
            inputs=[X, L_x, Y, L_y, learning_rate, momentum, weight_decay],
            outputs=outputs,
            updates=updates
        )
Author: tivaro, Project: ULL-P2, Lines: 28, Source: end_to_end_model.py

Example 9: init_output_delta_function

 def init_output_delta_function(self):
     y = T.dscalar('example_value')
     a = T.dscalar('actual_value')
     # T.grad differentiates a scalar expression with respect to a variable;
     # assuming self.activation is a callable activation (e.g. T.nnet.sigmoid),
     # take its derivative at the actual output a.
     dg = T.grad(self.activation(a), a)
     delta = dg * (y - a)
     f = theano.function([a, y], delta)
     return f
Author: johannbm, Project: MTDT-Projects, Lines: 7, Source: hidden_layer.py

Example 10: create_function

def create_function():
    import theano.tensor as T

    x = T.dscalar('x')
    y = T.dscalar('y')
    z = x + y
    z.eval({x: 16.3, y: 12.1})
Author: yonglei, Project: code, Lines: 7, Source: theano_tutorial.py

Example 11: test_default_dtype

    def test_default_dtype(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.dscalar()
        high = tensor.dscalar()

        # Should not silently downcast from low and high
        out0 = random.uniform(low=low, high=high, size=(42,))
        assert out0.dtype == 'float64'
        f0 = function([low, high], out0)
        val0 = f0(-2.1, 3.1)
        assert val0.dtype == 'float64'

        # Should downcast, since asked explicitly
        out1 = random.uniform(low=low, high=high, size=(42,), dtype='float32')
        assert out1.dtype == 'float32'
        f1 = function([low, high], out1)
        val1 = f1(-1.1, 1.1)
        assert val1.dtype == 'float32'

        # Should use floatX
        lowf = tensor.fscalar()
        highf = tensor.fscalar()
        outf = random.uniform(low=lowf, high=highf, size=(42,))
        assert outf.dtype == config.floatX
        ff = function([lowf, highf], outf)
        valf = ff(numpy.float32(-0.1), numpy.float32(0.3))
        assert valf.dtype == config.floatX
Author: ChinaQuants, Project: Theano, Lines: 27, Source: test_shared_randomstreams.py

Example 12: neural_net

    def neural_net(
            x=T.dmatrix(),    #our points, one point per row
            y=T.dmatrix(),    #our targets
            w=T.dmatrix(),    #first layer weights
            b=T.dvector(),    #first layer bias
            v=T.dmatrix(),    #second layer weights
            c=T.dvector(),    #second layer bias
            step=T.dscalar(), #step size for gradient descent
            l2_coef=T.dscalar() #l2 regularization amount
            ):
        """Idea A:
        """
        hid = T.tanh(T.dot(x, w) + b)
        pred = T.dot(hid, v) + c
        sse = T.sum((pred - y) * (pred - y))
        w_l2 = T.sum(T.sum(w*w))
        v_l2 = T.sum(T.sum(v*v))
        loss = sse + l2_coef * (w_l2 + v_l2)

        def symbolic_params(cls):
            return [cls.w, cls.b, cls.v, cls.c]

        def update(cls, x, y, **kwargs):
            params = cls.symbolic_params()
            gp = T.grad(cls.loss, params)
            return [], [In(p, update=p - cls.step * g) for p,g in zip(params, gp)]

        def predict(cls, x, **kwargs):
            return cls.pred, []

        return locals()
Author: olivierverdier, Project: Theano, Lines: 31, Source: symbolic_module.py

Example 13: test_divide_floats

 def test_divide_floats(self):
     a = T.dscalar('a')
     b = T.dscalar('b')
     c = theano.function([a, b], b / a)
     d = theano.function([a, b], b // a)
     assert c(6, 3) == 0.5
     assert d(6, 3) == 0.0
Author: Abioy, Project: Theano, Lines: 7, Source: test_div_future.py

Example 14: dtw

def dtw(array1, array2):
    """
    Accepts: two one dimensional arrays
    Returns: (float) DTW distance between them.
    """
    s = np.zeros((array1.size+1, array2.size+1))

    s[:,0] = 1e6
    s[0,:] = 1e6
    s[0,0] = 0.0

    # Set up symbolic variables
    square = T.dmatrix('square')
    vec1 = T.dvector('vec1')
    vec2 = T.dvector('vec2')
    vec1_length = T.dscalar('vec1_length')
    vec2_length = T.dscalar('vec2_length')
    outer_loop = T.arange(vec1_length, dtype='int64')
    inner_loop = T.arange(vec2_length, dtype='int64')

    # Run the outer loop
    path, _ = scan(fn=outer,
                    outputs_info=[dict(initial=square, taps=[-1])],
                    non_sequences=[inner_loop, vec1, vec2],
                    sequences=outer_loop)

    # Compile the function
    theano_square = function([vec1, vec2, square, vec1_length, vec2_length], path, on_unused_input='warn')

    # Call the compiled function and return the actual distance
    return theano_square(array1, array2, s, array1.size, array2.size)[-1][array1.size, array2.size]
Author: astanway, Project: theano-dtw, Lines: 31, Source: dtw.py

Example 15: theano_setup

    def theano_setup(self):
    
        # The matrices Wb and Wc were originally tied.
        # Because of that, I decided to keep Wb and Wc with
        # the same shape (instead of being transposed) to
        # avoid disturbing the code as much as possible.

        Wb = T.dmatrix('Wb')
        Wc = T.dmatrix('Wc')
        b = T.dvector('b')
        c = T.dvector('c')
        scale_s = T.dscalar('scale_s')
        scale_plus_x = T.dscalar('scale_plus_x')
        x = T.dmatrix('x')
    
        h_act = T.dot(x, Wc) + c
        if self.act_func[0] == 'tanh':
            h = T.tanh(h_act)
        elif self.act_func[0] == 'sigmoid':
            h = T.nnet.sigmoid(h_act)
        elif self.act_func[0] == 'id':
            # bad idea
            h = h_act
        else:
            error("Invalid act_func[0]")

        r_act = T.dot(h, Wb.T) + b
        if self.act_func[1] == 'tanh':
            r = scale_s * T.tanh(r_act)
        elif self.act_func[1] == 'sigmoid':
            r = scale_s * T.nnet.sigmoid(r_act)
        elif self.act_func[1] == 'id':
            r = scale_s * r_act
        else:
            error("Invalid act_func[1]")

        if self.want_plus_x:
            r = r + scale_plus_x * x

        # Another variable to be able to call a function
        # with a noisy x and compare it to a reference x.
        y = T.dmatrix('y')

        loss = ((r - y)**2)
        sum_loss = T.sum(loss)
        
        # theano_encode_decode : vectorial function in argument X.
        # theano_loss : vectorial function in argument X.
        # theano_gradients : returns triplet of gradients, each of
        #                    which involves the all data X summed
        #                    so it's not a "vectorial" function.

        self.theano_encode_decode = function([Wb, Wc, b, c, scale_s, scale_plus_x, x], r)
        self.theano_loss = function([Wb, Wc, b, c, scale_s, scale_plus_x, x, y], loss)

        self.theano_gradients = function([Wb, Wc, b, c, scale_s, scale_plus_x, x, y],
                                         [T.grad(sum_loss, Wb),      T.grad(sum_loss, Wc),
                                          T.grad(sum_loss, b),       T.grad(sum_loss, c),
                                          T.grad(sum_loss, scale_s), T.grad(sum_loss, scale_plus_x)])
Author: gyom, Project: denoising_autoencoder, Lines: 59, Source: dae_untied_weights_plus_x.py


Note: The theano.tensor.dscalar examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors; please refer to each project's license before redistributing or using the code. Do not reproduce this article without permission.