

Python tensor.fscalar Method Code Examples

This article collects typical usage examples of the tensor.fscalar method from Python's theano.tensor module. If you are wondering what tensor.fscalar does, what it is used for, or how to call it, the curated code examples below should help. You can also explore further usage examples from the theano.tensor module.


Below, 15 code examples of the tensor.fscalar method are presented, sorted by popularity by default.
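
Before the individual examples, here is a minimal sketch of what tensor.fscalar provides: a symbolic, 0-dimensional float32 variable that can be combined into expressions and compiled with theano.function. The snippet is illustrative only; it assumes a working Theano installation, and the variable names are arbitrary.

import theano
import theano.tensor as T

x = T.fscalar('x')  # symbolic float32 scalar; the string is just a debug name
y = T.fscalar('y')

f = theano.function([x, y], x * y + 1)  # compile the symbolic expression
print(f(2.0, 3.0))                      # prints 7.0 (a 0-d float32 array)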

Example 1: setUp

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def setUp(self):
        self.iv = T.tensor(dtype='int32', broadcastable=(False,))
        self.fv = T.tensor(dtype='float32', broadcastable=(False,))
        self.fv1 = T.tensor(dtype='float32', broadcastable=(True,))
        self.dv = T.tensor(dtype='float64', broadcastable=(False,))
        self.dv1 = T.tensor(dtype='float64', broadcastable=(True,))
        self.cv = T.tensor(dtype='complex64', broadcastable=(False,))
        self.zv = T.tensor(dtype='complex128', broadcastable=(False,))

        self.fv_2 = T.tensor(dtype='float32', broadcastable=(False,))
        self.fv1_2 = T.tensor(dtype='float32', broadcastable=(True,))
        self.dv_2 = T.tensor(dtype='float64', broadcastable=(False,))
        self.dv1_2 = T.tensor(dtype='float64', broadcastable=(True,))
        self.cv_2 = T.tensor(dtype='complex64', broadcastable=(False,))
        self.zv_2 = T.tensor(dtype='complex128', broadcastable=(False,))

        self.fm = T.fmatrix()
        self.dm = T.dmatrix()
        self.cm = T.cmatrix()
        self.zm = T.zmatrix()

        self.fa = T.fscalar()
        self.da = T.dscalar()
        self.ca = T.cscalar()
        self.za = T.zscalar() 
Developer: muhanzhang, Project: D-VAE, Lines: 27, Source: test_blas.py

Example 2: test_copy_delete_updates

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def test_copy_delete_updates(self):
        x = T.fscalar('x')
        # Shared variables for the test; z is the one that gets an update
        y = theano.shared(value=1, name='y')
        z = theano.shared(value=2, name='z')
        out = x + y + z

        # Test for different linkers
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = theano.function([x], [out], mode=mode, updates={z: z * 2})
            cpy = ori.copy(delete_updates=True)

            # With the update deleted, z stays at 2, so every call
            # returns 1 + 1 + 2 == 4.
            assert cpy(1)[0] == 4
            assert cpy(1)[0] == 4
            assert cpy(1)[0] == 4
Developer: muhanzhang, Project: D-VAE, Lines: 19, Source: test_function_module.py

Example 3: test_param_allow_downcast_floatX

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def test_param_allow_downcast_floatX(self):
        a = tensor.fscalar('a')
        b = tensor.fscalar('b')
        c = tensor.fscalar('c')

        f = pfunc([In(a, allow_downcast=True),
                   In(b, allow_downcast=False),
                   In(c, allow_downcast=None)],
                  (a + b + c))

        # If the values can be accurately represented, everything is OK
        assert numpy.all(f(0, 0, 0) == 0)

        # If allow_downcast is True, idem
        assert numpy.allclose(f(0.1, 0, 0), 0.1)

        # If allow_downcast is False, nope
        self.assertRaises(TypeError, f, 0, 0.1, 0)

        # If allow_downcast is None, it should work iff floatX=float32
        if config.floatX == 'float32':
            assert numpy.allclose(f(0, 0, 0.1), 0.1)
        else:
            self.assertRaises(TypeError, f, 0, 0, 0.1) 
Developer: muhanzhang, Project: D-VAE, Lines: 26, Source: test_pfunc.py

Example 4: test_gpualloc_output_to_gpu

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def test_gpualloc_output_to_gpu():
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    a = tcn.shared_constructor(a_val)

    b = T.fscalar()
    f = theano.function([b], T.ones_like(a) + b, mode=mode_without_gpu)
    f_gpu = theano.function([b], B.gpu_from_host(T.ones_like(a)) + b,
                            mode=mode_with_gpu)

    f(2)
    f_gpu(2)

    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 1
    assert sum([node.op == B.gpu_alloc
                for node in f_gpu.maker.fgraph.toposort()]) == 1

    assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape) + 9,
                          f_gpu(9))
    assert numpy.allclose(f(5), f_gpu(5)) 
Developer: muhanzhang, Project: D-VAE, Lines: 21, Source: test_basic_ops.py

Example 5: setUp

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def setUp(self):

        super(TestPdbBreakpoint, self).setUp()

        # Sample computation that involves tensors with different numbers
        # of dimensions
        self.input1 = T.fmatrix()
        self.input2 = T.fscalar()
        self.output = T.dot((self.input1 - self.input2),
                            (self.input1 - self.input2).transpose())

        # Declare the conditional breakpoint
        self.breakpointOp = PdbBreakpoint("Sum of output too high")
        self.condition = T.gt(self.output.sum(), 1000)
        (self.monitored_input1,
         self.monitored_input2,
         self.monitored_output) = self.breakpointOp(self.condition,
                                                    self.input1,
                                                    self.input2, self.output) 
Developer: muhanzhang, Project: D-VAE, Lines: 21, Source: test_breakpoint.py

Example 6: test_copy_delete_updates

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def test_copy_delete_updates(self):
        x = T.fscalar('x')
        # Shared variables for the test; z is the one that gets an update
        y = theano.shared(value=1, name='y')
        z = theano.shared(value=2, name='z')
        out = x + y + z

        # Test for different linkers
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = theano.function([x], [out], mode=mode, updates={z: z * 2})
            cpy = ori.copy(delete_updates=True)

            # With the update deleted, z stays at 2, so every call
            # returns 1 + 1 + 2 == 4.
            assert cpy(1)[0] == 4
            assert cpy(1)[0] == 4
            assert cpy(1)[0] == 4
Developer: rizar, Project: attention-lvcsr, Lines: 19, Source: test_function_module.py

Example 7: get_SGD_trainer

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def get_SGD_trainer(self):
        """ Returns a plain SGD minibatch trainer with learning rate as param. """
        batch_x = T.fmatrix('batch_x')
        batch_y = T.ivector('batch_y')
        learning_rate = T.fscalar('lr')  # learning rate
        gparams = T.grad(self.mean_cost, self.params)  # all the gradients
        updates = OrderedDict()
        for param, gparam in zip(self.params, gparams):
            updates[param] = param - gparam * learning_rate

        train_fn = theano.function(inputs=[theano.Param(batch_x),
                                           theano.Param(batch_y),
                                           theano.Param(learning_rate)],
                                   outputs=self.mean_cost,
                                   updates=updates,
                                   givens={self.x: batch_x, self.y: batch_y})

        return train_fn 
Developer: syhw, Project: DL4H, Lines: 20, Source: dnn.py
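
A hypothetical call of the trainer returned above might look as follows (model, X_batch, and y_batch are assumed names, not part of the example; X_batch must be a float32 matrix and y_batch an int32 vector to match the declared input types):

train_fn = model.get_SGD_trainer()
cost = train_fn(X_batch, y_batch, 0.01)  # one minibatch step, learning rate 0.01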

Example 8: get_adagrad_trainer

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def get_adagrad_trainer(self):
        """ Returns an Adagrad (Duchi et al. 2010) trainer using a learning rate.
        """
        batch_x = T.fmatrix('batch_x')
        batch_y = T.ivector('batch_y')
        learning_rate = T.fscalar('lr')  # learning rate
        gparams = T.grad(self.mean_cost, self.params)  # all the gradients
        updates = OrderedDict()
        for accugrad, param, gparam in zip(self._accugrads, self.params, gparams):
            # c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)
            agrad = accugrad + gparam * gparam
            dx = - (learning_rate / T.sqrt(agrad + self._eps)) * gparam
            updates[param] = param + dx
            updates[accugrad] = agrad

        train_fn = theano.function(inputs=[theano.Param(batch_x),
                                           theano.Param(batch_y),
                                           theano.Param(learning_rate)],
                                   outputs=self.mean_cost,
                                   updates=updates,
                                   givens={self.x: batch_x, self.y: batch_y})

        return train_fn 
Developer: syhw, Project: DL4H, Lines: 25, Source: dnn.py

Example 9: test_default_dtype

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def test_default_dtype(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.dscalar()
        high = tensor.dscalar()

        # Should not silently downcast from low and high
        out0 = random.uniform(low=low, high=high, size=(42,))
        assert out0.dtype == 'float64'
        f0 = function([low, high], out0)
        val0 = f0(-2.1, 3.1)
        assert val0.dtype == 'float64'

        # Should downcast, since asked explicitly
        out1 = random.uniform(low=low, high=high, size=(42,), dtype='float32')
        assert out1.dtype == 'float32'
        f1 = function([low, high], out1)
        val1 = f1(-1.1, 1.1)
        assert val1.dtype == 'float32'

        # Should use floatX
        lowf = tensor.fscalar()
        highf = tensor.fscalar()
        outf = random.uniform(low=lowf, high=highf, size=(42,))
        assert outf.dtype == config.floatX
        ff = function([lowf, highf], outf)
        valf = ff(numpy.float32(-0.1), numpy.float32(0.3))
        assert valf.dtype == config.floatX 
Developer: muhanzhang, Project: D-VAE, Lines: 29, Source: test_shared_randomstreams.py

Example 10: test_copy_share_memory

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def test_copy_share_memory(self):
        x = T.fscalar('x')
        # SharedVariable for tests, one of them has update
        y = theano.shared(value=1)
        z = theano.shared(value=2)
        out = T.tanh((x + y + 2) / (x + z - 0.2)**2)

        # Test for different linkers
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = theano.function([x], [out], mode=mode, updates={z: z + 1})
            cpy = ori.copy(share_memory=True)

            # Test if memories shared
            storage_map_ori = ori.fn.storage_map
            storage_map_cpy = cpy.fn.storage_map
            fgraph_cpy = cpy.maker.fgraph

            # Assert that intermediate storages and Constant storages are
            # shared, while output storages are not.
            i_o_variables = fgraph_cpy.inputs + fgraph_cpy.outputs
            ori_storages = storage_map_ori.values()
            l = [val for key, val in storage_map_cpy.items()
                 if key not in i_o_variables or isinstance(key, theano.tensor.Constant)]
            for storage in l:
                self.assertTrue(any([storage is s for s in ori_storages]))

            # Assert storages of SharedVariable without updates are shared
            for (input, _1, _2), here, there in zip(ori.indices,
                                                    ori.input_storage,
                                                    cpy.input_storage):
                self.assertTrue(here.data is there.data) 
Developer: muhanzhang, Project: D-VAE, Lines: 33, Source: test_function_module.py

Example 11: test_gpualloc_input_on_gpu

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def test_gpualloc_input_on_gpu():
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    a = tcn.shared_constructor(a_val)

    b = T.fscalar()
    f = theano.function([b], T.ones_like(a) + b, mode=mode_without_gpu)
    f_gpu = theano.function([b], T.ones_like(a) + b, mode=mode_with_gpu)

    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 1
    assert sum([node.op == B.gpu_alloc
                for node in f_gpu.maker.fgraph.toposort()]) == 1

    assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape) + 9,
                          f_gpu(9))
    assert numpy.allclose(f(5), f_gpu(5)) 
Developer: muhanzhang, Project: D-VAE, Lines: 17, Source: test_basic_ops.py

Example 12: test_scalar

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def test_scalar(self):
        x = cuda.fscalar()
        y = numpy.array(7, dtype='float32')
        assert y.size == theano.function([x], x.size)(y) 
Developer: muhanzhang, Project: D-VAE, Lines: 6, Source: test_basic_ops.py

Example 13: compile

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def compile(self, cost, error_map_pyx, add_updates=[], debug_info=[]):
        batch_idx = T.iscalar()
        learning_rate = T.fscalar()

        updates, norm_grad = self.hp.optimizer(cost, self.params.values(), lr=learning_rate)

        updates += add_updates

        self.outidx = {'cost':0, 'error_map_pyx':1, 'norm_grad':2}
        outputs = [cost, error_map_pyx]

        self.train = theano.function(inputs=[batch_idx, learning_rate], updates=updates,
                                     givens={
                                         self.X:self.data['tr_X'][batch_idx * self.hp.batch_size : 
                                                                  (batch_idx+1) * self.hp.batch_size],
                                         self.Y:self.data['tr_Y'][batch_idx * self.hp.batch_size : 
                                                                  (batch_idx+1) * self.hp.batch_size]},
                                     outputs=outputs + [norm_grad])
                                     #,mode=theano.compile.nanguardmode.NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True))

        #T.printing.debugprint(self.train)
        #T.printing.pydotprint(self.train, outfile="logreg_pydotprint_train.png", var_with_name_simple=True)
        
        self.validate = theano.function(inputs=[batch_idx], 
                                        givens={
                                         self.X:self.data['va_X'][batch_idx * self.hp.test_batch_size : 
                                                                  (batch_idx+1) * self.hp.test_batch_size],
                                         self.Y:self.data['va_Y'][batch_idx * self.hp.test_batch_size : 
                                                                  (batch_idx+1) * self.hp.test_batch_size]},
                                    outputs=outputs)
        
        self.test = theano.function(inputs=[batch_idx], 
                                    givens={
                                         self.X:self.data['te_X'][batch_idx * self.hp.test_batch_size : 
                                                                  (batch_idx+1) * self.hp.test_batch_size],
                                         self.Y:self.data['te_Y'][batch_idx * self.hp.test_batch_size : 
                                                                  (batch_idx+1) * self.hp.test_batch_size]},
                                    outputs=outputs)

# -------------------------------------------------------------------------------------------------- 
Developer: Ivaylo-Popov, Project: Theano-Lights, Lines: 42, Source: modelbase.py
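
Once compile has been called, a hypothetical training loop could drive the compiled functions by minibatch index (n_epochs, n_train_batches, and model are assumed names; the returned tuple follows the self.outidx order defined above):

for epoch in range(n_epochs):
    for batch_idx in range(n_train_batches):
        cost, error_map, norm_grad = model.train(batch_idx, 0.001)  # batch index, learning rate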

Example 14: test_copy_share_memory

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def test_copy_share_memory(self):
        x = T.fscalar('x')
        # SharedVariable for tests, one of them has update
        y = theano.shared(value=1)
        z = theano.shared(value=2)
        out = T.tanh((x + y + 2) / (x + z - 0.2)**2)

        # Test for different linkers
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = theano.function([x], [out], mode=mode, updates={z: z + 1})
            cpy = ori.copy(share_memory=True)

            # Test if memories shared
            storage_map_ori = ori.fn.storage_map
            storage_map_cpy = cpy.fn.storage_map
            fgraph_cpy = cpy.maker.fgraph

            # Assert that intermediate storages and Constant storages are
            # shared, while output storages are not.
            i_o_variables = fgraph_cpy.inputs + fgraph_cpy.outputs
            ori_storages = storage_map_ori.values()
            for key in storage_map_cpy.keys():
                storage = storage_map_cpy[key]
                if key not in i_o_variables or isinstance(key, theano.tensor.Constant):
                    self.assertTrue(any([storage is s for s in ori_storages]))

            # Assert storages of SharedVariable without updates are shared
            for (input, _1, _2), here, there in zip(ori.indices,
                                                    ori.input_storage,
                                                    cpy.input_storage):
                self.assertTrue(here.data is there.data)
Developer: rizar, Project: attention-lvcsr, Lines: 34, Source: test_function_module.py

Example 15: get_rmsprop_trainer

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fscalar [as alias]
def get_rmsprop_trainer(self, with_step_adapt=True, nesterov=False):  # TODO Nesterov momentum
        """ Returns an RmsProp (possibly Nesterov) (Sutskever 2013) trainer
        using self._rho, self._eps and self._momentum params. """
        # TODO CHECK
        batch_x = T.fmatrix('batch_x')
        batch_y = T.ivector('batch_y')
        learning_rate = T.fscalar('lr')  # learning rate
        gparams = T.grad(self.mean_cost, self.params)
        updates = OrderedDict()
        for accugrad, avggrad, accudelta, sa, param, gparam in zip(
                self._accugrads, self._avggrads, self._accudeltas,
                self._stepadapts, self.params, gparams):
            acc_grad = self._rho * accugrad + (1 - self._rho) * gparam * gparam
            avg_grad = self._rho * avggrad + (1 - self._rho) * gparam  # this decay/discount (self._rho) should arguably differ from the one on the line above
            ###scaled_grad = gparam / T.sqrt(acc_grad + self._eps)  # original RMSprop gradient scaling
            scaled_grad = gparam / T.sqrt(acc_grad - avg_grad**2 + self._eps)  # Alex Graves' RMSprop variant (divide by a "running stddev" of the updates)
            if with_step_adapt:
                incr = sa * (1. + self._stepadapt_alpha)
                #decr = sa * (1. - self._stepadapt_alpha)
                decr = sa * (1. - 2*self._stepadapt_alpha)
                ###steps = sa * T.switch(accudelta * -gparam >= 0, incr, decr)
                steps = T.clip(T.switch(accudelta * -gparam >= 0, incr, decr), self._eps, 1./self._eps)  # bad overloading of self._eps!
                scaled_grad = steps * scaled_grad
                updates[sa] = steps
            dx = self._momentum * accudelta - learning_rate * scaled_grad
            updates[param] = param + dx
            updates[accugrad] = acc_grad
            updates[avggrad] = avg_grad
            updates[accudelta] = dx

        train_fn = theano.function(inputs=[theano.Param(batch_x),
                                           theano.Param(batch_y),
                                           theano.Param(learning_rate)],
                                   outputs=self.mean_cost,
                                   updates=updates,
                                   givens={self.x: batch_x, self.y: batch_y})

        return train_fn 
Developer: syhw, Project: DL4H, Lines: 40, Source: dnn.py
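
For reference, the core of the Graves-style RMSProp update used in Example 15 can be sketched in plain NumPy, leaving out the step-size adaptation and momentum terms (a simplified, illustrative version; all names are local to this sketch):

import numpy as np

def rmsprop_step(param, grad, acc_grad, avg_grad, lr=0.001, rho=0.95, eps=1e-6):
    # Running averages of the squared gradient and of the gradient itself
    acc_grad = rho * acc_grad + (1 - rho) * grad * grad
    avg_grad = rho * avg_grad + (1 - rho) * grad
    # Graves' variant: scale by a running estimate of the gradient's
    # standard deviation instead of its plain RMS
    scaled = grad / np.sqrt(acc_grad - avg_grad ** 2 + eps)
    return param - lr * scaled, acc_grad, avg_grad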


Note: The theano.tensor.fscalar method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Refer to each project's license before using or redistributing the code; do not republish without permission.