

Python cuda.use Function Code Examples

This article collects typical usage examples of the Python theano.sandbox.cuda.use function, gathered from open-source projects. If you are unsure what the use function does or how to call it in practice, the curated examples below may help.


A total of 11 code examples of the use function are shown below, ordered by popularity by default.
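
Before working through the individual examples, here is a minimal usage sketch (not taken from any of the projects below), assuming a Theano version old enough to still ship the theano.sandbox.cuda backend. The device string and keyword arguments are the ones that recur in the examples that follow:

# Minimal sketch: enable Theano's old CUDA backend on a chosen device.
import theano.sandbox.cuda as cuda

if cuda.cuda_available and not cuda.cuda_enabled:
    # 'gpu' picks the default device; 'gpu0', 'gpu1', ... pick a specific card.
    cuda.use('gpu',
             force=False,                                # do not abort if the GPU cannot be initialized
             default_to_move_computation_to_gpu=True,    # compile graphs for the GPU by default
             move_shared_float32_to_gpu=True,            # keep float32 shared variables on the GPU
             enable_cuda=True)                           # leave CUDA support switched on

Several of the examples below (3, 6, 7 and 10) instead pass default_to_move_computation_to_gpu=False and move_shared_float32_to_gpu=False, which initializes the device without redirecting ordinary computation to the GPU.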

Example 1: test_maxout_conv_c01b_cifar10

 def test_maxout_conv_c01b_cifar10(self):
     if cuda.cuda_available is False:
         raise SkipTest('Optional package cuda disabled')
     if not hasattr(cuda, 'unuse'):
         raise Exception("Theano version too old to run this test!")
     # Test that we can run a small convolutional model on the GPU.
     assert cuda.cuda_enabled is False
     # Even if there is a GPU, when the user didn't specify device=gpu
     # we still want to run this test.
     try:
         old_floatX = config.floatX
         cuda.use('gpu')
         config.floatX = 'float32'
         try:
             train = yaml_parse.load(yaml_string_maxout_conv_c01b_cifar10)
         except NoDataPathError:
             raise SkipTest("PYLEARN2_DATA_PATH environment variable "
                            "not defined")
         train.main_loop()
         # Check that the performance is close to the expected one:
         # test_y_misclass: 0.3777000308036804
         misclass_chan = train.algorithm.monitor.channels['test_y_misclass']
         assert misclass_chan.val_record[-1] < 0.38
         # test_y_nll: 1.0978516340255737
         nll_chan = train.algorithm.monitor.channels['test_y_nll']
         assert nll_chan.val_record[-1] < 1.1
     finally:
         config.floatX = old_floatX
         cuda.unuse()
     assert cuda.cuda_enabled is False
Developer ID: SuperElectric, Project: pylearn2, Lines of code: 30, Source file: test_maxout.py

Example 2: init_theano

def init_theano():
	"""Initialize Theano for Caffe
	"""
	global theano_initialized
	if theano_initialized:
		return
	import caffe
	from theano.sandbox.cuda import use
	assert caffe.check_mode_gpu()
	use('gpu%d' % caffe.get_device())
	theano_initialized = True
Developer ID: NHZlX, Project: tnarihi-caffe-helper, Lines of code: 11, Source file: theano_util.py

Example 3: set_gpu_from_theano

def set_gpu_from_theano():
    """
    This sets the GPU used by PyCUDA to the same one used by Theano.
    """
    #import pdb;pdb.set_trace()
    if cuda.use.device_number is None:
        cuda.use("gpu",
                 force=False,
                 default_to_move_computation_to_gpu=False,
                 move_shared_float32_to_gpu=False,
                 enable_cuda=True,
                 test_driver=True)

    assert cuda.use.device_number == cuda_ndarray.active_device_number()
Developer ID: NicolasBouchard, Project: Theano, Lines of code: 14, Source file: pycuda_init.py

Example 4: test_cuda

def test_cuda():
  import theano.sandbox.cuda as theano_cuda
  assert_true(theano_cuda.cuda_available, "Theano CUDA support not available. Check that nvcc is in $PATH.")
  if theano_cuda.cuda_enabled: # already enabled when $THEANO_FLAGS=device=gpu
    print("CUDA already enabled")
  else:
    print("Call theano_cuda.use")
    theano_cuda.use(device="gpu", force=True)
  try:
    import cuda_ndarray.cuda_ndarray as cuda
  except ImportError as exc:
    raise Exception("Theano CUDA support seems broken: %s" % exc)
  id = cuda.active_device_number(); """ :type: int """
  device_name = cuda.active_device_name(); """ :type: str """
  print("id: %i", id)
  print("dev name: %s" % device_name)
Developer ID: atuxhe, Project: returnn, Lines of code: 16, Source file: test_gpu.py

Example 5: test_maxout_conv_c01b_basic

 def test_maxout_conv_c01b_basic(self):
     if cuda.cuda_available is False:
         raise SkipTest('Optional package cuda disabled')
     if not hasattr(cuda, 'unuse'):
         raise Exception("Theano version too old to run this test!")
     # Test that we can run a small convolutional model on the GPU.
     assert cuda.cuda_enabled is False
     # Even if there is a GPU, when the user didn't specify device=gpu
     # we still want to run this test.
     try:
         old_floatX = config.floatX
         cuda.use('gpu')
         config.floatX = 'float32'
         train = yaml_parse.load(yaml_string_maxout_conv_c01b_basic)
         train.main_loop()
     finally:
         config.floatX = old_floatX
         cuda.unuse()
     assert cuda.cuda_enabled is False
Developer ID: SuperElectric, Project: pylearn2, Lines of code: 19, Source file: test_maxout.py

Example 6: test_output_broadcast_cuda

    def test_output_broadcast_cuda(self):
        from theano.sandbox import cuda
        if not cuda.cuda_available:
            raise SkipTest("Optional package Cuda disabled")
        if cuda.use.device_number is None:
            # We would normally register VecAsRowAndCol as a GPU op, but we
            # don't want to do that here, as it would disable other tests in
            # this file. So we manually initialize the GPU if needed, to
            # avoid a warning.
            cuda.use("gpu",
                     force=True,
                     default_to_move_computation_to_gpu=False,
                     move_shared_float32_to_gpu=False,
                     enable_cuda=False)
        v = cuda.fvector('v')
        c, r = VecAsRowAndCol()(v)
        f = theano.function([v], [c, r])

        v_val = cuda.CudaNdarray(self.rng.randn(5).astype('float32'))
        f(v_val)
Developer ID: 317070, Project: Theano, Lines of code: 20, Source file: test_debugmode.py

Example 7: py_conv_valid_numpy

# Needed because the GPU conv ops don't have a perform() implementation.
if theano.config.mode == 'FAST_COMPILE':
    theano_mode = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
    theano_mode = theano.compile.mode.get_default_mode().including('gpu')

cuda_tensor4 = cuda.CudaNdarrayType([False] * 4)

device_id = theano.sandbox.cuda.use.device_number
if device_id is None:
    cuda.shared_constructor(numpy.zeros(2, dtype='float32'))
device_id = theano.sandbox.cuda.use.device_number
if device_id is None:
    cuda.use("gpu",
             force=False,
             default_to_move_computation_to_gpu=False,
             move_shared_float32_to_gpu=False,
             enable_cuda=False,
             test_driver=True)
    device_id = theano.sandbox.cuda.use.device_number
    
cuda_ndarray = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray
device_prop = cuda_ndarray.device_properties(device_id)


def py_conv_valid_numpy(img, kern):
    assert img.shape[1] == kern.shape[1]
    outshp = (img.shape[0], kern.shape[0],
            img.shape[2] - kern.shape[2] + 1,
            img.shape[3] - kern.shape[3] + 1)
    out = numpy.zeros(outshp, dtype='float32')
    for b in xrange(out.shape[0]):
Developer ID: JoeGlobal2014, Project: Theano, Lines of code: 32, Source file: test_conv_cuda_ndarray.py

Example 8: tes_use

def tes_use():
    tcn.use()
Developer ID: gexarcha, Project: Theano, Lines of code: 2, Source file: test_basic_ops.py

Example 9: use

   http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf


"""
import numpy
from theano.compile.sandbox import shared, pfunc
from theano import tensor
from pylearn.shared.layers import LogisticRegression, SigmoidalLayer
import theano.sandbox.softsign
import pylearn.datasets.MNIST


try:
    # this tells theano to use the GPU if possible
    from theano.sandbox.cuda import use
    use()
except Exception as e:
    print('Warning: Attempt to use GPU resulted in error "%s"' % str(e))

class LeNetConvPool(object):
    """WRITEME 

    Math of what the layer does, and what symbolic variables are created by the class (w, b,
    output).

    """

    #TODO: implement biases & scales properly. There are supposed to be more parameters.
    #    - one bias & scale per filter
    #    - one bias & scale per downsample feature location (a 2d bias)
    #    - more?
Developer ID: azizur77, Project: DeepLearningTutorials, Lines of code: 31, Source file: convolutional_mlp.py

Example 10: SkipTest

from numpy.testing.noseclasses import KnownFailureTest

import theano.sandbox.gpuarray

if theano.sandbox.gpuarray.pygpu is None:
    raise SkipTest("pygpu not installed")

# If you are writing a new test file, don't copy this code, but rather
# import stuff from this file (like mode_with_gpu) to reuse it.
import theano.sandbox.cuda as cuda_ndarray
if cuda_ndarray.cuda_available and not theano.sandbox.gpuarray.pygpu_activated:
    if not cuda_ndarray.use.device_number:
        # We should not enable all the use like the flag device=gpu,
        # as many tests don't work in that setup.
        cuda_ndarray.use('gpu',
                         default_to_move_computation_to_gpu=False,
                         move_shared_float32_to_gpu=False,
                         enable_cuda=False)
    theano.sandbox.gpuarray.init_dev('cuda')

if not theano.sandbox.gpuarray.pygpu_activated:
    raise SkipTest("pygpu disabled")

from ..type import (GpuArrayType,
                    gpuarray_shared_constructor)
from ..basic_ops import (
    host_from_gpu, gpu_from_host,
    gpu_alloc, GpuAlloc,
    GpuAllocEmpty,
    gpu_from_cuda,
    cuda_from_gpu, HostFromGpu,
    GpuContiguous,
Developer ID: Ambier, Project: Theano, Lines of code: 32, Source file: test_basic_ops.py

Example 11: SkipTest

import numpy

import theano
from theano.tests import unittest_tools as utt
from theano.sandbox.gpuarray.basic_ops import GpuReshape
import theano.sandbox.gpuarray

if theano.sandbox.gpuarray.pygpu is None:
    raise SkipTest("pygpu not installed")

import theano.sandbox.cuda as cuda_ndarray
if cuda_ndarray.cuda_available and not theano.sandbox.gpuarray.pygpu_activated:
    if not cuda_ndarray.use.device_number:
        cuda_ndarray.use('gpu')
    theano.sandbox.gpuarray.init_dev('cuda')

if not theano.sandbox.gpuarray.pygpu_activated:
    raise SkipTest("pygpu disabled")

if theano.config.mode == 'FAST_COMPILE':
    mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpuarray').excluding('gpu')
    mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpuarray')
else:
    mode_with_gpu = theano.compile.mode.get_default_mode().including('gpuarray').excluding('gpu')
    mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpuarray')


def test_flatten():
    m = theano.tensor.fmatrix()
    f = theano.function([m], m.flatten(), mode=mode_with_gpu)
    val = numpy.random.rand(10,11).astype("float32")
Developer ID: csxlyan, Project: Theano, Lines of code: 31, Source file: test_opt.py


Note: The theano.sandbox.cuda.use function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; refer to each project's license before distributing or reusing the code. Do not reproduce this article without permission.