

Python optdb.add_tags function code examples

This article collects typical usage examples of the Python function theano.compile.optdb.add_tags. If you are wondering what add_tags does, how to call it, or what it looks like in real code, the curated examples below may help.


Eight code examples of the add_tags function are shown below, ordered by popularity by default.
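All eight examples share the same call pattern: once a GPU back-end has registered its optimizer group in optdb, add_tags attaches extra tags to that entry so it is selected when functions are compiled in the matching modes. Here is a minimal sketch of that pattern (not taken from any of the examples), assuming a Theano installation whose gpuarray back-end has already registered the 'gpuarray_opt' group:

# Minimal sketch of the call pattern shared by the examples below.
from theano.compile import optdb

# Attach 'fast_run' and 'fast_compile' to the already-registered
# 'gpuarray_opt' optimizer group so it runs in those compilation modes.
optdb.add_tags('gpuarray_opt', 'fast_run', 'fast_compile')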

Example 1: use

def use(device,
        force=False,
        default_to_move_computation_to_gpu=True,
        move_shared_to_gpu=True,
        preallocate=None):
    """
    Error and warning about CUDA should be displayed only when this
    function is called. We need to be able to load this module only
    to check if it is available!

    Parameters
    ----------
    device : string
        "cuda", "cuda0", "cudaN", "" (N is the device number to use).
        "" mean do all the rest and don't init a device.
    force
        Will always raise an exception if we can't use the gpu.
    default_to_move_computation_to_gpu
        If gpu init succeeded, enable by default optimizations to move
        computations to the gpu.
    move_shared_to_gpu
        If gpu init succeeded, put new shared variables on the gpu.
    preallocate
        If specified, will use this value for preallocation instead of
        gpuarray.preallocate.

    """
    if force:
        if not (device.startswith('cuda') or device.startswith('opencl')):
            raise Exception("forced the init and bad device provided: " +
                            device)
        else:
            # If we force, the device should not already be initialized.
            assert device not in init_dev.devmap
    if device:
        init_dev(device, preallocate=preallocate)
    if default_to_move_computation_to_gpu:
        optdb.add_tags('gpuarray_opt', 'fast_run', 'fast_compile')
        optdb.add_tags('gpua_scanOp_make_inplace', 'fast_run')
    if move_shared_to_gpu:
        import theano.compile
        theano.compile.shared_constructor(gpuarray_shared_constructor)
Developer: athiwatp, Project: Theano, Lines: 42, Source: __init__.py
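Example 1 is the use() entry point of the newer gpuarray back-end (theano/gpuarray/__init__.py). A hedged usage sketch, assuming pygpu/libgpuarray and a CUDA device named 'cuda0' are available:

# Hypothetical call into the use() function shown above.
import theano.gpuarray

# Initialize device cuda0; the defaults keep computation and new shared
# variables on the GPU, which is what triggers the two add_tags() calls above.
theano.gpuarray.use('cuda0',
                    default_to_move_computation_to_gpu=True,
                    move_shared_to_gpu=True)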

Example 2: use


#......... (part of the code omitted here) .........
                        "Delete your Theano cache. The automatic"
                        " recompilation did not work.")
                cuda_ndarray.cuda_ndarray.select_a_gpu()
                use.device_number = active_device_number()
                # This is needed to initialize the cublas handle.
                gpu_init(use.device_number, config.lib.cnmem)

            if test_driver:
                import theano.sandbox.cuda.tests.test_driver
                theano.sandbox.cuda.tests.test_driver.test_nvidia_driver1()
            if device_properties(use.device_number)["warpSize"] != 32:
                raise ValueError("Your GPU has a warpSize != 32. Currently"
                                 " we have code that depends on this. Email"
                                 " the Theano mailing list to tell us about"
                                 " this new GPU as we don't know any with"
                                 " this property")

            if config.print_active_device:
                if config.lib.cnmem:
                    if config.lib.cnmem > 1:
                        cnmem_enabled = "enabled with initial size: %d MB" % config.lib.cnmem
                    else:
                        cnmem = min(config.lib.cnmem, 0.95) * 100
                        cnmem_enabled = "enabled with initial size: %.1f%% of memory" % cnmem
                else:
                    cnmem_enabled = "disabled"
                cudnn_version = "not available"
                warn = None
                try:
                    if dnn_available():
                        (hdr_v, runtime_v) = dnn_version()
                        cudnn_version = runtime_v
                        # 5200 should not print warning with cudnn 5 final.
                        if cudnn_version >= 5200:
                            warn = ("Your cuDNN version is more recent than the one"
                                    " Theano officially supports."
                                    " If you see any problems, try updating Theano or"
                                    " downgrading cuDNN to version 5.1.")
                except Exception:
                    cudnn_version = dnn_available.msg
                print("Using gpu device %d: %s (CNMeM is %s, cuDNN %s)" % (
                    active_device_number(),
                    active_device_name(),
                    cnmem_enabled,
                    cudnn_version,),
                      file=sys.stderr)
                if warn:
                    warnings.warn(warn)

            if device_properties(use.device_number)['regsPerBlock'] < 16384:
                # We would try to use too many registers per block in many
                # places when there are only 8k registers per multiprocessor.
                _logger.warning(
                        "You are probably using an old GPU, that Theano"
                        " does not support."
                        " This means GPU code will most likely be slow AND may"
                        " crash when we try to use features"
                        " that your GPU does not support.")

        except (EnvironmentError, ValueError, RuntimeError) as e:
            _logger.error(("ERROR: Not using GPU."
                           " Initialisation of device %s failed:\n%s"),
                          str(device), e)
            cuda_enabled = False
            if force:
                e.args += (("You asked to force this device and it failed."
                            " No fallback to the cpu or other gpu device."),)
                raise

    elif use.device_number != device and device != 'gpu':
        _logger.warning(("Ignoring call to use(%s), GPU number %i "
            "is already in use."),
            str(device), use.device_number)

    if move_shared_float32_to_gpu:
        handle_shared_float32(True)

    if enable_cuda:
        cuda_enabled = True

    if default_to_move_computation_to_gpu:
        # Do not add inplace tag here. We do not want to
        # enable/disable gpu opt based on the inplace tag.
        optdb.add_tags('gpu_opt',
                       'fast_compile',
                       'fast_run')
        optdb.add_tags('gpu_after_fusion',
                       'fast_run')
        optdb.add_tags('gpu_scanOp_make_inplace',
                       'fast_run')

    if force:
        try:
            # in case the device is just 'gpu',
            # we check that the driver initialized it correctly.
            cuda_ndarray.cuda_ndarray.CudaNdarray.zeros((5, 5))
        except (Exception, NameError) as e:
            # NameError when no gpu present as cuda_ndarray is not loaded.
            e.args += ("ERROR: GPU forced but failed. ",)
            raise
Developer: Ungar7, Project: Theano, Lines: 101, Source: __init__.py
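Examples 2 and 3 come from the older theano.sandbox.cuda back-end, whose device strings are 'gpu'/'gpuN' rather than 'cuda*'. A hedged sketch of how that entry point was typically invoked, assuming the legacy CUDA back-end can be imported and GPU 0 exists:

# Hypothetical call into the legacy back-end's use() function.
import theano.sandbox.cuda

# Select GPU 0 and raise instead of falling back to the CPU; on success,
# Example 2's code tags 'gpu_opt', 'gpu_after_fusion' and
# 'gpu_scanOp_make_inplace' so those optimizations run in fast modes.
theano.sandbox.cuda.use('gpu0', force=True)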

Example 3: use

                raise

    elif use.device_number != device and device != 'gpu':
        _logger.warning(("Ignoring call to use(%s), GPU number %i "
            "is already in use."),
            str(device), use.device_number)

    if move_shared_float32_to_gpu:
        handle_shared_float32(True)

    if enable_cuda:
        cuda_enabled = True

    if default_to_move_computation_to_gpu:
        optdb.add_tags('gpu_opt',
                       'fast_compile',
                       'fast_run',
                       'inplace')
        optdb.add_tags('gpu_after_fusion',
                       'fast_run',
                       'inplace')

    if force:
        try:
            # in case the device is just 'gpu',
            # we check that the driver initialized it correctly.
            cuda_ndarray.cuda_ndarray.CudaNdarray.zeros((5, 5))
        except (Exception, NameError), e:
            # NameError when no gpu present as cuda_ndarray is not loaded.
            e.args += ("ERROR: GPU forced but failed. ",)
            raise
use.device_number = None
Developer: 317070, Project: Theano, Lines: 32, Source: __init__.py

Example 4: print

    pygpu_activated = True
    if config.print_active_device:
        print("Mapped name %s to device %s: %s" % (name, dev, context.devname),
              file=sys.stderr)

# This maps things like 'cuda0' to the context object on that device.
init_dev.devmap = {}

if pygpu:
    try:
        if (config.device.startswith('cuda') or
            config.device.startswith('opencl')):
            init_dev(config.device)
            import theano.compile
            theano.compile.shared_constructor(gpuarray_shared_constructor)
            optdb.add_tags('gpuarray_opt', 'fast_run', 'fast_compile')
        elif (config.init_gpu_device.startswith('cuda') or
              config.init_gpu_device.startswith('opencl')):
            if config.device != 'cpu':
                raise ValueError('you must set device=cpu to use init_gpu_device.')
            if config.contexts != '':
                print("Using contexts will make init_gpu_device act like device and move all computations by default, which might not be what you want.")
            init_dev(config.init_gpu_device)
        if config.contexts != '':
            for n, d in (c.split('->') for c in config.contexts.split(';')):
                init_dev(d.strip(), n.strip())
            import theano.compile
            theano.compile.shared_constructor(gpuarray_shared_constructor)
            optdb.add_tags('gpuarray_opt', 'fast_run', 'fast_compile')

        from .basic_ops import (GpuAlloc, GpuContiguous, GpuEye, GpuFromHost,
Developer: chaitan3, Project: Theano, Lines: 31, Source: __init__.py

Example 5: import

# This is for documentation not to depend on the availability of pygpu
from type import (GpuArrayType, GpuArrayVariable, GpuArrayConstant,
                  GpuArraySharedVariable, gpuarray_shared_constructor)
import opt


def init_dev(dev):
    global pygpu_activated
    context = pygpu.init(dev)
    pygpu.set_default_context(context)
    pygpu_activated = True

if pygpu:
    try:
        if (config.device.startswith('cuda') or
            config.device.startswith('opencl')):
            init_dev(config.device)
            import theano.compile
            theano.compile.shared_constructor(gpuarray_shared_constructor)
            optdb.add_tags('gpuarray_opt', 'fast_run', 'inplace')
        elif config.gpuarray.init_device != '':
            init_dev(config.gpuarray.init_device)
    except Exception:
        error("Could not initialize pygpu, support disabled", exc_info=True)
else:
    if (config.gpuarray.init_device != '' or
        config.device.startswith('opencl') or
        config.device.startswith('cuda')):
        error("pygpu was configured but could not be imported", exc_info=True)
Developer: Donghuan, Project: Theano, Lines: 29, Source: __init__.py

Example 6: init_dev


def init_dev(dev):
    global pygpu_activated
    context = pygpu.init(dev)
    pygpu.set_default_context(context)
    pygpu_activated = True
    if config.print_active_device:
        print >>sys.stderr, "Using device %s: %s" % (dev, context.devname)
    # remember the active device
    init_dev.device = dev


init_dev.device = None

if pygpu:
    try:
        if config.device.startswith("cuda") or config.device.startswith("opencl"):
            init_dev(config.device)
            import theano.compile

            theano.compile.shared_constructor(gpuarray_shared_constructor)
            optdb.add_tags("gpuarray_opt", "fast_run", "fast_compile", "inplace")
        elif config.gpuarray.init_device != "":
            init_dev(config.gpuarray.init_device)
    except Exception:
        error("Could not initialize pygpu, support disabled", exc_info=True)
else:
    if config.gpuarray.init_device != "" or config.device.startswith("opencl") or config.device.startswith("cuda"):
        error("pygpu was configured but could not be imported", exc_info=True)
Developer: amanrajdce, Project: Theano, Lines: 28, Source: __init__.py

Example 7: use


#......... (part of the code omitted here) .........

    if device == "gpu":
        pass
    elif device.startswith("gpu"):
        device = int(device[3:])
    elif device == "cpu":
        device = -1
    else:
        raise ValueError("Invalid device identifier", device)
    if use.device_number is None:
        # No successful call to use() has been made yet
        if device != "gpu" and device < 0:
            return

        # Has PyCUDA already initialized the GPU context?
        pycuda_init_dev = False
        if config.pycuda.init:
            import theano.misc.pycuda_init

            pycuda_init_dev = theano.misc.pycuda_init.pycuda_available

        try:
            if (device != "gpu") and not pycuda_init_dev:
                assert isinstance(device, int)
                gpu_init(device, config.lib.cumem)
                use.device_number = device
                assert active_device_number() == device
            else:
                # This means the driver should select the GPU. As we
                # need the device number now, we force the driver to
                # select a GPU now and then query the active GPU. If we
                # checked the active GPU before the device is initialized,
                # we would always receive 0 even if another device is
                # selected later.
                cuda_ndarray.cuda_ndarray.select_a_gpu()
                use.device_number = active_device_number()
                # This is needed to initialize the cublas handle.
                gpu_init(use.device_number, config.lib.cumem)

            if test_driver:
                import theano.sandbox.cuda.tests.test_driver

                theano.sandbox.cuda.tests.test_driver.test_nvidia_driver1()
            if device_properties(use.device_number)["warpSize"] != 32:
                raise ValueError(
                    "Your GPU has a warpSize != 32. Currently"
                    " we have code that depends on this. Email"
                    " the Theano mailing list to tell us about"
                    " this new GPU as we don't know any with"
                    " this property"
                )

            if config.print_active_device:
                print("Using gpu device %d: %s" % (active_device_number(), active_device_name()), file=sys.stderr)
            if device_properties(use.device_number)["regsPerBlock"] < 16384:
                # We would try to use too many registers per block in many
                # places when there are only 8k registers per multiprocessor.
                _logger.warning(
                    "You are probably using an old GPU, that Theano"
                    " does not support."
                    " This means GPU code will most likely be slow AND may"
                    " crash when we try to use features"
                    " that your GPU does not support."
                )

        except (EnvironmentError, ValueError, RuntimeError) as e:
            _logger.error(("ERROR: Not using GPU." " Initialisation of device %s failed:\n%s"), str(device), e)
            cuda_enabled = False
            if force:
                e.args += (
                    ("You asked to force this device and it failed." " No fallback to the cpu or other gpu device."),
                )
                raise

    elif use.device_number != device and device != "gpu":
        _logger.warning(
            ("Ignoring call to use(%s), GPU number %i " "is already in use."), str(device), use.device_number
        )

    if move_shared_float32_to_gpu:
        handle_shared_float32(True)

    if enable_cuda:
        cuda_enabled = True

    if default_to_move_computation_to_gpu:
        # Do not add inplace tag here. We do not want to
        # enable/disable gpu opt based on the inplace tag.
        optdb.add_tags("gpu_opt", "fast_compile", "fast_run")
        optdb.add_tags("gpu_after_fusion", "fast_run")

    if force:
        try:
            # in case the device is just 'gpu',
            # we check that the driver initialized it correctly.
            cuda_ndarray.cuda_ndarray.CudaNdarray.zeros((5, 5))
        except (Exception, NameError) as e:
            # NameError when no gpu present as cuda_ndarray is not loaded.
            e.args += ("ERROR: GPU forced but failed. ",)
            raise
Developer: orhanf, Project: configs, Lines: 101, Source: __init__.py

Example 8: print

        print("Mapped name %s to device %s: %s %s" % (name, dev, context.devname, pcibusid), file=sys.stderr)
    pygpu_activated = True


# This maps things like 'cuda0' to the context object on that device.
init_dev.devmap = {}

if pygpu:
    try:
        if config.device.startswith("cuda") or config.device.startswith("opencl"):
            init_dev(config.device)
            import theano.compile

            theano.compile.shared_constructor(gpuarray_shared_constructor)
            optdb.add_tags("gpuarray_opt", "fast_run", "fast_compile")
            optdb.add_tags("gpua_scanOp_make_inplace", "fast_run")
        elif config.init_gpu_device.startswith("cuda") or config.init_gpu_device.startswith("opencl"):
            if config.device != "cpu":
                raise ValueError("you must set device=cpu to use init_gpu_device.")
            if config.contexts != "":
                print(
                    "Using contexts will make init_gpu_device act like device and move all computations by default, which might not be what you want."
                )
            init_dev(config.init_gpu_device)
        if config.contexts != "":
            for n, d in (c.split("->") for c in config.contexts.split(";")):
                init_dev(d.strip(), n.strip())
            import theano.compile

            theano.compile.shared_constructor(gpuarray_shared_constructor)
Developer: Theano, Project: Theano, Lines: 30, Source: __init__.py
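Examples 4 to 8 are module-level initialization code, so add_tags is not called by the user directly; it runs as a side effect of importing Theano with a GPU device configured. A hedged sketch of triggering that path, assuming a gpuarray-era Theano and an available CUDA device:

# Hypothetical way to exercise the module-level code in Examples 4 and 8.
import os

# Must be set before Theano is imported; the gpuarray __init__ then calls
# init_dev('cuda0') followed by optdb.add_tags('gpuarray_opt', ...).
os.environ["THEANO_FLAGS"] = "device=cuda0,floatX=float32"

import theano  # the import itself performs the GPU initialization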


Note: The theano.compile.optdb.add_tags examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. Please follow the corresponding project's License when using or redistributing the code; do not reproduce this compilation without permission.