

Python ffi.create_extension Method Code Examples

This article collects typical usage examples of the Python method torch.utils.ffi.create_extension. If you are unsure what exactly ffi.create_extension does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples from the torch.utils.ffi module.


Three code examples of the ffi.create_extension method are shown below, sorted by popularity by default.
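Before the examples, here is a minimal sketch of the typical call pattern, distilled from the three snippets below. The module name and file paths are placeholders, and note that torch.utils.ffi was deprecated and later removed (around PyTorch 1.0), so this API is only available in older PyTorch releases.

# Minimal sketch of the usual create_extension workflow; the name and paths
# below are placeholders, not taken from any of the examples.
from torch.utils.ffi import create_extension

ffi = create_extension(
    '_ext.my_module',              # dotted name of the package to generate
    headers=['src/my_module.h'],   # C headers declaring the exported functions
    sources=['src/my_module.c'],   # C sources to compile
    relative_to=__file__,          # resolve the paths above relative to this file
    with_cuda=False,               # set to True to compile/link with CUDA support
)
ffi.build()                        # compiles the sources and writes _ext/my_module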

Example 1: build

# Required module: from torch.utils import ffi [as alias]
# Or: from torch.utils.ffi import create_extension [as alias]
import glob
import os.path as osp

from torch.utils.ffi import create_extension

# Assumption: base_dir is the directory containing this build script; the
# original snippet uses it below but does not define it.
base_dir = osp.dirname(osp.abspath(__file__))


def build(args):
    # Link the pre-built CUDA objects passed in plus the static CUDA libraries.
    extra_objects = args.objs
    extra_objects += glob.glob('/usr/local/cuda/lib64/*.a')

    ffi = create_extension(
        '_ext.pointnet2',
        headers=glob.glob("cinclude/*_wrapper.h"),
        sources=glob.glob("csrc/*.c"),
        define_macros=[('WITH_CUDA', None)],
        relative_to=__file__,
        with_cuda=True,
        extra_objects=extra_objects,
        include_dirs=[osp.join(base_dir, 'cinclude')],
        verbose=False,
        package=False
    )
    ffi.build() 
Author: Yochengliu, Project: Relation-Shape-CNN, Lines: 19, Source: build_ffi.py
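The build function above expects an args object carrying the extra object files to link. A plausible driver for it might look like the sketch below; the --objs flag name is inferred from the args.objs attribute and is not necessarily the project's actual command-line interface.

# Hypothetical driver for build(); the --objs flag name is an assumption.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Build the pointnet2 FFI extension')
    parser.add_argument('--objs', nargs='*', default=[],
                        help='pre-built CUDA object files to link into the extension')
    build(parser.parse_args())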

Example 2: is_torch_cuda

# Required module: from torch.utils import ffi [as alias]
# Or: from torch.utils.ffi import create_extension [as alias]
def is_torch_cuda():
    # Probe CUDA support by building a trivial extension with with_cuda=True;
    # if this PyTorch installation was built without CUDA, the build fails.
    try:
        from torch.utils.ffi import create_extension
        cuda_test_ext = create_extension(
            name='horovod.torch.test_cuda',
            headers=['horovod/torch/dummy.h'],
            sources=[],
            with_cuda=True,
            extra_compile_args=['-std=c11', '-fPIC', '-O2']
        )
        cuda_test_ext.build()
        return True
    except Exception:
        print('INFO: Above error indicates that this PyTorch installation does not support CUDA.')
        return False 
Author: mlperf, Project: training_results_v0.6, Lines: 17, Source: setup.py

Example 3: build_torch_extension

# Required module: from torch.utils import ffi [as alias]
# Or: from torch.utils.ffi import create_extension [as alias]
def build_torch_extension(build_ext, options, abi_compile_flags):
    check_torch_import()

    have_cuda = is_torch_cuda()
    if not have_cuda and check_macro(options['MACROS'], 'HAVE_CUDA'):
        raise DistutilsPlatformError(
            'Horovod build with GPU support was requested, but this PyTorch '
            'installation does not support CUDA.')

    # Update HAVE_CUDA to mean that PyTorch supports CUDA. Internally, we will be checking
    # HOROVOD_GPU_(ALLREDUCE|ALLGATHER|BROADCAST) to decide whether we should use the GPU
    # version or transfer tensors to CPU memory for those operations.
    updated_macros = set_macro(
        options['MACROS'], 'HAVE_CUDA', str(int(have_cuda)))

    # create_extension overwrites these customized files, so we need to protect them
    # (a sketch of such a helper appears after this example).
    with protect_files('horovod/torch/mpi_lib/__init__.py',
                       'horovod/torch/mpi_lib_impl/__init__.py'):
        from torch.utils.ffi import create_extension
        ffi_iface = create_extension(
            name='horovod.torch.mpi_lib',
            headers=['horovod/torch/interface.h'] +
            (['horovod/torch/interface_cuda.h'] if have_cuda else []),
            with_cuda=have_cuda,
            language='c',
            package=True,
            sources=[],
            extra_compile_args=['-std=c11', '-fPIC', '-O2']
        )
        ffi_impl = create_extension(
            name='horovod.torch.mpi_lib_impl',
            headers=[],
            with_cuda=have_cuda,
            language='c++',
            package=True,
            source_extension='.cc',
            define_macros=updated_macros,
            include_dirs=options['INCLUDES'],
            sources=options['SOURCES'] + ['horovod/torch/mpi_ops.cc',
                                          'horovod/torch/handle_manager.cc',
                                          'horovod/torch/ready_event.cc',
                                          'horovod/torch/tensor_util.cc',
                                          'horovod/torch/cuda_util.cc',
                                          'horovod/torch/adapter.cc'],
            extra_compile_args=options['COMPILE_FLAGS'] + abi_compile_flags,
            extra_link_args=options['LINK_FLAGS'],
            library_dirs=options['LIBRARY_DIRS'],
            libraries=options['LIBRARIES']
        )

    for ffi, setuptools_ext in [(ffi_iface, torch_mpi_lib),
                                (ffi_impl, torch_mpi_lib_impl)]:
        ffi_ext = ffi.distutils_extension()
        # ffi_ext is distutils Extension, not setuptools Extension
        for k, v in ffi_ext.__dict__.items():
            setuptools_ext.__dict__[k] = v
        build_ext.build_extension(setuptools_ext)


# run the customize_compiler 
Author: mlperf, Project: training_results_v0.6, Lines: 62, Source: setup.py
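The protect_files context manager used in Example 3 is not shown in the snippet. Based on the comment explaining its purpose, a plausible implementation (a sketch under that assumption, not the project's actual code) snapshots the customized files and restores them after create_extension has run:

# Sketch of a protect_files-style helper: back up the named files, let the
# body run (which may overwrite them), then restore the original contents.
import contextlib

@contextlib.contextmanager
def protect_files(*paths):
    backups = {}
    for path in paths:
        with open(path, 'rb') as f:
            backups[path] = f.read()
    try:
        yield
    finally:
        for path, contents in backups.items():
            with open(path, 'wb') as f:
                f.write(contents)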


Note: The torch.utils.ffi.create_extension examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before distributing or reusing the code, and do not reproduce this article without permission.