本文整理匯總了Python中torch.utils.ffi.create_extension方法的典型用法代碼示例。如果您正苦於以下問題:Python ffi.create_extension方法的具體用法?Python ffi.create_extension怎麽用?Python ffi.create_extension使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類torch.utils.ffi
的用法示例。
在下文中一共展示了ffi.create_extension方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
Example 1: build
# Required import: from torch.utils import ffi [as alias]
# Or equivalently: from torch.utils.ffi import create_extension [as alias]
def build(args):
    """Build the ``_ext.pointnet2`` FFI extension with CUDA support.

    Args:
        args: namespace with an ``objs`` attribute — a list of extra object
            files to link into the extension (e.g. pre-compiled CUDA kernels).

    Note: uses the legacy ``torch.utils.ffi.create_extension`` API, which was
    deprecated in PyTorch 1.0 in favor of ``torch.utils.cpp_extension``.
    """
    # Copy before extending: the original code did ``extra_objects += [...]``
    # directly on args.objs, mutating the caller's list in place.
    extra_objects = list(args.objs)
    # Link against every static CUDA library found in the toolkit directory.
    # glob.glob already returns a list, so no comprehension copy is needed.
    extra_objects += glob.glob('/usr/local/cuda/lib64/*.a')
    ffi = create_extension(
        '_ext.pointnet2',
        headers=glob.glob("cinclude/*_wrapper.h"),
        sources=glob.glob("csrc/*.c"),
        define_macros=[('WITH_CUDA', None)],
        relative_to=__file__,
        with_cuda=True,
        extra_objects=extra_objects,
        include_dirs=[osp.join(base_dir, 'cinclude')],
        verbose=False,
        package=False
    )
    ffi.build()
Example 2: is_torch_cuda
# Required import: from torch.utils import ffi [as alias]
# Or equivalently: from torch.utils.ffi import create_extension [as alias]
def is_torch_cuda():
    """Return True if the installed PyTorch can build CUDA FFI extensions.

    Probes by compiling a tiny dummy extension with ``with_cuda=True``; any
    failure — ``torch.utils.ffi`` missing/deprecated, no CUDA toolchain, or
    a compile error — is interpreted as "no CUDA support".

    Returns:
        bool: True if the dummy CUDA extension built successfully.
    """
    try:
        from torch.utils.ffi import create_extension
        cuda_test_ext = create_extension(
            name='horovod.torch.test_cuda',
            headers=['horovod/torch/dummy.h'],
            sources=[],
            with_cuda=True,
            extra_compile_args=['-std=c11', '-fPIC', '-O2']
        )
        cuda_test_ext.build()
        return True
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer silently swallowed here. The compile
        # error itself is printed by the build machinery, hence "Above".
        print('INFO: Above error indicates that this PyTorch installation does not support CUDA.')
        return False
Example 3: build_torch_extension
# Required import: from torch.utils import ffi [as alias]
# Or equivalently: from torch.utils.ffi import create_extension [as alias]
def build_torch_extension(build_ext, options, abi_compile_flags):
    """Build Horovod's PyTorch FFI extensions (mpi_lib and mpi_lib_impl).

    Args:
        build_ext: the distutils/setuptools ``build_ext`` command instance
            used to compile the final extensions.
        options: dict read here for 'MACROS', 'INCLUDES', 'SOURCES',
            'COMPILE_FLAGS', 'LINK_FLAGS', 'LIBRARY_DIRS', 'LIBRARIES'
            — presumably assembled by the surrounding setup script;
            confirm full schema against the caller.
        abi_compile_flags: extra compile flags appended to the impl
            extension's args (presumably for C++ ABI compatibility with
            the installed torch — TODO confirm).

    Raises:
        DistutilsPlatformError: if the HAVE_CUDA macro was requested but
            this PyTorch installation cannot build CUDA code.

    NOTE(review): this function continues beyond the visible chunk
    (see the trailing "run the customize_compiler" comment).
    """
    check_torch_import()
    have_cuda = is_torch_cuda()
    # Fail fast: a GPU build was requested but torch cannot compile CUDA.
    if not have_cuda and check_macro(options['MACROS'], 'HAVE_CUDA'):
        raise DistutilsPlatformError(
            'Horovod build with GPU support was requested, but this PyTorch '
            'installation does not support CUDA.')
    # Update HAVE_CUDA to mean that PyTorch supports CUDA. Internally, we will be checking
    # HOROVOD_GPU_(ALLREDUCE|ALLGATHER|BROADCAST) to decide whether we should use GPU
    # version or transfer tensors to CPU memory for those operations.
    updated_macros = set_macro(
        options['MACROS'], 'HAVE_CUDA', str(int(have_cuda)))
    # Create_extension overwrites these files which are customized, we need to protect them.
    with protect_files('horovod/torch/mpi_lib/__init__.py',
                       'horovod/torch/mpi_lib_impl/__init__.py'):
        from torch.utils.ffi import create_extension
        # Interface extension: headers only, no sources; plain C.
        ffi_iface = create_extension(
            name='horovod.torch.mpi_lib',
            headers=['horovod/torch/interface.h'] +
            (['horovod/torch/interface_cuda.h'] if have_cuda else []),
            with_cuda=have_cuda,
            language='c',
            package=True,
            sources=[],
            extra_compile_args=['-std=c11', '-fPIC', '-O2']
        )
        # Implementation extension: all the C++ sources, macros, and link flags.
        ffi_impl = create_extension(
            name='horovod.torch.mpi_lib_impl',
            headers=[],
            with_cuda=have_cuda,
            language='c++',
            package=True,
            source_extension='.cc',
            define_macros=updated_macros,
            include_dirs=options['INCLUDES'],
            sources=options['SOURCES'] + ['horovod/torch/mpi_ops.cc',
                                          'horovod/torch/handle_manager.cc',
                                          'horovod/torch/ready_event.cc',
                                          'horovod/torch/tensor_util.cc',
                                          'horovod/torch/cuda_util.cc',
                                          'horovod/torch/adapter.cc'],
            extra_compile_args=options['COMPILE_FLAGS'] + abi_compile_flags,
            extra_link_args=options['LINK_FLAGS'],
            library_dirs=options['LIBRARY_DIRS'],
            libraries=options['LIBRARIES']
        )
    # Copy each FFI-generated distutils Extension's attributes onto the
    # corresponding pre-declared setuptools Extension, then build it.
    for ffi, setuptools_ext in [(ffi_iface, torch_mpi_lib),
                                (ffi_impl, torch_mpi_lib_impl)]:
        ffi_ext = ffi.distutils_extension()
        # ffi_ext is distutils Extension, not setuptools Extension
        for k, v in ffi_ext.__dict__.items():
            setuptools_ext.__dict__[k] = v
        build_ext.build_extension(setuptools_ext)
    # run the customize_compiler