本文整理匯總了Python中torch.utils.cpp_extension.CUDAExtension方法的典型用法代碼示例。如果您正苦於以下問題:Python cpp_extension.CUDAExtension方法的具體用法?Python cpp_extension.CUDAExtension怎麽用?Python cpp_extension.CUDAExtension使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類torch.utils.cpp_extension
的用法示例。
在下文中一共展示了cpp_extension.CUDAExtension方法的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: get_extension
# 需要導入模塊: from torch.utils import cpp_extension [as 別名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 別名]
def get_extension(cuda_support):
    """Build the torchac backend extension for GPU or CPU.

    :param cuda_support: if True, build a ``CUDAExtension`` that compiles the
        CUDA kernel with the ``COMPILE_CUDA`` macro defined; otherwise build a
        CPU-only ``CppExtension``.
    :return: a configured ``CUDAExtension`` or ``CppExtension``.
    :raises ValueError: if the backend source directory is missing.
    """
    # Directory of this setup file.
    setup_dir = os.path.dirname(os.path.realpath(__file__))
    # Where the .cpp and .cu files are expected to live.
    prefix = os.path.join(setup_dir, MODULE_BASE_NAME)
    if not os.path.isdir(prefix):
        raise ValueError('Did not find backend folder: {}'.format(prefix))
    if cuda_support:
        # Warn (but do not abort) when the installed nvcc version has not
        # been tested against this code.
        nvcc_available, nvcc_version = supported_nvcc_available()
        if not nvcc_available:
            print(_bold_warn_str('***WARN') + ': Found untested nvcc {}'.format(nvcc_version))
        return CUDAExtension(
            MODULE_BASE_NAME + '_gpu',
            prefixed(prefix, ['torchac.cpp', 'torchac_kernel.cu']),
            define_macros=[('COMPILE_CUDA', '1')])
    else:
        return CppExtension(
            MODULE_BASE_NAME + '_cpu',
            prefixed(prefix, ['torchac.cpp']))

# TODO:
# Add further supported version as specified in readme
示例2: make_cuda_ext
# 需要導入模塊: from torch.utils import cpp_extension [as 別名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 別名]
def make_cuda_ext(name, module, sources, include_dirs=None):
    """Create a ``CUDAExtension`` for an MMSkeleton submodule.

    :param name: extension name, appended to *module* as ``module.name``.
    :param module: dotted package path; also used to locate the sources.
    :param sources: source file names relative to the module directory.
    :param include_dirs: optional extra include directories (default: none).
    :return: a configured ``CUDAExtension``.
    :raises EnvironmentError: if CUDA is unavailable and ``FORCE_CUDA`` is
        not set to ``'1'``.
    """
    define_macros = []
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [("WITH_CUDA", None)]
    else:
        raise EnvironmentError('CUDA is required to compile MMSkeleton!')
    # NOTE: the default used to be a mutable `[]`. CUDAExtension extends
    # include_dirs in place, which polluted the shared default list across
    # calls; use None + a fresh list instead.
    return CUDAExtension(
        name='{}.{}'.format(module, name),
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        define_macros=define_macros,
        include_dirs=include_dirs if include_dirs is not None else [],
        extra_compile_args={
            'cxx': [],
            'nvcc': [
                '-D__CUDA_NO_HALF_OPERATORS__',
                '-D__CUDA_NO_HALF_CONVERSIONS__',
                '-D__CUDA_NO_HALF2_OPERATORS__',
            ]
        })
示例3: make_cuda_ext
# 需要導入模塊: from torch.utils import cpp_extension [as 別名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 別名]
def make_cuda_ext(name, module, sources):
    """Return a ``CUDAExtension`` named ``module.name`` for *sources*.

    Source paths are resolved relative to the module's directory. Raises
    ``EnvironmentError`` when CUDA is unavailable and the ``FORCE_CUDA``
    environment variable is not ``'1'``.
    """
    cuda_ok = torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1'
    if not cuda_ok:
        raise EnvironmentError('CUDA is required to compile Faster RCNN!')
    macros = [("WITH_CUDA", None)]
    full_name = '{}.{}'.format(module, name)
    module_parts = module.split('.')
    source_paths = [os.path.join(*module_parts, src) for src in sources]
    # Disable nvcc's half-precision operator overloads.
    nvcc_flags = [
        '-D__CUDA_NO_HALF_OPERATORS__',
        '-D__CUDA_NO_HALF_CONVERSIONS__',
        '-D__CUDA_NO_HALF2_OPERATORS__',
    ]
    return CUDAExtension(
        name=full_name,
        sources=source_paths,
        define_macros=macros,
        extra_compile_args={'cxx': [], 'nvcc': nvcc_flags})
示例4: make_cuda_ext
# 需要導入模塊: from torch.utils import cpp_extension [as 別名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 別名]
def make_cuda_ext(name, module, sources, extra_compile_args=None):
    """Create a ``CUDAExtension`` named ``module.name`` for *sources*.

    :param name: extension name, appended to *module* as ``module.name``.
    :param module: dotted package path; also used to locate the sources.
    :param sources: source file names relative to the module directory.
    :param extra_compile_args: optional dict of per-compiler flags; nvcc
        half-precision-disabling flags are always appended.
    :return: a configured ``CUDAExtension``.
    """
    # Work on a private copy. The original implementation mutated the
    # caller's dict (and the shared mutable `{}` default), so repeated
    # calls accumulated duplicate nvcc flags.
    args = copy.deepcopy(extra_compile_args) if extra_compile_args else {}
    args.setdefault("cxx", [])
    nvcc_flags = [
        "-D__CUDA_NO_HALF_OPERATORS__",
        "-D__CUDA_NO_HALF_CONVERSIONS__",
        "-D__CUDA_NO_HALF2_OPERATORS__",
    ]
    # nvcc flags go after any caller-supplied ones, matching the original
    # append semantics.
    args["nvcc"] = args.get("nvcc", []) + nvcc_flags
    return CUDAExtension(
        name="{}.{}".format(module, name),
        sources=[os.path.join(*module.split("."), p) for p in sources],
        extra_compile_args=args,
    )
示例5: make_cuda_ext
# 需要導入模塊: from torch.utils import cpp_extension [as 別名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 別名]
def make_cuda_ext(name, sources):
    """Build a ``CUDAExtension`` with nvcc half-precision operators disabled."""
    nvcc_flags = [
        '-D__CUDA_NO_HALF_OPERATORS__',
        '-D__CUDA_NO_HALF_CONVERSIONS__',
        '-D__CUDA_NO_HALF2_OPERATORS__',
    ]
    compile_args = {'cxx': [], 'nvcc': nvcc_flags}
    return CUDAExtension(
        name=name,
        sources=list(sources),
        extra_compile_args=compile_args)
示例6: make_extension
# 需要導入模塊: from torch.utils import cpp_extension [as 別名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 別名]
def make_extension(name, package):
    """Create the CUDA backend extension ``package.name._backend``.

    Sources are discovered under ``src/<name>`` via ``find_sources``; C++
    is compiled with -O3 and nvcc with extended lambda support enabled.
    """
    backend_name = "{}.{}._backend".format(package, name)
    compile_args = {
        "cxx": ["-O3"],
        "nvcc": ["--expt-extended-lambda"],
    }
    return CUDAExtension(
        name=backend_name,
        sources=find_sources(path.join("src", name)),
        extra_compile_args=compile_args,
        include_dirs=["include/"],
    )
示例7: make_cuda_ext
# 需要導入模塊: from torch.utils import cpp_extension [as 別名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 別名]
def make_cuda_ext(name, module, sources):
    """Return a ``CUDAExtension`` ``module.name`` whose sources live inside
    the module's directory, with nvcc half-precision operators disabled."""
    parts = module.split('.')
    src_paths = [os.path.join(*parts, src) for src in sources]
    nvcc_flags = ['-D__CUDA_NO_HALF_OPERATORS__',
                  '-D__CUDA_NO_HALF_CONVERSIONS__',
                  '-D__CUDA_NO_HALF2_OPERATORS__']
    return CUDAExtension(
        name='.'.join([module, name]),
        sources=src_paths,
        extra_compile_args={'cxx': [], 'nvcc': nvcc_flags})
示例8: make_cuda_ext
# 需要導入模塊: from torch.utils import cpp_extension [as 別名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 別名]
def make_cuda_ext(name, sources):
    """Create a ``CUDAExtension`` for *name* and *sources*, disabling
    nvcc's half-precision operator overloads."""
    flags = ['-D__CUDA_NO_HALF_OPERATORS__',
             '-D__CUDA_NO_HALF_CONVERSIONS__',
             '-D__CUDA_NO_HALF2_OPERATORS__']
    return CUDAExtension(
        name='{}'.format(name),
        sources=list(sources),
        extra_compile_args={'cxx': [], 'nvcc': flags},
    )