本文整理汇总了Python中torch.utils.cpp_extension.CUDAExtension方法的典型用法代码示例。如果您正苦于以下问题:Python cpp_extension.CUDAExtension方法的具体用法?Python cpp_extension.CUDAExtension怎么用?Python cpp_extension.CUDAExtension使用的例子?这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块torch.utils.cpp_extension的用法示例。
在下文中一共展示了cpp_extension.CUDAExtension方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_extension
# 需要导入模块: from torch.utils import cpp_extension [as 别名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 别名]
def get_extension(cuda_support):
    """Build the torchac backend extension for the requested target.

    Args:
        cuda_support (bool): If True, build a ``CUDAExtension`` containing
            both the C++ and the CUDA kernel sources, with the
            ``COMPILE_CUDA`` macro defined. Otherwise build a CPU-only
            ``CppExtension``.

    Returns:
        setuptools.Extension: the configured extension object.

    Raises:
        ValueError: if the backend source directory next to this setup
            file does not exist.
    """
    # Directory containing this setup file.
    setup_dir = os.path.dirname(os.path.realpath(__file__))
    # Where the .cpp and .cu files are expected to live.
    prefix = os.path.join(setup_dir, MODULE_BASE_NAME)
    if not os.path.isdir(prefix):
        # Fixed typo in the error message ('foler' -> 'folder').
        raise ValueError('Did not find backend folder: {}'.format(prefix))
    if cuda_support:
        # Untested nvcc versions only produce a warning, not a failure.
        nvcc_available, nvcc_version = supported_nvcc_available()
        if not nvcc_available:
            print(_bold_warn_str('***WARN') + ': Found untested nvcc {}'.format(nvcc_version))
        return CUDAExtension(
            MODULE_BASE_NAME + '_gpu',
            prefixed(prefix, ['torchac.cpp', 'torchac_kernel.cu']),
            define_macros=[('COMPILE_CUDA', '1')])
    return CppExtension(
        MODULE_BASE_NAME + '_cpu',
        prefixed(prefix, ['torchac.cpp']))


# TODO:
# Add further supported version as specified in readme
示例2: make_cuda_ext
# 需要导入模块: from torch.utils import cpp_extension [as 别名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 别名]
def make_cuda_ext(name, module, sources, include_dirs=None):
    """Create a ``CUDAExtension`` for a submodule of MMSkeleton.

    Args:
        name (str): Leaf name of the extension.
        module (str): Dotted package path the sources live under.
        sources (list[str]): Source file names relative to the module
            directory.
        include_dirs (list[str], optional): Extra include directories.
            Defaults to none.

    Returns:
        torch.utils.cpp_extension.CUDAExtension: the configured extension.

    Raises:
        EnvironmentError: if CUDA is unavailable and the ``FORCE_CUDA``
            environment variable is not ``'1'``.
    """
    define_macros = []
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [("WITH_CUDA", None)]
    else:
        raise EnvironmentError('CUDA is required to compile MMSkeleton!')
    return CUDAExtension(
        name='{}.{}'.format(module, name),
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        define_macros=define_macros,
        # A fresh list per call: the old `include_dirs=[]` default was a
        # shared mutable object that CUDAExtension extends in place, so
        # repeated calls accumulated include paths into the default.
        include_dirs=list(include_dirs) if include_dirs else [],
        extra_compile_args={
            'cxx': [],
            # Standard PyTorch-extension workaround: disable half-precision
            # operator overloads that break some host compilers.
            'nvcc': [
                '-D__CUDA_NO_HALF_OPERATORS__',
                '-D__CUDA_NO_HALF_CONVERSIONS__',
                '-D__CUDA_NO_HALF2_OPERATORS__',
            ]
        })
示例3: make_cuda_ext
# 需要导入模块: from torch.utils import cpp_extension [as 别名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 别名]
def make_cuda_ext(name, module, sources):
    """Return a ``CUDAExtension`` named ``<module>.<name>``.

    Source paths are resolved relative to the directory implied by the
    dotted ``module`` path. Raises ``EnvironmentError`` when CUDA is not
    available and ``FORCE_CUDA`` is not set to ``'1'``.
    """
    force_cuda = os.getenv('FORCE_CUDA', '0') == '1'
    if not (torch.cuda.is_available() or force_cuda):
        raise EnvironmentError('CUDA is required to compile Faster RCNN!')
    pkg_dir = os.path.join(*module.split('.'))
    nvcc_flags = [
        '-D__CUDA_NO_HALF_OPERATORS__',
        '-D__CUDA_NO_HALF_CONVERSIONS__',
        '-D__CUDA_NO_HALF2_OPERATORS__',
    ]
    return CUDAExtension(
        name='{}.{}'.format(module, name),
        sources=[os.path.join(pkg_dir, src) for src in sources],
        define_macros=[("WITH_CUDA", None)],
        extra_compile_args={'cxx': [], 'nvcc': nvcc_flags})
示例4: make_cuda_ext
# 需要导入模块: from torch.utils import cpp_extension [as 别名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 别名]
def make_cuda_ext(name, module, sources, extra_compile_args=None):
    """Create a ``CUDAExtension`` named ``<module>.<name>``.

    Args:
        name (str): Leaf name of the extension.
        module (str): Dotted package path the sources live under.
        sources (list[str]): Source file names relative to the module
            directory.
        extra_compile_args (dict, optional): Per-compiler flag lists
            (``"cxx"`` / ``"nvcc"``). The caller's dict is never mutated.

    Returns:
        torch.utils.cpp_extension.CUDAExtension: the configured extension.
    """
    # Work on a private deep copy up front. The previous version used a
    # shared mutable default (`={}`) and mutated it -- and any caller's
    # dict -- before deep-copying, so flags leaked across calls.
    args = copy.deepcopy(extra_compile_args) if extra_compile_args else {}
    args.setdefault("cxx", [])
    # Standard PyTorch-extension workaround for half-precision overloads.
    nvcc_flags = [
        "-D__CUDA_NO_HALF_OPERATORS__",
        "-D__CUDA_NO_HALF_CONVERSIONS__",
        "-D__CUDA_NO_HALF2_OPERATORS__",
    ]
    args["nvcc"] = args.get("nvcc", []) + nvcc_flags
    return CUDAExtension(
        name="{}.{}".format(module, name),
        sources=[os.path.join(*module.split("."), p) for p in sources],
        extra_compile_args=args,
    )
示例5: make_cuda_ext
# 需要导入模块: from torch.utils import cpp_extension [as 别名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 别名]
def make_cuda_ext(name, sources):
    """Wrap *sources* in a ``CUDAExtension`` called *name*.

    nvcc receives the usual half-precision workaround defines; the C++
    compiler gets no extra flags.
    """
    compile_args = {
        'cxx': [],
        'nvcc': [
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ],
    }
    return CUDAExtension(name=name,
                         sources=list(sources),
                         extra_compile_args=compile_args)
示例6: make_extension
# 需要导入模块: from torch.utils import cpp_extension [as 别名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 别名]
def make_extension(name, package):
    """Build the CUDA backend extension for *name* under *package*.

    Sources are discovered under ``src/<name>``; the compiled module is
    exposed as ``<package>.<name>._backend``.
    """
    ext_name = "{}.{}._backend".format(package, name)
    compile_args = {
        "cxx": ["-O3"],
        "nvcc": ["--expt-extended-lambda"],
    }
    return CUDAExtension(
        name=ext_name,
        sources=find_sources(path.join("src", name)),
        extra_compile_args=compile_args,
        include_dirs=["include/"],
    )
示例7: make_cuda_ext
# 需要导入模块: from torch.utils import cpp_extension [as 别名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 别名]
def make_cuda_ext(name, module, sources):
    """Return a ``CUDAExtension`` named ``<module>.<name>``.

    Source paths are taken relative to the directory of the dotted
    ``module`` path; nvcc receives the standard half-precision
    workaround defines.
    """
    pkg_dir = os.path.join(*module.split('.'))
    nvcc_flags = [
        '-D__CUDA_NO_HALF_OPERATORS__',
        '-D__CUDA_NO_HALF_CONVERSIONS__',
        '-D__CUDA_NO_HALF2_OPERATORS__',
    ]
    return CUDAExtension(
        name='.'.join([module, name]),
        sources=[os.path.join(pkg_dir, src) for src in sources],
        extra_compile_args={'cxx': [], 'nvcc': nvcc_flags})
示例8: make_cuda_ext
# 需要导入模块: from torch.utils import cpp_extension [as 别名]
# 或者: from torch.utils.cpp_extension import CUDAExtension [as 别名]
def make_cuda_ext(name, sources):
    """Create a ``CUDAExtension`` *name* built from *sources*.

    Applies the usual nvcc half-precision workaround flags and no extra
    C++ compiler flags.
    """
    nvcc_flags = [
        '-D__CUDA_NO_HALF_OPERATORS__',
        '-D__CUDA_NO_HALF_CONVERSIONS__',
        '-D__CUDA_NO_HALF2_OPERATORS__',
    ]
    return CUDAExtension(
        name='{}'.format(name),
        sources=list(sources),
        extra_compile_args={'cxx': [], 'nvcc': nvcc_flags},
    )