This article collects typical usage examples of distutils.errors.LinkError in Python. If you are wondering what errors.LinkError is for or how to use it, the curated code examples here may help. Note that LinkError is an exception class, not a method: it is defined in the distutils.errors module and is raised by distutils compilers when a link step fails, so build scripts catch it (usually together with CompileError) to probe compiler flags, libraries, and ABI settings at build time. You can also read the distutils.errors module documentation for more details.
Below are 13 code examples of errors.LinkError, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
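All of the examples below follow the same pattern: compile and link a throwaway source file, and treat distutils' CompileError / LinkError as "this flag, library, or ABI does not work here". Before the examples, here is a minimal, self-contained sketch of that pattern. It is not taken from any of the projects below; the helper name snippet_links and the probe source are purely illustrative.

# Minimal sketch of the shared pattern: probe whether a snippet compiles and links,
# treating CompileError/LinkError as "this configuration does not work".
import os
import tempfile

from distutils.ccompiler import new_compiler
from distutils.errors import CompileError, LinkError
from distutils.sysconfig import customize_compiler


def snippet_links(source, libraries=None):
    """Return True if `source` compiles and links into an executable."""
    compiler = new_compiler()
    customize_compiler(compiler)
    with tempfile.TemporaryDirectory() as tmp_dir:
        src = os.path.join(tmp_dir, 'probe.c')
        with open(src, 'w') as f:
            f.write(source)
        try:
            objects = compiler.compile([src], output_dir=tmp_dir)
            compiler.link_executable(objects, os.path.join(tmp_dir, 'probe'),
                                     libraries=libraries)
            return True
        except (CompileError, LinkError):
            # CompileError: the snippet did not compile;
            # LinkError: it compiled but the link step failed.
            return False


# Illustrative use, e.g. check whether -lm resolves:
# snippet_links('#include <math.h>\nint main(void) { return (int) sqrt(4.0); }\n',
#               libraries=['m'])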
Example 1: get_cpp_flags
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def get_cpp_flags(build_ext):
    last_err = None
    default_flags = ['-std=c++11', '-fPIC', '-O2']
    if sys.platform == 'darwin':
        # Darwin most likely will have Clang, which has libc++.
        flags_to_try = [default_flags + ['-stdlib=libc++'], default_flags]
    else:
        flags_to_try = [default_flags, default_flags + ['-stdlib=libc++']]
    for cpp_flags in flags_to_try:
        try:
            test_compile(build_ext, 'test_cpp_flags', extra_preargs=cpp_flags,
                         code=textwrap.dedent('''\
                    #include <unordered_map>
                    void test() {
                    }
                    '''))

            return cpp_flags
        except (CompileError, LinkError):
            last_err = 'Unable to determine C++ compilation flags (see error above).'
        except Exception:
            last_err = 'Unable to determine C++ compilation flags. ' \
                       'Last error:\n\n%s' % traceback.format_exc()

    raise DistutilsPlatformError(last_err)
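Examples 1 through 8, 10 and 12 rely on a test_compile helper that is defined elsewhere in the respective setup.py files and is not part of these listings. As a rough, assumed sketch (not the projects' actual code), such a helper writes the snippet to the build temp directory, compiles it with the requested flags, and links it into a shared object, letting CompileError / LinkError propagate to the callers; later examples split extra_preargs into separate extra_compile_preargs / extra_link_preargs parameters.

# Assumed sketch of a test_compile-style helper; names and details may differ
# from the real setup.py code of the projects quoted below.
import os


def test_compile(build_ext, name, code, libraries=None, include_dirs=None,
                 library_dirs=None, macros=None, extra_preargs=None):
    """Compile and link `code` into a shared object; CompileError/LinkError propagate."""
    test_compile_dir = os.path.join(build_ext.build_temp, 'test_compile')
    if not os.path.exists(test_compile_dir):
        os.makedirs(test_compile_dir)

    source_file = os.path.join(test_compile_dir, '%s.cc' % name)
    with open(source_file, 'w') as f:
        f.write(code)

    compiler = build_ext.compiler
    [object_file] = compiler.object_filenames([source_file])
    shared_object_file = compiler.shared_object_filename(
        name, output_dir=test_compile_dir)

    # Either step may raise CompileError or LinkError, which the callers catch
    # in order to try the next candidate configuration.
    compiler.compile([source_file], extra_preargs=extra_preargs,
                     include_dirs=include_dirs, macros=macros)
    compiler.link_shared_object(
        [object_file], shared_object_file, libraries=libraries,
        library_dirs=library_dirs, extra_preargs=extra_preargs)

    return shared_object_file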
Example 2: get_tf_libs
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def get_tf_libs(build_ext, lib_dirs, cpp_flags):
    last_err = None
    for tf_libs in [['tensorflow_framework'], []]:
        try:
            lib_file = test_compile(build_ext, 'test_tensorflow_libs',
                                    library_dirs=lib_dirs, libraries=tf_libs,
                                    extra_preargs=cpp_flags,
                                    code=textwrap.dedent('''\
                    void test() {
                    }
                    '''))

            from tensorflow.python.framework import load_library
            load_library.load_op_library(lib_file)

            return tf_libs
        except (CompileError, LinkError):
            last_err = 'Unable to determine -l link flags to use with TensorFlow (see error above).'
        except Exception:
            last_err = 'Unable to determine -l link flags to use with TensorFlow. ' \
                       'Last error:\n\n%s' % traceback.format_exc()

    raise DistutilsPlatformError(last_err)
Example 3: get_tf_abi
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def get_tf_abi(build_ext, include_dirs, lib_dirs, libs, cpp_flags):
    last_err = None
    cxx11_abi_macro = '_GLIBCXX_USE_CXX11_ABI'
    for cxx11_abi in ['0', '1']:
        try:
            lib_file = test_compile(build_ext, 'test_tensorflow_abi',
                                    macros=[(cxx11_abi_macro, cxx11_abi)],
                                    include_dirs=include_dirs, library_dirs=lib_dirs,
                                    libraries=libs, extra_preargs=cpp_flags,
                                    code=textwrap.dedent('''\
                #include <string>
                #include "tensorflow/core/framework/op.h"
                #include "tensorflow/core/framework/op_kernel.h"
                #include "tensorflow/core/framework/shape_inference.h"
                void test() {
                    auto ignore = tensorflow::strings::StrCat("a", "b");
                }
                '''))

            from tensorflow.python.framework import load_library
            load_library.load_op_library(lib_file)

            return cxx11_abi_macro, cxx11_abi
        except (CompileError, LinkError):
            last_err = 'Unable to determine CXX11 ABI to use with TensorFlow (see error above).'
        except Exception:
            last_err = 'Unable to determine CXX11 ABI to use with TensorFlow. ' \
                       'Last error:\n\n%s' % traceback.format_exc()

    raise DistutilsPlatformError(last_err)
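Taken together, the three probes above are typically run from a build_ext subclass and their results fed into the TensorFlow op extension being built. The sketch below is an assumed illustration of that wiring; the function name configure_tf_extension and the use of tf.sysconfig are illustrative, not part of the example's project.

# Assumed usage sketch: applying the probed flags, libraries, and ABI macro
# to a setuptools Extension for a custom TensorFlow op.
import tensorflow as tf


def configure_tf_extension(build_ext, ext):
    cpp_flags = get_cpp_flags(build_ext)

    tf_include = tf.sysconfig.get_include()   # TensorFlow header directory
    tf_lib_dir = tf.sysconfig.get_lib()       # directory holding libtensorflow_framework

    tf_libs = get_tf_libs(build_ext, [tf_lib_dir], cpp_flags)
    tf_abi = get_tf_abi(build_ext, [tf_include], [tf_lib_dir], tf_libs, cpp_flags)

    ext.include_dirs += [tf_include]
    ext.library_dirs += [tf_lib_dir]
    ext.libraries += tf_libs
    ext.extra_compile_args += cpp_flags
    if tf_abi:
        # Keep the extension's C++ ABI consistent with the installed TensorFlow,
        # e.g. ('_GLIBCXX_USE_CXX11_ABI', '0').
        ext.define_macros += [tf_abi]
    return ext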
Example 4: get_cuda_dirs
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def get_cuda_dirs(build_ext, cpp_flags):
    cuda_include_dirs = []
    cuda_lib_dirs = []

    cuda_home = os.environ.get('HOROVOD_CUDA_HOME')
    if cuda_home:
        cuda_include_dirs += ['%s/include' % cuda_home]
        cuda_lib_dirs += ['%s/lib' % cuda_home, '%s/lib64' % cuda_home]

    cuda_include = os.environ.get('HOROVOD_CUDA_INCLUDE')
    if cuda_include:
        cuda_include_dirs += [cuda_include]

    cuda_lib = os.environ.get('HOROVOD_CUDA_LIB')
    if cuda_lib:
        cuda_lib_dirs += [cuda_lib]

    if not cuda_include_dirs and not cuda_lib_dirs:
        # default to /usr/local/cuda
        cuda_include_dirs += ['/usr/local/cuda/include']
        cuda_lib_dirs += ['/usr/local/cuda/lib', '/usr/local/cuda/lib64']

    try:
        test_compile(build_ext, 'test_cuda', libraries=['cudart'],
                     include_dirs=cuda_include_dirs, library_dirs=cuda_lib_dirs,
                     extra_preargs=cpp_flags, code=textwrap.dedent('''\
            #include <cuda_runtime.h>
            void test() {
                cudaSetDevice(0);
            }
            '''))
    except (CompileError, LinkError):
        raise DistutilsPlatformError(
            'CUDA library was not found (see error above).\n'
            'Please specify correct CUDA location with the HOROVOD_CUDA_HOME '
            'environment variable or combination of HOROVOD_CUDA_INCLUDE and '
            'HOROVOD_CUDA_LIB environment variables.\n\n'
            'HOROVOD_CUDA_HOME - path where CUDA include and lib directories can be found\n'
            'HOROVOD_CUDA_INCLUDE - path to CUDA include directory\n'
            'HOROVOD_CUDA_LIB - path to CUDA lib directory')

    return cuda_include_dirs, cuda_lib_dirs
Example 5: get_cpp_flags
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def get_cpp_flags(build_ext):
    last_err = None
    default_flags = ['-std=c++11', '-fPIC', '-O2', '-Wall']
    # avx_flags = ['-mf16c', '-mavx'] if check_avx_supported() else []
    avx_flags = []
    flags_to_try = [
        default_flags,
        default_flags + ['-stdlib=libc++']
    ]
    if avx_flags:
        flags_to_try.append(default_flags + avx_flags)
        flags_to_try.append(default_flags + ['-stdlib=libc++'] + avx_flags)
    for cpp_flags in flags_to_try:
        try:
            test_compile(
                build_ext, 'test_cpp_flags',
                extra_compile_preargs=cpp_flags,
                code=textwrap.dedent(
                    '''\
                    #include <unordered_map>
                    void test() {
                    }
                    '''
                )
            )
            return cpp_flags
        except (CompileError, LinkError):
            last_err = 'Unable to determine C++ compilation flags (see error above).'
        except Exception:
            last_err = 'Unable to determine C++ compilation flags. ' \
                       'Last error:\n\n%s' % traceback.format_exc()

    raise DistutilsPlatformError(last_err)
Example 6: get_link_flags
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def get_link_flags(build_ext):
    last_err = None
    libtool_flags = ['-Wl,-exported_symbols_list']
    ld_flags = []
    flags_to_try = [ld_flags, libtool_flags]
    for link_flags in flags_to_try:
        try:
            test_compile(build_ext, 'test_link_flags',
                         extra_link_preargs=link_flags,
                         code=textwrap.dedent('''\
                    void test() {
                    }
                    '''))
            return link_flags
        except (CompileError, LinkError):
            last_err = 'Unable to determine C++ link flags (see error above).'
        except Exception:
            last_err = 'Unable to determine C++ link flags. ' \
                       'Last error:\n\n%s' % traceback.format_exc()

    raise DistutilsPlatformError(last_err)
Example 7: get_tf_libs
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def get_tf_libs(build_ext, lib_dirs, cpp_flags):
    for tf_libs in [['tensorflow_framework'], []]:
        try:
            lib_file = test_compile(
                build_ext,
                'test_tensorflow_libs',
                library_dirs=lib_dirs,
                libraries=tf_libs,
                extra_compile_preargs=cpp_flags,
                code=textwrap.dedent('''\
                    void test() {
                    }
                    '''))

            from tensorflow.python.framework import load_library
            load_library.load_op_library(lib_file)

            return tf_libs
        except (CompileError, LinkError):
            last_err = 'Unable to determine -l link flags to use with TensorFlow (see error above).'
        except Exception:
            last_err = 'Unable to determine -l link flags to use with TensorFlow. Last error:\n\n%s' % \
                       traceback.format_exc()

    raise DistutilsPlatformError(last_err)
Example 8: get_tf_abi
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def get_tf_abi(build_ext, include_dirs, lib_dirs, libs, cpp_flags):
    cxx11_abi_macro = '_GLIBCXX_USE_CXX11_ABI'
    for cxx11_abi in ['0', '1']:
        try:
            lib_file = test_compile(build_ext, 'test_tensorflow_abi',
                                    macros=[(cxx11_abi_macro, cxx11_abi)],
                                    include_dirs=include_dirs,
                                    library_dirs=lib_dirs,
                                    libraries=libs,
                                    extra_compile_preargs=cpp_flags,
                                    code=textwrap.dedent('''\
                #include <string>
                #include "tensorflow/core/framework/op.h"
                #include "tensorflow/core/framework/op_kernel.h"
                #include "tensorflow/core/framework/shape_inference.h"
                void test() {
                    auto ignore = tensorflow::strings::StrCat("a", "b");
                }
                ''')
            )

            from tensorflow.python.framework import load_library
            load_library.load_op_library(lib_file)

            return cxx11_abi_macro, cxx11_abi
        except (CompileError, LinkError):
            last_err = 'Unable to determine CXX11 ABI to use with TensorFlow (see error above).'
        except Exception:
            last_err = 'Unable to determine CXX11 ABI to use with TensorFlow. ' \
                       'Last error:\n\n%s' % traceback.format_exc()

    raise DistutilsPlatformError(last_err)
Example 9: has_c_library
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def has_c_library(library, extension=".c"):
    """Check whether a C/C++ library is available on the system to the compiler.

    Parameters
    ----------
    library: str
        The library we want to check for e.g. if we are interested in FFTW3, we
        want to check for `fftw3.h`, so this parameter will be `fftw3`.
    extension: str
        If we want to check for a C library, the extension is `.c`, for C++
        `.cc`, `.cpp` or `.cxx` are accepted.

    Returns
    -------
    bool
        Whether or not the library is available.

    """
    with tempfile.TemporaryDirectory(dir=".") as directory:
        name = join(directory, "%s%s" % (library, extension))
        with open(name, "w") as f:
            f.write("#include <%s.h>\n" % library)
            f.write("int main() {}\n")

        # Get a compiler instance
        compiler = ccompiler.new_compiler()
        # Configure compiler to do all the platform specific things
        customize_compiler(compiler)
        # Add conda include dirs
        for inc_dir in get_include_dirs():
            compiler.add_include_dir(inc_dir)
        assert isinstance(compiler, ccompiler.CCompiler)

        try:
            # Try to compile the file using the C compiler
            compiler.link_executable(compiler.compile([name]), name)
            return True
        except (CompileError, LinkError):
            return False
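A short usage sketch for has_c_library: the caller decides at setup time whether to build against the optional library. The feature macro and the fallback message below are illustrative, not part of the example's project.

# Hedged usage sketch for has_c_library; the surrounding setup.py logic is assumed.
if has_c_library("fftw3"):
    # e.g. compile the FFTW-backed module and define a feature macro
    extra_macros = [("USE_FFTW", "1")]
else:
    print("FFTW3 headers not found; falling back to the built-in FFT code.")
    extra_macros = []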
Example 10: get_nccl_vals
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def get_nccl_vals(build_ext, cuda_include_dirs, cuda_lib_dirs, cpp_flags):
    nccl_include_dirs = []
    nccl_lib_dirs = []
    nccl_libs = []

    nccl_home = os.environ.get('HOROVOD_NCCL_HOME')
    if nccl_home:
        nccl_include_dirs += ['%s/include' % nccl_home]
        nccl_lib_dirs += ['%s/lib' % nccl_home, '%s/lib64' % nccl_home]

    nccl_include_dir = os.environ.get('HOROVOD_NCCL_INCLUDE')
    if nccl_include_dir:
        nccl_include_dirs += [nccl_include_dir]

    nccl_lib_dir = os.environ.get('HOROVOD_NCCL_LIB')
    if nccl_lib_dir:
        nccl_lib_dirs += [nccl_lib_dir]

    nccl_link_mode = os.environ.get('HOROVOD_NCCL_LINK', 'STATIC')
    if nccl_link_mode.upper() == 'SHARED':
        nccl_libs += ['nccl']
    else:
        nccl_libs += ['nccl_static']

    try:
        test_compile(build_ext, 'test_nccl', libraries=nccl_libs,
                     include_dirs=nccl_include_dirs + cuda_include_dirs,
                     library_dirs=nccl_lib_dirs + cuda_lib_dirs,
                     extra_preargs=cpp_flags, code=textwrap.dedent('''\
            #include <nccl.h>
            #if NCCL_MAJOR < 2
            #error Horovod requires NCCL 2.0 or later version, please upgrade.
            #endif
            void test() {
                ncclUniqueId nccl_id;
                ncclGetUniqueId(&nccl_id);
            }
            '''))
    except (CompileError, LinkError):
        raise DistutilsPlatformError(
            'NCCL 2.0 library or its later version was not found (see error above).\n'
            'Please specify correct NCCL location with the HOROVOD_NCCL_HOME '
            'environment variable or combination of HOROVOD_NCCL_INCLUDE and '
            'HOROVOD_NCCL_LIB environment variables.\n\n'
            'HOROVOD_NCCL_HOME - path where NCCL include and lib directories can be found\n'
            'HOROVOD_NCCL_INCLUDE - path to NCCL include directory\n'
            'HOROVOD_NCCL_LIB - path to NCCL lib directory')

    return nccl_include_dirs, nccl_lib_dirs, nccl_libs
Example 11: check_openmp_support
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def check_openmp_support():
    """Check whether OpenMP test code can be compiled and run."""
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            os.chdir(tmp_dir)

            # Write test program
            with open("test_openmp.c", "w") as cfile:
                cfile.write(CCODE)

            os.mkdir("objects")

            # Compile, test program
            openmp_flags = get_openmp_flag(ccompiler)
            ccompiler.compile(
                ["test_openmp.c"],
                output_dir="objects",
                extra_postargs=openmp_flags,
            )

            # Link test program
            extra_preargs = os.getenv("LDFLAGS", None)
            if extra_preargs is not None:
                extra_preargs = extra_preargs.split(" ")
            else:
                extra_preargs = []

            objects = glob.glob(
                os.path.join("objects", "*" + ccompiler.obj_extension)
            )
            ccompiler.link_executable(
                objects,
                "test_openmp",
                extra_preargs=extra_preargs,
                extra_postargs=openmp_flags,
            )

            # Run test program
            output = subprocess.check_output("./test_openmp")
            output = output.decode(sys.stdout.encoding or "utf-8").splitlines()

            # Check test program output
            if "nthreads=" in output[0]:
                nthreads = int(output[0].strip().split("=")[1])
                openmp_supported = len(output) == nthreads
            else:
                openmp_supported = False
                openmp_flags = []

        except (CompileError, LinkError, subprocess.CalledProcessError):
            openmp_supported = False
            openmp_flags = []

        finally:
            os.chdir(HERE)

    return openmp_supported, openmp_flags

# openmp ######################################################################
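check_openmp_support depends on two names defined elsewhere in the same helper module and not shown here: CCODE, a tiny OpenMP C program that prints one "nthreads=" line per thread, and get_openmp_flag, which picks the compiler-specific OpenMP switch. The following is a hedged sketch of what they might look like; it is an assumption, not the project's exact code.

# Assumed sketch of the missing CCODE and get_openmp_flag definitions.
import os
import sys
import textwrap

# Each thread prints one line, so the number of output lines should equal the
# reported thread count when OpenMP actually works.
CCODE = textwrap.dedent(
    """\
    #include <omp.h>
    #include <stdio.h>
    int main(void) {
    #pragma omp parallel
    printf("nthreads=%d\\n", omp_get_num_threads());
    return 0;
    }
    """
)


def get_openmp_flag(compiler):
    """Best-effort guess at the OpenMP switch (simplified, assumed logic)."""
    # `compiler` is accepted for signature compatibility; this sketch only
    # looks at the platform.
    if sys.platform == "win32":
        return ["/openmp"]          # MSVC
    if sys.platform == "darwin" and "openmp" in os.getenv("CPPFLAGS", ""):
        # Apple clang: OpenMP is usually enabled via CPPFLAGS/LDFLAGS instead.
        return []
    return ["-fopenmp"]             # GCC / clang default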
Example 12: get_cuda_dirs
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def get_cuda_dirs(build_ext, cpp_flags):
    cuda_include_dirs = []
    cuda_lib_dirs = []

    cuda_home = os.environ.get('CUDA_HOME')
    cuda_lib = os.environ.get('CUDA_LIB')
    cuda_include = os.environ.get('CUDA_INCLUDE')

    if cuda_home and os.path.exists(cuda_home):
        for _dir in ['%s/include' % cuda_home]:
            if os.path.exists(_dir):
                cuda_include_dirs.append(_dir)
        for _dir in ['%s/lib' % cuda_home, '%s/lib64' % cuda_home]:
            if os.path.exists(_dir):
                cuda_lib_dirs.append(_dir)

    if cuda_include and os.path.exists(cuda_include) and cuda_include not in cuda_include_dirs:
        cuda_include_dirs.append(cuda_include)

    if cuda_lib and os.path.exists(cuda_lib) and cuda_lib not in cuda_lib_dirs:
        cuda_lib_dirs.append(cuda_lib)

    if not cuda_include_dirs and not cuda_lib_dirs:
        # default to /usr/local/cuda
        cuda_include_dirs += ['/usr/local/cuda/include']
        cuda_lib_dirs += ['/usr/local/cuda/lib', '/usr/local/cuda/lib64']

    try:
        test_compile(
            build_ext,
            'test_cuda',
            libraries=['cudart'],
            include_dirs=cuda_include_dirs,
            library_dirs=cuda_lib_dirs,
            extra_compile_preargs=cpp_flags,
            code=textwrap.dedent(
                '''\
                #include <cuda_runtime.h>
                void test() {
                    cudaSetDevice(0);
                }
                '''
            )
        )
    except (CompileError, LinkError):
        raise DistutilsPlatformError(
            'CUDA library was not found (see error above).\n'
            'Please specify correct CUDA location with the CUDA_HOME '
            'environment variable or combination of CUDA_INCLUDE and '
            'CUDA_LIB environment variables.\n\n'
            'CUDA_HOME - path where CUDA include and lib directories can be found\n'
            'CUDA_INCLUDE - path to CUDA include directory\n'
            'CUDA_LIB - path to CUDA lib directory'
        )

    return cuda_include_dirs, cuda_lib_dirs
Example 13: check_required_library
# Required import: from distutils import errors [as alias]
# Or: from distutils.errors import LinkError [as alias]
def check_required_library(libname, libraries=None, include_dir=None):
    """
    Check if the required shared library exists

    :param libname: The name of the shared library
    :type libname: str
    :return: True if the required shared lib exists else False
    :rtype: bool
    """
    build_success = True
    tmp_dir = tempfile.mkdtemp(prefix='tmp_' + libname + '_')
    bin_file_name = os.path.join(tmp_dir, 'test_' + libname)
    file_name = bin_file_name + '.c'
    with open(file_name, 'w') as filep:
        filep.write(LIBNAME_CODE_DICT[libname])

    compiler = distutils.ccompiler.new_compiler()
    distutils.sysconfig.customize_compiler(compiler)
    try:
        compiler.link_executable(
            compiler.compile([file_name],
                             include_dirs=include_dir),
            bin_file_name,
            libraries=libraries,
        )
    except CompileError:
        build_success = False
    except LinkError:
        build_success = False
    finally:
        shutil.rmtree(tmp_dir)

    if build_success:
        return True

    err_msg = "The development package for " + \
              libname + " is required " + \
              "for the compilation of roguehostapd. " + \
              "Please install it and " + \
              "rerun the script (e.g. on Debian-based systems " \
              "run: apt-get install "
    if libname == "openssl":
        err_msg += "libssl-dev"
    else:
        err_msg += "libnl-3-dev libnl-genl-3-dev"
    sys.exit(err_msg)
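check_required_library reads the probe source from LIBNAME_CODE_DICT, which roguehostapd defines elsewhere. The following is a hedged sketch of what such a mapping and a call site might look like; the exact C snippets and library lists are assumptions, not the project's real values.

# Assumed sketch: a mapping from library name to a tiny C probe program.
LIBNAME_CODE_DICT = {
    "openssl": (
        "#include <openssl/ssl.h>\n"
        "int main(void) { SSL_library_init(); return 0; }\n"
    ),
    "nl": (
        "#include <netlink/socket.h>\n"
        "int main(void) { struct nl_sock *sock = nl_socket_alloc(); "
        "nl_socket_free(sock); return 0; }\n"
    ),
}

# Illustrative calls (library names as they would be passed to the linker):
# check_required_library("openssl", libraries=["ssl", "crypto"])
# check_required_library("nl", libraries=["nl-3", "nl-genl-3"],
#                        include_dir=["/usr/include/libnl3"])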