This article collects typical usage examples of the tensorflow.load_op_library function in Python. If you are wondering what exactly load_op_library does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.
The following 15 code examples of load_op_library are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
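Before the individual examples, here is a minimal end-to-end sketch of the pattern they all share: compile a custom op into a shared library using the compile and link flags exposed by tf.sysconfig, then load it with tf.load_op_library. The file zero_out.cc and the op name zero_out are illustrative placeholders borrowed from the official custom-op tutorial, not part of any example below.

import subprocess
import tensorflow as tf

# Flags needed to build against the installed TensorFlow headers and runtime.
cflags = tf.sysconfig.get_compile_flags()
lflags = tf.sysconfig.get_link_flags()

# Hypothetical kernel source; any .cc file that registers an op works the same way.
subprocess.check_call(
    ["g++", "-std=c++11", "-shared", "zero_out.cc", "-o", "zero_out.so", "-fPIC"]
    + cflags + lflags + ["-O2"])

# The registered ops become snake_case attributes of the returned module.
zero_out_module = tf.load_op_library("./zero_out.so")
result = zero_out_module.zero_out([[1, 2], [3, 4]])
print(result)  # a Tensor in graph mode, concrete values in eager mode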
Example 1: load_custom_op
def load_custom_op(self, custom_op_paths):
    custom_op_path_list = custom_op_paths.split(",")

    for custom_op_path in custom_op_path_list:
        if os.path.isdir(custom_op_path):
            for filename in os.listdir(custom_op_path):
                if filename.endswith(".so"):
                    op_filepath = os.path.join(custom_op_path, filename)
                    logger.info("Load the so file from: {}".format(op_filepath))
                    tf.load_op_library(op_filepath)
        else:
            logger.error("The path does not exist: {}".format(custom_op_path))
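A standalone usage sketch of the same idea, with made-up directory names; the only assumption is that custom_op_paths is a comma-separated list of directories to scan for .so files.

import os
import tensorflow as tf

def load_custom_ops_from(custom_op_paths):
    """Load every .so found in a comma-separated list of directories."""
    loaded = []
    for path in custom_op_paths.split(","):
        if not os.path.isdir(path):
            continue
        for filename in os.listdir(path):
            if filename.endswith(".so"):
                loaded.append(tf.load_op_library(os.path.join(path, filename)))
    return loaded

# Placeholder directories; nonexistent paths are simply skipped.
modules = load_custom_ops_from("/opt/custom_ops,/usr/local/lib/tf_ops")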
Example 2: build_plasma_tensorflow_op
def build_plasma_tensorflow_op():
    global tf_plasma_op
    try:
        import tensorflow as tf
        print("TensorFlow version: " + tf.__version__)
    except ImportError:
        pass
    else:
        print("Compiling Plasma TensorFlow Op...")
        dir_path = os.path.dirname(os.path.realpath(__file__))
        cc_path = os.path.join(dir_path, "tensorflow", "plasma_op.cc")
        so_path = os.path.join(dir_path, "tensorflow", "plasma_op.so")
        tf_cflags = tf.sysconfig.get_compile_flags()
        if sys.platform == 'darwin':
            tf_cflags = ["-undefined", "dynamic_lookup"] + tf_cflags
        cmd = ["g++", "-std=c++11", "-g", "-shared", cc_path,
               "-o", so_path, "-DNDEBUG", "-I" + pa.get_include()]
        cmd += ["-L" + dir for dir in pa.get_library_dirs()]
        cmd += ["-lplasma", "-larrow_python", "-larrow", "-fPIC"]
        cmd += tf_cflags
        cmd += tf.sysconfig.get_link_flags()
        cmd += ["-O2"]
        if tf.test.is_built_with_cuda():
            cmd += ["-DGOOGLE_CUDA"]
        print("Running command " + str(cmd))
        subprocess.check_call(cmd)
        tf_plasma_op = tf.load_op_library(TF_PLASMA_OP_PATH)
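If the g++ command in this example fails, a quick sanity check is to print the flag lists TensorFlow itself provides, since they are exactly what the example splices into the command line; a minimal sketch, assuming only a standard TensorFlow installation:

import tensorflow as tf

# Include paths and defines for compiling against the installed TensorFlow.
print(tf.sysconfig.get_compile_flags())
# Linker flags pointing at the TensorFlow framework library.
print(tf.sysconfig.get_link_flags())
# Whether -DGOOGLE_CUDA should be appended, as the example decides above.
print(tf.test.is_built_with_cuda())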
Example 3: __init__
def __init__(self, run_dir):
    r = 10.
    game_params = {
        'r': r,
        'dt': 1./9,
        'host_speed': 10/3.6,
        'target_speed': 5.,
        'num_of_targets': 5,
    }

    self._connect(game_params)
    self._train_params()

    self.fig = plt.figure()
    self.ax = plt.subplot2grid((2, 2), (0, 0), colspan=2, rowspan=2)

    self.run_dir = run_dir
    subprocess.Popen(self.run_dir + "./simulator")
    self.pipe_module = tf.load_op_library(self.run_dir + 'pipe.so')

    plt.ion()
    plt.show()
Example 4: do_test
def do_test(self):
    test = self.d.gpu_test
    assemble_module = tf.load_op_library('./../assemble_boxes_gpu.so')

    with tf.Session():
        with tf.device("/gpu:0"):
            if len(test) == 0:
                test = [1, 1, 3, 3, 1, 15]
                test.extend([4, 1, 3, 3, 2, 15])
                test.extend([7, 1, 3, 3, 3, 15])
                test.extend([1, 4, 3, 3, 4, 15])
                test.extend([4, 4, 3, 3, 5, 15])
                test.extend([7, 4, 3, 3, 6, 15])
                test.extend([1, 7, 3, 3, 7, 15])
                test.extend([4, 7, 3, 3, 8, 15])
                test.extend([7, 7, 3, 3, 9, 15])
                test.extend([6, 9, 16])
            test = tf.constant(test, dtype=tf.uint16)
            # test = tf.cast(test, dtype=tf.int32)
            print(test)
            # result = assemble_module.assemble_boxes_op(test)
            result = assemble_module.assemble_boxes_cpu(test)
            self.r = result.eval()

    # Collect the distinct group ids from the flattened result (6 values per box).
    s = []
    for i in range(len(self.r) // 6):
        print(self.r[i * 6: i * 6 + 6])
        g = self.r[i * 6 + 4]
        if g not in s:
            s.append(g)
    print("simple list:", s)
Example 5: _load_dynamiclib_module
def _load_dynamiclib_module():
    if Operator._dynamiclibop_module is None:
        libname = 'dynamiclibop.so.' + version
        dynamiclibop_path = os.path.join(cache_directory, libname)
        if not os.path.exists(dynamiclibop_path):
            # build the library if it does not exist already
            tf_include = tf.sysconfig.get_include()
            # resolve the directory of this file
            this_file_path = os.path.abspath(__file__)
            this_directory = os.path.split(this_file_path)[0]
            try:
                if cuda_enabled:
                    tf.logging.log(tf.logging.INFO, '*** building dynamiclibop for GPU')
                    subprocess.check_output(['g++', '-fPIC', '-Wall', '-shared',
                                             '-std=c++11', '-O2', '-Wextra', '-DGOOGLE_CUDA=1',
                                             '-o', dynamiclibop_path,
                                             this_directory + '/dynamiclibop.cc',
                                             '-isystem', cuda_directory + '/include',
                                             '-isystem', tf_include],
                                            stderr=subprocess.STDOUT,
                                            universal_newlines=True)
                else:
                    tf.logging.log(tf.logging.INFO, '*** building dynamiclibop for CPU')
                    subprocess.check_output(['g++', '-fPIC', '-Wall', '-shared',
                                             '-std=c++11', '-O2', '-Wextra',
                                             '-o', dynamiclibop_path,
                                             this_directory + '/dynamiclibop.cc',
                                             '-isystem', tf_include],
                                            stderr=subprocess.STDOUT,
                                            universal_newlines=True)
            except subprocess.CalledProcessError as exception:
                tf.logging.log(tf.logging.ERROR, 'g++ error: ' + exception.output)
                raise

        Operator._dynamiclibop_module = tf.load_op_library(dynamiclibop_path)
Example 6: f_segm_match
def f_segm_match(iou, s_gt):
    """Matching between segmentation output and groundtruth.

    Args:
        iou: [B, N, M], pairwise IoU between output and groundtruth segmentations
        s_gt: [B, M], groundtruth score sequence
    """
    global hungarian_module
    if hungarian_module is None:
        hungarian_module = tf.load_op_library('hungarian.so')
        log.info('Loaded library "hungarian.so"')

    # Mask X, [B, M] => [B, 1, M]
    mask_x = tf.expand_dims(s_gt, dim=1)
    # Mask Y, [B, M] => [B, N, 1]
    mask_y = tf.expand_dims(s_gt, dim=2)
    iou_mask = iou * mask_x * mask_y

    # Keep certain precision so that we can get optimal matching within
    # reasonable time.
    eps = 1e-5
    precision = 1e6
    iou_mask = tf.round(iou_mask * precision) / precision
    match_eps = hungarian_module.hungarian(iou_mask + eps)[0]

    # [1, N, 1, 1]
    s_gt_shape = tf.shape(s_gt)
    num_segm_out = s_gt_shape[1]
    num_segm_out_mul = tf.pack([1, num_segm_out, 1])

    # Mask the graph algorithm output.
    match = match_eps * mask_x * mask_y
    return match
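The custom hungarian.so op solves a linear assignment problem on the masked IoU matrix. As a sanity check outside TensorFlow, the same matching can be computed per batch item with SciPy; this is only a comparison sketch, not the op's implementation.

import numpy as np
from scipy.optimize import linear_sum_assignment

# One batch item: IoU between predicted and groundtruth segmentations.
iou = np.array([[0.9, 0.1, 0.0],
                [0.2, 0.8, 0.0],
                [0.0, 0.1, 0.7]])

# linear_sum_assignment minimizes cost, so negate to maximize total IoU.
row_ind, col_ind = linear_sum_assignment(-iou)

# Binary matching matrix analogous to the op's output for this item.
match = np.zeros_like(iou)
match[row_ind, col_ind] = 1.0
print(match)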
Example 7: testBasic
def testBasic(self):
    library_filename = os.path.join(tf.resource_loader.get_data_files_path(),
                                    'duplicate_op.so')
    duplicate = tf.load_op_library(library_filename)

    self.assertEqual(len(duplicate.OP_LIST.op), 0)

    with self.test_session():
        self.assertEqual(tf.add(1, 41).eval(), 42)
Example 8: testBasic
def testBasic(self):
    library_filename = os.path.join(tf.resource_loader.get_data_files_path(),
                                    "ackermann_op.so")
    ackermann = tf.load_op_library(library_filename)

    self.assertEqual(len(ackermann.OP_LIST.op), 1)
    self.assertEqual(ackermann.OP_LIST.op[0].name, "Ackermann")

    with self.test_session():
        self.assertEqual(ackermann.ackermann().eval(), "A(m, 0) == A(m-1, 1)")
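Both test examples rely on the OP_LIST attribute that tf.load_op_library attaches to the returned module: an OpList protocol buffer describing every op the library registered. A small inspection sketch, assuming some already-built library custom_op.so (the path is a placeholder):

import tensorflow as tf

module = tf.load_op_library("./custom_op.so")

# Each entry is an OpDef with the op's name and its input/output arguments.
for op_def in module.OP_LIST.op:
    print(op_def.name, [arg.name for arg in op_def.input_arg])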
Example 9: Load
def Load():
    """Load the TopN ops library and return the loaded module."""
    with _ops_lock:
        global _topn_ops
        if not _topn_ops:
            ops_path = tf.resource_loader.get_path_to_datafile(TOPN_OPS_FILE)
            tf.logging.info('data path: %s', ops_path)
            _topn_ops = tf.load_op_library(ops_path)

            assert _topn_ops, 'Could not load topn_ops.so'
    return _topn_ops
Example 10: Load
def Load(library_base_dir=""):
    """Load the quantized ops library and return the loaded module."""
    with _ops_lock:
        global _quantized_ops
        if not _quantized_ops:
            data_files_path = os.path.join(library_base_dir,
                                           tf.resource_loader.get_data_files_path())
            tf.logging.info("q:data path: %s", data_files_path)
            _quantized_ops = tf.load_op_library(os.path.join(data_files_path,
                                                             QUANTIZED_OPS_FILE))
            assert _quantized_ops, "Could not load quantized_ops.so"
    return _quantized_ops
Example 11: Load
def Load():
    """Load training ops library and return the loaded module."""
    with _ops_lock:
        global _training_ops
        if not _training_ops:
            data_files_path = tf.resource_loader.get_data_files_path()
            tf.logging.info('data path: %s', data_files_path)
            _training_ops = tf.load_op_library(os.path.join(
                data_files_path, TRAINING_OPS_FILE))

            assert _training_ops, 'Could not load _training_ops.so'
    return _training_ops
Example 12: Load
def Load():
    """Load the inference ops library and return the loaded module."""
    with _ops_lock:
        global _inference_ops
        if not _inference_ops:
            data_files_path = tf.resource_loader.get_data_files_path()
            tf.logging.info('data path: %s', data_files_path)
            _inference_ops = tf.load_op_library(os.path.join(
                data_files_path, INFERENCE_OPS_FILE))

            assert _inference_ops, 'Could not load inference_ops.so'
    return _inference_ops
Example 13: Load
def Load(library_base_dir=''):
    """Load the quantized ops library and return the loaded module."""
    with _kernels_lock:
        global _quantized_kernels
        if not _quantized_kernels:
            data_files_path = os.path.join(library_base_dir,
                                           tf.resource_loader.get_data_files_path())
            tf.logging.info('data path: %s', data_files_path)
            _quantized_kernels = tf.load_op_library(os.path.join(
                data_files_path, QUANTIZED_KERNELS_FILE))

            assert _quantized_kernels, 'Could not load _quantized_kernels.so'
    return _quantized_kernels
Example 14: load
def load(library_path):
    fuzzy_module = tf.load_op_library(library_path)

    @ops.RegisterGradient("FuzzyCTCLoss")
    def _FuzzyCTCLossGrad(op, grad_loss, _):
        grad_without_gradient = array_ops.prevent_gradient(
            op.outputs[1], message="Currently there is no way to take the second "
            " derivative of ctc_loss due to the fused implementation's interaction "
            " with tf.gradients()")
        return [_BroadcastMul(tf.expand_dims(grad_loss, -1), grad_without_gradient),
                None, None, None]

    def fuzzy_ctc_greedy_decoder(inputs, sequence_length):
        outputs = fuzzy_module.fuzzy_ctc_greedy_decoder(inputs, sequence_length)
        (decoded_ix, decoded_val, decoded_shape, log_probabilities) = outputs
        return ([sparse_tensor.SparseTensor(decoded_ix, decoded_val, decoded_shape)],
                log_probabilities)

    return {"module": fuzzy_module, "decoder_op": fuzzy_ctc_greedy_decoder}
Example 15: roi_pooling
import tensorflow as tf
from tensorflow.python.framework import ops
import os

module_path = os.path.realpath(__file__)
module_dir = os.path.dirname(module_path)
lib_path = os.path.join(module_dir, 'roi_pooling.so')
roi_pooling_module = tf.load_op_library(lib_path)

def roi_pooling(input, rois, pool_height, pool_width):
    """
    returns a tensorflow operation for computing the Region of Interest Pooling

    @arg input: feature maps on which to perform the pooling operation
    @arg rois: list of regions of interest in the format (feature map index, upper left, bottom right)
    @arg pool_height, pool_width: size of the pooling sections
    """
    # TODO(maciek): ops scope
    out = roi_pooling_module.roi_pooling(input, rois,
                                         pool_height=pool_height, pool_width=pool_width)
    output, argmax_output = out[0], out[1]
    return output

@ops.RegisterGradient("RoiPooling")
def _RoiPoolingGrad(op, *grads):
    orig_inputs = op.inputs[0]
    orig_rois = op.inputs[1]
    orig_output = op.outputs[0]
    orig_argmax_output = op.outputs[1]
    orig_output_grad = grads[0]