

Python loader.load_op_library Function Code Examples

This article collects typical usage examples of the Python function tensorflow.contrib.util.loader.load_op_library. If you are looking for concrete examples of how load_op_library is used in practice, the hand-picked code samples below may help.


The section below presents 15 code examples of the load_op_library function, sorted by popularity by default.

Example 1: zero_initializer

def zero_initializer(ref, use_locking=True, name="zero_initializer"):
  """Initialize 'ref' with all zeros, ref tensor should be uninitialized.
  If already initialized, you will get ValueError. This op is intended to
  save memory during initialization.
  Args:
    ref: ref of the tensor need to be zero initialized.
    name: optional name for this operation.
  Returns:
    ref that initialized.
  Raises:
    ValueError: If ref tensor is initialized.
  """
  loader.load_op_library(
      resource_loader.get_path_to_datafile("_variable_ops.so"))
  return gen_variable_ops.zero_initializer(ref, name=name)
Author: AliMiraftab, Project: tensorflow, Lines: 15, Source: variables.py
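A minimal usage sketch (TF 1.x; it assumes the wrapper above is exported as tf.contrib.framework.zero_initializer, and the variable name is illustrative):

import tensorflow as tf

# Assumption: the zero_initializer wrapper is exported via tf.contrib.framework.
from tensorflow.contrib.framework import zero_initializer

v = tf.Variable(tf.random_normal([1000, 1000]), name="big_var")

with tf.Session() as sess:
  # Instead of running v.initializer (which materializes the initial value
  # tensor), fill the variable's buffer with zeros in place.
  sess.run(zero_initializer(v))
  print(sess.run(tf.reduce_sum(v)))  # 0.0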

Example 2: _maybe_load_nccl_ops_so

def _maybe_load_nccl_ops_so():
  """Loads nccl ops so if it hasn't been loaded already."""

  with _module_lock:
    global _nccl_ops_so
    if not _nccl_ops_so:
      _nccl_ops_so = loader.load_op_library(
          resource_loader.get_path_to_datafile('_nccl_ops.so'))
Author: AnishShah, Project: tensorflow, Lines: 8, Source: nccl_ops.py
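The same lock-guarded, lazy loading pattern works for any custom op library; a minimal sketch (the module-level names and the _my_ops.so file are illustrative):

import threading

from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_module_lock = threading.Lock()
_my_ops_so = None


def _maybe_load_my_ops_so():
  """Loads _my_ops.so exactly once, even when called from several threads."""
  global _my_ops_so
  with _module_lock:
    if not _my_ops_so:
      _my_ops_so = loader.load_op_library(
          resource_loader.get_path_to_datafile('_my_ops.so'))
  return _my_ops_so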

Example 3: Load

def Load():
  """Load training ops library and return the loaded module."""
  with _ops_lock:
    global _training_ops
    if not _training_ops:
      ops_path = resource_loader.get_path_to_datafile(TRAINING_OPS_FILE)
      logging.info('data path: %s', ops_path)
      _training_ops = loader.load_op_library(ops_path)

      assert _training_ops, 'Could not load _training_ops.so'
  return _training_ops
Author: 1000sprites, Project: tensorflow, Lines: 11, Source: training_ops.py

Example 4: restore

  def restore(self, restored_tensors, unused_restored_shapes):
    """Restores the associated tree ensemble from 'restored_tensors'.

    Args:
      restored_tensors: the tensors that were loaded from a checkpoint.
      unused_restored_shapes: the shapes this object should conform to after
        restore. Not meaningful for trees.

    Returns:
      The operation that restores the state of the tree ensemble variable.
    """
    with ops.control_dependencies([self._create_op]):
      return self.deserialize(
          stamp_token=restored_tensors[0],
          num_updates=restored_tensors[1],
          partition_ids=restored_tensors[2],
          feature_ids=restored_tensors[3],
          gradients=restored_tensors[4],
          hessians=restored_tensors[5])

  def resource(self):
    return self._resource_handle


# Conditionally load ops, they might already be statically linked in.
try:
  _stats_accumulator_ops = loader.load_op_library(
      resource_loader.get_path_to_datafile("_stats_accumulator_ops.so"))
except (errors.NotFoundError, IOError):
  print("Error loading _stats_accumulator_ops.so")
Author: Joetz, Project: tensorflow, Lines: 30, Source: stats_accumulator_ops.py

Example 5: function_buffering_resource

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.data.python.ops import gen_prefetching_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_prefetching_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("../../_prefetching_ops.so"))


# TODO(rohanj): Add a python class that constructs resource in the __init__
# method and provides a get_next() that calls the prefetch op.
def function_buffering_resource(string_arg,
                                target_device,
                                shared_name,
                                f,
                                buffer_size,
                                thread_pool_size=1,
                                container="",
                                name=None):
  return gen_prefetching_ops.function_buffering_resource(
      string_arg=string_arg,
      target_device=target_device,
Author: AbhinavJain13, Project: tensorflow, Lines: 32, Source: prefetching_ops.py
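The excerpt above is cut off mid-call; a plausible continuation, assuming the wrapper is a straight pass-through of its remaining keyword arguments to the generated op (an assumption, not the verified original source):

      # Assumed continuation: forward the wrapper's remaining arguments.
      shared_name=shared_name,
      f=f,
      buffer_size=buffer_size,
      thread_pool_size=thread_pool_size,
      container=container,
      name=name)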

Example 6: _lstm_block_cell

import abc

from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader

_lstm_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_lstm_ops.so"))


# pylint: disable=invalid-name
def _lstm_block_cell(x,
                     cs_prev,
                     h_prev,
                     w,
                     b,
                     wci=None,
                     wcf=None,
                     wco=None,
                     forget_bias=None,
                     cell_clip=None,
                     use_peephole=None,
                     name=None):
Author: AutumnQYN, Project: tensorflow, Lines: 32, Source: lstm_ops.py
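In practice the fused kernel loaded above is used through the higher-level cell class in the same module rather than by calling _lstm_block_cell directly; a minimal usage sketch (assuming the TF 1.x contrib wrapper tf.contrib.rnn.LSTMBlockCell; shapes are illustrative):

import tensorflow as tf

# Assumption: LSTMBlockCell is the public wrapper around the fused
# _lstm_ops.so kernel in tf.contrib.rnn (TF 1.x).
inputs = tf.placeholder(tf.float32, [32, 20, 16])  # [batch, time, features]

cell = tf.contrib.rnn.LSTMBlockCell(num_units=64, forget_bias=1.0)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)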

Example 7:

# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python helper for loading kinesis ops and kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_dataset_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("../../_dataset_ops.so"))
Author: Ajaycs99, Project: tensorflow, Lines: 24, Source: kinesis_op_loader.py
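Note the relative path: get_path_to_datafile resolves paths against the directory of the module that calls it, so "../../_dataset_ops.so" points two directories above the loader module. A rough equivalent for intuition only (not the actual implementation, which inspects the caller's frame):

import os
import sys


def get_path_to_datafile_sketch(path, caller_module_name):
  """Roughly what resource_loader.get_path_to_datafile does: join the
  relative path onto the directory of the calling module's source file."""
  caller_dir = os.path.dirname(sys.modules[caller_module_name].__file__)
  return os.path.join(caller_dir, path)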

Example 8: _create_default_group_assignment

from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging

if platform.system() != "Windows":
  # pylint: disable=wildcard-import,unused-import,g-import-not-at-top
  from tensorflow.contrib.tpu.ops import gen_tpu_ops
  from tensorflow.contrib.tpu.ops.gen_tpu_ops import *

  from tensorflow.contrib.util import loader
  from tensorflow.python.platform import resource_loader
  # pylint: enable=wildcard-import,unused-import,g-import-not-at-top

  _tpu_ops = loader.load_op_library(
      resource_loader.get_path_to_datafile("_tpu_ops.so"))

  def _create_default_group_assignment():
    num_shards = tpu_function.get_tpu_context().number_of_shards
    if num_shards is None:
      logging.warning(
          "cross_replica_sum should be used within a tpu_shard_context, but "
          "got unset number_of_shards. Assuming 1.")
      num_shards = 1
    group_assignment = [list(range(num_shards))]
    return group_assignment

  def all_to_all(x,
                 concat_dimension,
                 split_dimension,
                 split_count,
Author: baojianzhou, Project: tensorflow, Lines: 31, Source: tpu_ops.py
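For intuition, the default group assignment simply places every shard in a single replica group; a small worked example of the same computation for four shards:

# Worked example of the default group assignment with num_shards = 4:
num_shards = 4
group_assignment = [list(range(num_shards))]
print(group_assignment)  # [[0, 1, 2, 3]] -- one group containing all replicas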

Example 9: print

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Split handler custom ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=wildcard-import
from tensorflow.contrib.boosted_trees.python.ops.gen_ensemble_optimizer_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import errors
from tensorflow.python.platform import resource_loader

# Conditionally load ops, they might already be statically linked in.
try:
  _ensemble_optimizer_ops = loader.load_op_library(
      resource_loader.get_path_to_datafile('_ensemble_optimizer_ops.so'))
except (errors.NotFoundError, IOError):
  print('Error loading _ensemble_optimizer_ops.so')
Author: AutumnQYN, Project: tensorflow, Lines: 30, Source: ensemble_optimizer_ops.py

Example 10:

# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for the reduce slice operators."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.reduce_slice_ops.ops import gen_reduce_slice_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader


_reduce_slice_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_reduce_slice_ops.so"))


reduce_slice_sum = gen_reduce_slice_ops.reduce_slice_sum
reduce_slice_prod = gen_reduce_slice_ops.reduce_slice_prod
reduce_slice_max = gen_reduce_slice_ops.reduce_slice_max
reduce_slice_min = gen_reduce_slice_ops.reduce_slice_min
Author: 1000sprites, Project: tensorflow, Lines: 30, Source: reduce_slice_ops.py

Example 11: obtain_next

# ==============================================================================
"""Python wrapper for input_pipeline_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import resource_loader


_input_pipeline_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_input_pipeline_ops.so"))


def obtain_next(string_list_tensor, counter):
  """Basic wrapper for the ObtainNextOp.

  Args:
    string_list_tensor: A tensor that is a list of strings
    counter: an int64 ref tensor to keep track of which element is returned.

  Returns:
    An op that produces the element at counter + 1 in the list, round
    robin style.
  """
  return _input_pipeline_ops.obtain_next(string_list_tensor, counter)
Author: Hwhitetooth, Project: tensorflow, Lines: 31, Source: input_pipeline_ops.py
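A minimal usage sketch (TF 1.x session; the import path is assumed from the source file above, and the counter starts at -1 so the first call returns element 0):

import tensorflow as tf

# Assumed import path, based on the source file shown above.
from tensorflow.contrib.input_pipeline.python.ops import input_pipeline_ops

string_list = tf.constant(["fileA", "fileB", "fileC"])
counter = tf.Variable(-1, dtype=tf.int64, name="obtain_next_counter")
next_element = input_pipeline_ops.obtain_next(string_list, counter)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(4):
    print(sess.run(next_element))  # fileA, fileB, fileC, fileA (round robin)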

Example 12: bucketize

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for bucketization operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader

_bucketization_op = loader.load_op_library(
    resource_loader.get_path_to_datafile("_bucketization_op.so"))


def bucketize(input_tensor, boundaries, name=None):
  """Bucketizes input_tensor by given boundaries.

  See bucketize_op.cc for more details.

  Args:
    input_tensor: A `Tensor` which will be bucketized.
    boundaries: A list of floats that gives the boundaries. It has to be sorted.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `Tensor` with type int32 which indicates the corresponding bucket for
      each value in `input_tensor`.
Author: 821760408-sp, Project: tensorflow, Lines: 32, Source: bucketization_op.py
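A minimal usage sketch (TF 1.x; the import path is assumed from the source file above, and the sample values mirror the canonical Bucketize op documentation):

import tensorflow as tf

# Assumed import path, based on the source file shown above.
from tensorflow.contrib.layers.python.ops import bucketization_op

values = tf.constant([[-5.0, 10000.0], [150.0, 10.0], [5.0, 100.0]])
buckets = bucketization_op.bucketize(values, boundaries=[0.0, 10.0, 100.0])

with tf.Session() as sess:
  # Each output is the number of boundaries that are <= the input value.
  print(sess.run(buckets))  # [[0 3] [3 2] [1 3]]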

Example 13: random_hsv_in_yiq

# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for distort_image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import resource_loader

_distort_image_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile('_distort_image_ops.so'))


# pylint: disable=invalid-name
def random_hsv_in_yiq(image,
                      max_delta_hue=0,
                      lower_saturation=1,
                      upper_saturation=1,
                      lower_value=1,
                      upper_value=1,
                      seed=None):
  """Adjust hue, saturation, value of an RGB image randomly in YIQ color space.

  Equivalent to `adjust_yiq_hsv()` but uses a `delta_h` randomly
  picked in the interval `[-max_delta_hue, max_delta_hue]`, a `scale_saturation`
  randomly picked in the interval `[lower_saturation, upper_saturation]`, and
Author: 1000sprites, Project: tensorflow, Lines: 32, Source: distort_image_ops.py
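A minimal usage sketch (TF 1.x; the import path is assumed from the source file above and the parameter values are illustrative):

import tensorflow as tf

# Assumed import path, based on the source file shown above.
from tensorflow.contrib.image.python.ops import distort_image_ops

image = tf.placeholder(tf.float32, [224, 224, 3])  # RGB image

# Randomly perturb hue (bounded by max_delta_hue) and scale saturation and
# value within the given ranges, all computed in YIQ space by the custom op.
distorted = distort_image_ops.random_hsv_in_yiq(
    image,
    max_delta_hue=0.2,
    lower_saturation=0.5,
    upper_saturation=1.5,
    lower_value=0.5,
    upper_value=1.5,
    seed=42)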

Example 14:

# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import
from tensorflow.contrib.periodic_resample.python.ops import gen_periodic_resample_op

from tensorflow.contrib.periodic_resample.python.ops.gen_periodic_resample_op import periodic_resample

from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
# pylint: enable=unused-import

_periodic_resample_op = loader.load_op_library(
    resource_loader.get_path_to_datafile('_periodic_resample_op.so'))
Author: AndrewTwinz, Project: tensorflow, Lines: 30, Source: periodic_resample_op.py

Example 15: all_sum

# ==============================================================================
"""Ops for GPU collective operations implemented using NVIDIA nccl."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import threading

from tensorflow.contrib.nccl.ops import gen_nccl_ops
from tensorflow.contrib.util import loader
from tensorflow.python.eager import context
from tensorflow.python.framework import device
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader

_nccl_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile('_nccl_ops.so'))


def all_sum(tensors):
  """Returns a list of tensors with the all-reduce sum across `tensors`.

  The computation is done with an all-reduce operation, so if only some of the
  returned tensors are evaluated then the computation will hang.

  Args:
    tensors: The input tensors across which to sum; must be assigned
      to GPU devices.

  Returns:
    List of tensors, each with the sum of the input tensors, where tensor i has
    the same device as `tensors[i]`.
Author: AndrewTwinz, Project: tensorflow, Lines: 32, Source: nccl_ops.py
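A minimal usage sketch (TF 1.x with two GPUs; device placement is illustrative, and every returned tensor must be fetched together or the NCCL all-reduce will hang, as the docstring warns):

import tensorflow as tf

# Assumption: all_sum is exported via tf.contrib.nccl (TF 1.x).
from tensorflow.contrib import nccl

towers = []
for i in range(2):
  with tf.device('/gpu:%d' % i):
    towers.append(tf.random_normal([1000]))

# One summed tensor per input, each placed on its input's GPU.
summed = nccl.all_sum(towers)

with tf.Session() as sess:
  results = sess.run(summed)  # fetch all outputs together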


Note: The tensorflow.contrib.util.loader.load_op_library examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and any use or redistribution should follow the corresponding project licenses. Please do not repost without permission.