

Python tensorflow_hub.Module Method Code Examples

This article collects typical usage examples of the tensorflow_hub.Module method in Python. If you have been wondering what tensorflow_hub.Module does, how to call it, or what real uses of it look like, the curated code examples below may help. You can also explore further usage examples from the tensorflow_hub package.


The following 15 code examples of the tensorflow_hub.Module method are sorted by popularity.

Example 1: create_module_graph

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def create_module_graph(module_spec):
  """Creates a graph and loads Hub Module into it.

  Args:
    module_spec: the hub.ModuleSpec for the image module being used.

  Returns:
    graph: the tf.Graph that was created.
    bottleneck_tensor: the bottleneck values output by the module.
    resized_input_tensor: the input images, resized as expected by the module.
    wants_quantization: a boolean, whether the module has been instrumented
      with fake quantization ops.
  """
  height, width = hub.get_expected_image_size(module_spec)
  with tf.Graph().as_default() as graph:
    resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])
    m = hub.Module(module_spec)
    bottleneck_tensor = m(resized_input_tensor)
    wants_quantization = any(node.op in FAKE_QUANT_OPS
                             for node in graph.as_graph_def().node)
  return graph, bottleneck_tensor, resized_input_tensor, wants_quantization 
Author: hthuwal, Project: sign-language-gesture-recognition, Lines: 23, Source: retrain.py
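
For context, a minimal sketch of how create_module_graph might be driven. The module URL, the dummy batch, and the session code below are illustrative assumptions, not part of the original retrain.py excerpt:

import numpy as np
import tensorflow as tf
import tensorflow_hub as hub

# Any image feature-vector module works; this handle is an assumption.
MODULE_URL = "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/3"

module_spec = hub.load_module_spec(MODULE_URL)
graph, bottleneck_tensor, resized_input_tensor, wants_quantization = (
    create_module_graph(module_spec))

with tf.Session(graph=graph) as sess:
  sess.run(tf.global_variables_initializer())
  # Feed a dummy batch of images at the size the module expects.
  height, width = hub.get_expected_image_size(module_spec)
  dummy_images = np.zeros((1, height, width, 3), dtype=np.float32)
  bottlenecks = sess.run(bottleneck_tensor,
                         feed_dict={resized_input_tensor: dummy_images})
  print(bottlenecks.shape)  # (1, 2048) for Inception V3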

Example 2: export_module_spec_with_checkpoint

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def export_module_spec_with_checkpoint(module_spec,
                                       checkpoint_path,
                                       export_path,
                                       scope_prefix=""):
  """Exports given checkpoint as tfhub module with given spec."""

  # The main requirement is that it is possible to know how to map from
  # module variable name to checkpoint variable name.
  # This is trivial if the original code used variable scopes,
  # but can be messy if the variables to export are intertwined
  # with variables that are not exported.
  with tf.Graph().as_default():
    m = hub.Module(module_spec)
    assign_map = {
        scope_prefix + name: value for name, value in m.variable_map.items()
    }
    tf.train.init_from_checkpoint(checkpoint_path, assign_map)
    init_op = tf.initializers.global_variables()
    with tf.Session() as session:
      session.run(init_op)
      m.export(export_path, session) 
Author: tensorflow, Project: tensor2tensor, Lines: 23, Source: export.py
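
A hedged usage sketch: the spec construction and the paths below are placeholders, since the export.py excerpt does not show how its callers build them.

# module_fn is a hypothetical function that builds the module graph;
# hub.create_module_spec is the standard way to turn it into a ModuleSpec.
module_spec = hub.create_module_spec(module_fn)
export_module_spec_with_checkpoint(
    module_spec,
    checkpoint_path="/tmp/model/model.ckpt-100000",
    export_path="/tmp/exported_hub_module",
    scope_prefix="")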

Example 3: from_hub_module

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def from_hub_module(cls, hub_module, use_spm=True):
    """Get the vocab file and casing info from the Hub module."""
    with tf.Graph().as_default():
      albert_module = hub.Module(hub_module)
      tokenization_info = albert_module(signature="tokenization_info",
                                        as_dict=True)
      with tf.Session() as sess:
        vocab_file, do_lower_case = sess.run(
            [tokenization_info["vocab_file"],
             tokenization_info["do_lower_case"]])
    spm_model_file = None  # avoids a NameError when use_spm is False
    if use_spm:
      spm_model_file = vocab_file
      vocab_file = None
    return FullTokenizer(
        vocab_file=vocab_file, do_lower_case=do_lower_case,
        spm_model_file=spm_model_file) 
Author: google-research, Project: albert, Lines: 18, Source: tokenization.py
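
A possible call site, assuming an ALBERT handle on tfhub.dev (the handle and the sentence are illustrative):

tokenizer = FullTokenizer.from_hub_module(
    "https://tfhub.dev/google/albert_base/1", use_spm=True)
tokens = tokenizer.tokenize("TensorFlow Hub makes model reuse easy.")
ids = tokenizer.convert_tokens_to_ids(tokens)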

Example 4: _create_model_from_hub

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def _create_model_from_hub(hub_module, is_training, input_ids, input_mask,
                           segment_ids):
  """Creates an ALBERT model from TF-Hub."""
  tags = set()
  if is_training:
    tags.add("train")
  albert_module = hub.Module(hub_module, tags=tags, trainable=True)
  albert_inputs = dict(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids)
  albert_outputs = albert_module(
      inputs=albert_inputs,
      signature="tokens",
      as_dict=True)
  return (albert_outputs["pooled_output"], albert_outputs["sequence_output"]) 
Author: google-research, Project: albert, Lines: 18, Source: fine_tuning_utils.py
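
A sketch of wiring this helper into a graph; the placeholder shapes and the hub handle are assumptions, not taken from fine_tuning_utils.py:

max_seq_length = 128
input_ids = tf.placeholder(tf.int32, [None, max_seq_length])
input_mask = tf.placeholder(tf.int32, [None, max_seq_length])
segment_ids = tf.placeholder(tf.int32, [None, max_seq_length])

# pooled_output: [batch, hidden]; sequence_output: [batch, seq_len, hidden]
pooled_output, sequence_output = _create_model_from_hub(
    "https://tfhub.dev/google/albert_base/1",
    is_training=False,
    input_ids=input_ids,
    input_mask=input_mask,
    segment_ids=segment_ids)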

Example 5: __init__

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def __init__(self, uri, batch_size=100):
        """Create a new UseEncoderClient object

        Args:
            uri: The uri to the tensorflow_hub USE module
            batch_size: maximum number of sentences to encode at once
        """
        self._batch_size = batch_size
        self._session = tf.Session(graph=tf.Graph())
        with self._session.graph.as_default():
            glog.info("Loading %s model from tensorflow hub", uri)
            embed_fn = tf_hub.Module(uri)
            self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string)
            self._embeddings = embed_fn(self._fed_texts)
            encoding_info = embed_fn.get_output_info_dict().get('default')
            if encoding_info:
                self._encoding_dim = encoding_info.get_shape()[-1].value
            init_ops = (
                tf.global_variables_initializer(), tf.tables_initializer())
        glog.info("Initializing graph.")
        self._session.run(init_ops) 
Author: PolyAI-LDN, Project: polyai-models, Lines: 23, Source: encoder_clients.py
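
The client's encode method is not shown in this excerpt; here is a self-contained sketch of the same load-and-run pattern, with the USE v2 handle assumed:

import tensorflow as tf
import tensorflow_hub as tf_hub

session = tf.Session(graph=tf.Graph())
with session.graph.as_default():
    embed_fn = tf_hub.Module(
        "https://tfhub.dev/google/universal-sentence-encoder/2")
    fed_texts = tf.placeholder(shape=[None], dtype=tf.string)
    embeddings = embed_fn(fed_texts)
    init_ops = (tf.global_variables_initializer(), tf.tables_initializer())
session.run(init_ops)

vectors = session.run(embeddings,
                      feed_dict={fed_texts: ["hello world", "how are you"]})
print(vectors.shape)  # (2, 512) for USE v2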

Example 6: load_model

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def load_model(self, model: str, model_path: str, max_seq_length: int):
        g = tf.Graph()
        with g.as_default():
            hub_module = hub.Module(model_path)
            self.tokens = tf.placeholder(dtype=tf.string, shape=[None, max_seq_length])
            self.sequence_len = tf.placeholder(dtype=tf.int32, shape=[None])

            elmo_inputs = dict(
                tokens=self.tokens,
                sequence_len=self.sequence_len
            )
            self.elmo_outputs = hub_module(elmo_inputs, signature="tokens", as_dict=True)
            init_op = tf.group([tf.global_variables_initializer()])
        g.finalize()
        self.sess = tf.Session(graph=g)
        self.sess.run(init_op)

        self.model_name = model
        self.max_seq_length = max_seq_length 
Author: amansrivastava17, Project: embedding-as-service, Lines: 21, Source: __init__.py
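
Assuming an instance named encoder, a sketch of feeding the loaded ELMo graph (the handle, the padding convention, and the batch are assumptions):

encoder.load_model("elmo", "https://tfhub.dev/google/elmo/2", max_seq_length=6)

# Tokenized, padded batch; "" is assumed to be the padding token here.
batch = [["the", "cat", "sat", "", "", ""]]
outputs = encoder.sess.run(
    encoder.elmo_outputs,
    feed_dict={encoder.tokens: batch, encoder.sequence_len: [3]})
print(outputs["elmo"].shape)  # (1, 6, 1024) for the elmo/2 module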

Example 7: create_module_graph

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def create_module_graph(module_spec):
    """Creates a graph and loads Hub Module into it.

    Args:
      module_spec: the hub.ModuleSpec for the image module being used.

    Returns:
      graph: the tf.Graph that was created.
      bottleneck_tensor: the bottleneck values output by the module.
      resized_input_tensor: the input images, resized as expected by the module.
      wants_quantization: a boolean, whether the module has been instrumented
        with fake quantization ops.
    """
    height, width = hub.get_expected_image_size(module_spec)
    with tf.Graph().as_default() as graph:
        resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])
        m = hub.Module(module_spec)
        bottleneck_tensor = m(resized_input_tensor)
        wants_quantization = any(node.op in FAKE_QUANT_OPS
                                 for node in graph.as_graph_def().node)
    return graph, bottleneck_tensor, resized_input_tensor, wants_quantization 
Author: joelbarmettlerUZH, Project: FaceClassification_Tensorflow, Lines: 23, Source: retrain.py

Example 8: __init__

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def __init__(self):

    logging.info('Initialising embedding utility...')
    embed_module = hub.Module(MODULE_URL)
    placeholder = tf.placeholder(dtype=tf.string)
    embed = embed_module(placeholder)
    session = tf.Session()
    session.run([tf.global_variables_initializer(), tf.tables_initializer()])
    logging.info('tf.Hub module is loaded.')

    def _embeddings_fn(sentences):
      computed_embeddings = session.run(
        embed, feed_dict={placeholder: sentences})
      return computed_embeddings

    self.embedding_fn = _embeddings_fn
    logging.info('Embedding utility initialised.') 
Author: GoogleCloudPlatform, Project: realtime-embeddings-matching, Lines: 19, Source: embedding.py
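
Once constructed, the stored closure can be called directly; a brief, assumed usage (the enclosing class name is not shown in the excerpt, so EmbedUtil here is hypothetical):

util = EmbedUtil()  # hypothetical name for the class this __init__ belongs to
vectors = util.embedding_fn(["a sentence to embed", "another one"])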

Example 9: __init__

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def __init__(self, hub_path=gin.REQUIRED, name="HubEmbedding", **kwargs):
    """Constructs a HubEmbedding.

    Args:
      hub_path: Path to the TFHub module.
      name: String with the name of the model.
      **kwargs: Other keyword arguments passed to tf.keras.Model.
    """
    super(HubEmbedding, self).__init__(name=name, **kwargs)

    def _embedder(x):
      embedder_module = hub.Module(hub_path)
      return embedder_module(dict(images=x), signature="representation")

    self.embedding_layer = relational_layers.MultiDimBatchApply(
        tf.keras.layers.Lambda(_embedder)) 
Author: google-research, Project: disentanglement_lib, Lines: 18, Source: models.py

Example 10: __init__

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def __init__(self,
                 model_path=None,
                 batch_size=32):
        self.resources_dir = os.path.join(os.path.dirname(__file__), 'resources')

        if model_path is None:
            model_path = os.path.join('pretrained_USE', '1fb57c3ffe1a38479233ee9853ddd7a8ac8a8c47')
        if os.path.exists(os.path.join(self.resources_dir, model_path)):
            self.url = os.path.join(self.resources_dir, model_path)
        else:
            os.environ['TFHUB_CACHE_DIR'] = os.path.join(self.resources_dir, 'pretrained_USE')
            self.url = "https://tfhub.dev/google/universal-sentence-encoder/2"

        # load the use model from saved location or tensorflow hub
        self.embed = hub.Module(self.url, trainable=True)
        self.sess = tf.Session()
        self.epochs = 10
        self.lr = 0.01
        self.batch_size = batch_size
        self.sess.run([tf.global_variables_initializer(),
                       tf.tables_initializer()]) 
Author: IBM, Project: lale, Lines: 23, Source: use_pretrained_encoder.py

Example 11: __init__

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def __init__(self, uri):
        """Create a new `USEDualEncoder` object."""
        self._session = tf.Session(graph=tf.Graph())
        with self._session.graph.as_default():
            glog.info("Loading %s model from tensorflow hub", uri)
            embed_fn = tensorflow_hub.Module(uri)
            self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string)
            self._context_embeddings = embed_fn(
                dict(input=self._fed_texts),
                signature="question_encoder",
                as_dict=True,
            )['outputs']
            empty_strings = tf.fill(
                tf.shape(self._fed_texts), ""
            )
            self._response_embeddings = embed_fn(
                dict(input=self._fed_texts, context=empty_strings),
                signature="response_encoder",
                as_dict=True,
            )['outputs']
            init_ops = (
                tf.global_variables_initializer(), tf.tables_initializer())
        glog.info("Initializing graph.")
        self._session.run(init_ops) 
Author: PolyAI-LDN, Project: conversational-datasets, Lines: 26, Source: vector_based.py
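
The excerpt defines only the graph endpoints, so this sketch evaluates them directly; the USE-QA handle is an assumption, and real code would wrap these runs in public encode methods:

encoder = USEDualEncoder(
    "https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/1")
contexts = encoder._session.run(
    encoder._context_embeddings,
    feed_dict={encoder._fed_texts: ["What time is it?"]})
responses = encoder._session.run(
    encoder._response_embeddings,
    feed_dict={encoder._fed_texts: ["It is noon."]})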

Example 12: _run_elmo

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def _run_elmo():
    ### embedding_file = "https://tfhub.dev/google/elmo/2"
    elmo = hub.Module(embedding_file, trainable=True)

    words = [word.strip().split()[0] for word in open(vocab_file).readlines()]
    gpu_options = tf.GPUOptions(allow_growth=True)
    session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)

    fw = open(out_file, 'w')
    fw.write('{} {}\n'.format(len(words), emb_dim))
    batch_size = 4096
    # Build the embedding op once, with placeholders, so the graph does not
    # grow on every batch.
    tokens_ph = tf.placeholder(tf.string, shape=[1, None])
    seq_len_ph = tf.placeholder(tf.int32, shape=[1])
    embeddings = elmo(
        inputs={
            "tokens": tokens_ph,
            "sequence_len": seq_len_ph
        },
        signature="tokens",
        as_dict=True)["elmo"]
    with tf.Session(config=session_config) as sess:
        # Initialize variables once, before the batch loop.
        sess.run(tf.global_variables_initializer())
        i = 0
        while i < len(words):
            print('batch {}/{}...'.format(i // batch_size, len(words) // batch_size))
            batch_words = words[i:(i + batch_size)]
            # Feed the batch as one "sentence" with its true token count so
            # every position is actually encoded.
            _emb = sess.run(embeddings, feed_dict={
                tokens_ph: [batch_words],
                seq_len_ph: [len(batch_words)]})[0]
            for word, emb in zip(batch_words, np.array(_emb)):
                fw.write('{} {}\n'.format(word, ' '.join(str(x) for x in emb)))
            i += batch_size
    fw.close() 
Author: yaserkl, Project: TransferRL, Lines: 31, Source: elmo_embedding.py

Example 13: create_tokenizer_from_hub_module

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def create_tokenizer_from_hub_module():
  """Get the vocab file and casing info from the Hub module."""
  with tf.Graph().as_default():
    bert_module = hub.Module(BERT_MODEL_HUB)
    tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
    with tf.Session() as sess:
      vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
                                            tokenization_info["do_lower_case"]])
      
  return FullTokenizer(
      vocab_file=vocab_file, do_lower_case=do_lower_case) 
Author: diffbot, Project: knowledge-net, Lines: 13, Source: bert_wrapper.py
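
BERT_MODEL_HUB is a module-level constant in the original bert_wrapper.py; a typical value and call might look like this (the handle is an assumption):

BERT_MODEL_HUB = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"

tokenizer = create_tokenizer_from_hub_module()
print(tokenizer.tokenize("An example sentence for the BERT tokenizer."))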

Example 14: create_tokenizer_from_hub_module

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def create_tokenizer_from_hub_module(bert_hub_module_handle):
  """Get the vocab file and casing info from the Hub module."""
  with tf.Graph().as_default():
    bert_module = hub.Module(bert_hub_module_handle)
    tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
    with tf.Session() as sess:
      vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
                                            tokenization_info["do_lower_case"]])
  return tokenization.FullTokenizer(
      vocab_file=vocab_file, do_lower_case=do_lower_case) 
Author: Nagakiran1, Project: Extending-Google-BERT-as-Question-and-Answering-model-and-Chatbot, Lines: 12, Source: run_classifier_with_tfhub.py

Example 15: from_hub_module

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import Module [as alias]
def from_hub_module(cls, hub_module, spm_model_file):
        """Get the vocab file and casing info from the Hub module."""
        import tensorflow_hub as hub
        with tf.Graph().as_default():
            albert_module = hub.Module(hub_module)
            tokenization_info = albert_module(signature="tokenization_info",
                                              as_dict=True)
            with tf.Session() as sess:
                vocab_file, do_lower_case = sess.run(
                    [tokenization_info["vocab_file"],
                     tokenization_info["do_lower_case"]])
        return FullTokenizer(
            vocab_file=vocab_file, do_lower_case=do_lower_case,
            spm_model_file=spm_model_file) 
Author: kpe, Project: bert-for-tf2, Lines: 16, Source: albert_tokenization.py


Note: The tensorflow_hub.Module examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects, and copyright remains with the original authors; consult each project's License before distributing or reusing the code. Do not republish without permission.