

Python lookup_ops.index_table_from_tensor method code examples

This article collects typical usage examples of the Python method tensorflow.python.ops.lookup_ops.index_table_from_tensor. If you are wondering what lookup_ops.index_table_from_tensor does or how to call it, the curated examples below should help. You can also explore further usage examples from the enclosing module, tensorflow.python.ops.lookup_ops.


The following shows 15 code examples of the lookup_ops.index_table_from_tensor method, sorted by popularity by default.
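
Before diving into the examples, here is a minimal, self-contained sketch of typical index_table_from_tensor usage. It is illustrative only and assumes a TensorFlow 1.x graph-mode session (as in the examples below); it is not taken from any of the listed projects.

import tensorflow as tf
from tensorflow.python.ops import lookup_ops

# Build a string-to-id lookup table from an in-memory vocabulary.
# Keys not found in the vocabulary map to default_value (-1 by default).
vocab = tf.constant(["dog", "guinea pig", "cat"])
table = lookup_ops.index_table_from_tensor(
    vocabulary_list=vocab, default_value=-1, name="demo_lookup")

ids = table.lookup(tf.constant(["cat", "dog", "hamster"]))

with tf.Session() as sess:
  sess.run(tf.tables_initializer())  # lookup tables must be initialized
  print(sess.run(ids))               # [ 2  0 -1]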

Example 1: _transform_feature

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))

    _assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))

    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_tensor` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.to_int64(input_tensor)

    return lookup_ops.index_table_from_tensor(
        vocabulary_list=tuple(self.vocabulary_list),
        default_value=self.default_value,
        dtype=key_dtype,
        name='{}_lookup'.format(self.key)).lookup(input_tensor) 
Author: ryfeus | Project: lambda-packs | Lines: 26 | Source: feature_column.py

Example 2: _label_ids

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def _label_ids(self, labels):
    """Converts labels to integer id space."""
    if self._label_vocabulary is None:
      if not labels.dtype.is_integer:
        raise ValueError(
            'Labels dtype should be integer. Instead got {}.'.format(
                labels.dtype))
      label_ids = labels
    else:
      if labels.dtype != tf.dtypes.string:
        raise ValueError('Labels dtype should be string if there is a '
                         'vocabulary. Instead got {}'.format(labels.dtype))
      label_ids = lookup_ops.index_table_from_tensor(
          vocabulary_list=tuple(self._label_vocabulary),
          name='class_id_lookup').lookup(labels)
    return _assert_range(label_ids, self._n_classes) 
Author: tensorflow | Project: estimator | Lines: 18 | Source: head.py

Example 3: _transform_feature

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))

    _assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))

    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_tensor` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.to_int64(input_tensor)

    return lookup_ops.index_table_from_tensor(
        vocabulary_list=tuple(self.vocabulary_list),
        default_value=self.default_value,
        num_oov_buckets=self.num_oov_buckets,
        dtype=key_dtype,
        name='{}_lookup'.format(self.key)).lookup(input_tensor) 
Author: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 27 | Source: feature_column.py

Example 4: testDecodeExampleWithBranchedLookup

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def testDecodeExampleWithBranchedLookup(self):

    example = example_pb2.Example(features=feature_pb2.Features(feature={
        'image/object/class/text': self._BytesFeatureFromList(
            np.array(['cat', 'dog', 'guinea pig'])),
    }))
    serialized_example = example.SerializeToString()
    # 'dog' -> 0, 'guinea pig' -> 1, 'cat' -> 2
    table = lookup_ops.index_table_from_tensor(
        constant_op.constant(['dog', 'guinea pig', 'cat']))

    with self.test_session() as sess:
      sess.run(lookup_ops.tables_initializer())

      serialized_example = array_ops.reshape(serialized_example, shape=[])

      keys_to_features = {
          'image/object/class/text': parsing_ops.VarLenFeature(dtypes.string),
      }

      items_to_handlers = {
          'labels':
              tf_example_decoder.LookupTensor('image/object/class/text', table),
      }

      decoder = slim_example_decoder.TFExampleDecoder(keys_to_features,
                                                      items_to_handlers)
      obtained_class_ids = decoder.decode(serialized_example)[0].eval()

    self.assertAllClose([2, 0, 1], obtained_class_ids) 
Author: cagbal | Project: ros_people_object_detection_tensorflow | Lines: 32 | Source: tf_example_decoder_test.py

Example 5: testGetInferIterator

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def testGetInferIterator(self):
    src_vocab_table = lookup_ops.index_table_from_tensor(
        tf.constant(["a", "b", "c", "eos", "sos"]))
    src_dataset = tf.data.Dataset.from_tensor_slices(
        tf.constant(["c c a", "c a", "d", "f e a g"]))
    hparams = tf.contrib.training.HParams(
        random_seed=3,
        eos="eos",
        sos="sos")
    batch_size = 2
    dataset = iterator_utils.get_infer_iterator(
        src_dataset=src_dataset,
        src_vocab_table=src_vocab_table,
        batch_size=batch_size,
        eos=hparams.eos)
    table_initializer = tf.tables_initializer()
    iterator = dataset.make_initializable_iterator()
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(table_initializer)
      sess.run(iterator.initializer)
      features = sess.run(get_next)

      self.assertAllEqual(
          [
              [2, 2, 0],  # c c a
              [2, 0, 3]
          ],  # c a eos
          features["source"])
      self.assertAllEqual([3, 2], features["source_sequence_length"]) 
Author: mlperf | Project: training_results_v0.5 | Lines: 32 | Source: iterator_utils_test.py

Example 6: create_test_iterator

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def create_test_iterator(hparams, mode):
  """Create test iterator."""
  src_vocab_table = lookup_ops.index_table_from_tensor(
      tf.constant([hparams.eos, "a", "b", "c", "d"]))
  tgt_vocab_mapping = tf.constant([hparams.sos, hparams.eos, "a", "b", "c"])
  tgt_vocab_table = lookup_ops.index_table_from_tensor(tgt_vocab_mapping)
  if mode == tf.contrib.learn.ModeKeys.INFER:
    reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_tensor(
        tgt_vocab_mapping)

  src_dataset = tf.contrib.data.Dataset.from_tensor_slices(
      tf.constant(["a a b b c", "a b b"]))

  if mode != tf.contrib.learn.ModeKeys.INFER:
    tgt_dataset = tf.contrib.data.Dataset.from_tensor_slices(
        tf.constant(["a b c b c", "a b c b"]))
    return (
        iterator_utils.get_iterator(
            src_dataset=src_dataset,
            tgt_dataset=tgt_dataset,
            src_vocab_table=src_vocab_table,
            tgt_vocab_table=tgt_vocab_table,
            batch_size=hparams.batch_size,
            sos=hparams.sos,
            eos=hparams.eos,
            source_reverse=hparams.source_reverse,
            random_seed=hparams.random_seed,
            num_buckets=hparams.num_buckets),
        src_vocab_table,
        tgt_vocab_table)
  else:
    return (
        iterator_utils.get_infer_iterator(
            src_dataset=src_dataset,
            src_vocab_table=src_vocab_table,
            eos=hparams.eos,
            source_reverse=hparams.source_reverse,
            batch_size=hparams.batch_size),
        src_vocab_table,
        tgt_vocab_table,
        reverse_tgt_vocab_table) 
Author: steveash | Project: NETransliteration-COLING2018 | Lines: 43 | Source: common_test_utils.py

Example 7: create_test_iterator

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def create_test_iterator(hparams, mode):
  """Create test iterator."""
  src_vocab_table = lookup_ops.index_table_from_tensor(
      tf.constant([hparams.eos, "a", "b", "c", "d"]))
  tgt_vocab_mapping = tf.constant([hparams.sos, hparams.eos, "a", "b", "c"])
  tgt_vocab_table = lookup_ops.index_table_from_tensor(tgt_vocab_mapping)
  if mode == tf.contrib.learn.ModeKeys.INFER:
    reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_tensor(
        tgt_vocab_mapping)

  src_dataset = tf.data.Dataset.from_tensor_slices(
      tf.constant(["a a b b c", "a b b"]))

  if mode != tf.contrib.learn.ModeKeys.INFER:
    tgt_dataset = tf.data.Dataset.from_tensor_slices(
        tf.constant(["a b c b c", "a b c b"]))
    return (
        iterator_utils.get_iterator(
            src_dataset=src_dataset,
            tgt_dataset=tgt_dataset,
            src_vocab_table=src_vocab_table,
            tgt_vocab_table=tgt_vocab_table,
            batch_size=hparams.batch_size,
            sos=hparams.sos,
            eos=hparams.eos,
            random_seed=hparams.random_seed,
            num_buckets=hparams.num_buckets),
        src_vocab_table,
        tgt_vocab_table)
  else:
    return (
        iterator_utils.get_infer_iterator(
            src_dataset=src_dataset,
            src_vocab_table=src_vocab_table,
            eos=hparams.eos,
            batch_size=hparams.batch_size),
        src_vocab_table,
        tgt_vocab_table,
        reverse_tgt_vocab_table) 
Author: snuspl | Project: parallax | Lines: 41 | Source: common_test_utils.py

Example 8: testDecodeExampleWithLookup

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def testDecodeExampleWithLookup(self):

    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                'image/object/class/text':
                    self._BytesFeature(np.array(['cat', 'dog', 'guinea pig'])),
            }))
    serialized_example = example.SerializeToString()
    # 'dog' -> 0, 'guinea pig' -> 1, 'cat' -> 2
    table = lookup_ops.index_table_from_tensor(
        tf.constant(['dog', 'guinea pig', 'cat']))

    with self.cached_session() as sess:
      sess.run(lookup_ops.tables_initializer())

      serialized_example = array_ops.reshape(serialized_example, shape=[])

      keys_to_features = {
          'image/object/class/text': parsing_ops.VarLenFeature(tf.string),
      }

      items_to_handlers = {
          'labels':
              tfexample_decoder.LookupTensor('image/object/class/text', table),
      }

      decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
                                                   items_to_handlers)
      obtained_class_ids = decoder.decode(serialized_example)[0].eval()

    self.assertAllClose([2, 0, 1], obtained_class_ids) 
Author: google-research | Project: tf-slim | Lines: 34 | Source: tfexample_decoder_test.py

Example 9: create_loss

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def create_loss(self, features, mode, logits, labels):
    """See `Head`."""
    del mode  # Unused for this head.
    logits = ops.convert_to_tensor(logits)
    labels = _check_dense_labels_match_logits_and_reshape(
        labels=labels, logits=logits, expected_labels_dimension=1)
    if self._label_vocabulary is not None:
      labels = lookup_ops.index_table_from_tensor(
          vocabulary_list=tuple(self._label_vocabulary),
          name='class_id_lookup').lookup(labels)
    labels = tf.cast(labels, dtype=tf.dtypes.float32)
    labels = _assert_range(labels, n_classes=2)
    if self._loss_fn:
      unweighted_loss = _call_loss_fn(
          loss_fn=self._loss_fn,
          labels=labels,
          logits=logits,
          features=features,
          expected_loss_dim=1)
    else:
      unweighted_loss = tf.compat.v1.nn.sigmoid_cross_entropy_with_logits(
          labels=labels, logits=logits)
    weights = _get_weights_and_check_match_logits(
        features=features, weight_column=self._weight_column, logits=logits)
    training_loss = tf.compat.v1.losses.compute_weighted_loss(
        unweighted_loss, weights=weights, reduction=self._loss_reduction)
    return LossSpec(
        training_loss=training_loss,
        unreduced_loss=unweighted_loss,
        weights=weights,
        processed_labels=labels) 
Author: tensorflow | Project: estimator | Lines: 33 | Source: head.py

Example 10: _class_id_table

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def _class_id_table(self):
    """Creates a lookup table for class_id.

    In eager execution, this lookup table will be lazily created on the first
    call of `self._class_id_table`, and cached for later use; In graph
    execution, it will be created on demand.

    Returns:
      A hash table for lookup.
    """
    if self._cached_class_id_table is None or not tf.executing_eagerly():
      self._cached_class_id_table = lookup_ops.index_table_from_tensor(
          vocabulary_list=tuple(self._label_vocabulary), name='class_id_lookup')
    return self._cached_class_id_table 
Author: tensorflow | Project: estimator | Lines: 16 | Source: binary_class_head.py

Example 11: _label_ids

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def _label_ids(self, labels):
    """Converts labels to integer id space."""
    if self._label_vocabulary is None:
      if not labels.dtype.is_integer:
        raise ValueError('Labels dtype should be integer '
                         'Instead got %s.' % labels.dtype)
      label_ids = labels
    else:
      if labels.dtype != dtypes.string:
        raise ValueError('Labels dtype should be string if there is a '
                         'vocabulary. Instead got {}'.format(labels.dtype))
      label_ids = lookup_ops.index_table_from_tensor(
          vocabulary_list=tuple(self._label_vocabulary),
          name='class_id_lookup').lookup(labels)
    return _assert_range(label_ids, self._n_classes) 
Author: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 17 | Source: head.py

Example 12: create_loss

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def create_loss(self, features, mode, logits, labels):
    """See `Head`."""
    del mode, features  # Unused for this head.
    labels = _check_labels(_maybe_expand_dim(labels), self.logits_dimension)
    if self._label_vocabulary is not None:
      labels = lookup_ops.index_table_from_tensor(
          vocabulary_list=tuple(self._label_vocabulary),
          name='class_id_lookup').lookup(labels)
    labels = math_ops.to_float(labels)
    labels = _assert_range(labels, 2)
    return LossAndLabels(
        unweighted_loss=nn.sigmoid_cross_entropy_with_logits(
            labels=labels, logits=logits),
        processed_labels=labels) 
Author: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 16 | Source: head.py

Example 13: testDecodeExampleWithBranchedBackupHandler

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def testDecodeExampleWithBranchedBackupHandler(self):
    example1 = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'image/object/class/text':
                    self._BytesFeatureFromList(
                        np.array(['cat', 'dog', 'guinea pig'])),
                'image/object/class/label':
                    self._Int64FeatureFromList(np.array([42, 10, 900]))
            }))
    example2 = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'image/object/class/text':
                    self._BytesFeatureFromList(
                        np.array(['cat', 'dog', 'guinea pig'])),
            }))
    example3 = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'image/object/class/label':
                    self._Int64FeatureFromList(np.array([42, 10, 901]))
            }))
    # 'dog' -> 0, 'guinea pig' -> 1, 'cat' -> 2
    table = lookup_ops.index_table_from_tensor(
        constant_op.constant(['dog', 'guinea pig', 'cat']))
    keys_to_features = {
        'image/object/class/text': parsing_ops.VarLenFeature(dtypes.string),
        'image/object/class/label': parsing_ops.VarLenFeature(dtypes.int64),
    }
    backup_handler = tf_example_decoder.BackupHandler(
        handler=slim_example_decoder.Tensor('image/object/class/label'),
        backup=tf_example_decoder.LookupTensor('image/object/class/text',
                                               table))
    items_to_handlers = {
        'labels': backup_handler,
    }
    decoder = slim_example_decoder.TFExampleDecoder(keys_to_features,
                                                    items_to_handlers)
    obtained_class_ids_each_example = []
    with self.test_session() as sess:
      sess.run(lookup_ops.tables_initializer())
      for example in [example1, example2, example3]:
        serialized_example = array_ops.reshape(
            example.SerializeToString(), shape=[])
        obtained_class_ids_each_example.append(
            decoder.decode(serialized_example)[0].eval())

    self.assertAllClose([42, 10, 900], obtained_class_ids_each_example[0])
    self.assertAllClose([2, 0, 1], obtained_class_ids_each_example[1])
    self.assertAllClose([42, 10, 901], obtained_class_ids_each_example[2]) 
Author: cagbal | Project: ros_people_object_detection_tensorflow | Lines: 53 | Source: tf_example_decoder_test.py

Example 14: testGetIterator

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def testGetIterator(self):
    tf.set_random_seed(1)
    tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
        tf.constant(["a", "b", "c", "eos", "sos"]))
    src_dataset = tf.data.Dataset.from_tensor_slices(
        tf.constant(["f e a g", "c c a", "d", "c a"]))
    tgt_dataset = tf.data.Dataset.from_tensor_slices(
        tf.constant(["c c", "a b", "", "b c"]))
    hparams = tf.contrib.training.HParams(
        random_seed=3,
        num_buckets=5,
        eos="eos",
        sos="sos")
    batch_size = 2
    src_max_len = 3
    dataset = iterator_utils.get_iterator(
        src_dataset=src_dataset,
        tgt_dataset=tgt_dataset,
        src_vocab_table=src_vocab_table,
        tgt_vocab_table=tgt_vocab_table,
        batch_size=batch_size,
        sos=hparams.sos,
        eos=hparams.eos,
        random_seed=hparams.random_seed,
        num_buckets=hparams.num_buckets,
        src_max_len=src_max_len,
        reshuffle_each_iteration=False)
    table_initializer = tf.tables_initializer()
    iterator = dataset.make_initializable_iterator()
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(table_initializer)
      sess.run(iterator.initializer)
      features = sess.run(get_next)
      self.assertAllEqual(
          [[2, 0, 3],   # c a eos -- eos is padding
           [2, 2, 0]],  # c c a
          features["source"])
      self.assertAllEqual([2, 3], features["source_sequence_length"])
      self.assertAllEqual(
          [[4, 1, 2],   # sos b c
           [4, 0, 1]],  # sos a b
          features["target_input"])
      self.assertAllEqual(
          [[1, 2, 3],   # b c eos
           [0, 1, 3]],  # a b eos
          features["target_output"])
      self.assertAllEqual([3, 3], features["target_sequence_length"]) 
Author: mlperf | Project: training_results_v0.5 | Lines: 50 | Source: iterator_utils_test.py

Example 15: testGetIteratorWithShard

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_tensor [as alias]
def testGetIteratorWithShard(self):
    tf.set_random_seed(1)
    tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
        tf.constant(["a", "b", "c", "eos", "sos"]))
    src_dataset = tf.data.Dataset.from_tensor_slices(
        tf.constant(["c c a", "f e a g", "d", "c a"]))
    tgt_dataset = tf.data.Dataset.from_tensor_slices(
        tf.constant(["a b", "c c", "", "b c"]))
    hparams = tf.contrib.training.HParams(
        random_seed=3,
        num_buckets=5,
        eos="eos",
        sos="sos")
    batch_size = 2
    src_max_len = 3
    dataset = iterator_utils.get_iterator(
        src_dataset=src_dataset,
        tgt_dataset=tgt_dataset,
        src_vocab_table=src_vocab_table,
        tgt_vocab_table=tgt_vocab_table,
        batch_size=batch_size,
        sos=hparams.sos,
        eos=hparams.eos,
        random_seed=hparams.random_seed,
        num_buckets=hparams.num_buckets,
        src_max_len=src_max_len,
        num_shards=2,
        shard_index=1,
        reshuffle_each_iteration=False)
    table_initializer = tf.tables_initializer()
    iterator = dataset.make_initializable_iterator()
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(table_initializer)
      sess.run(iterator.initializer)
      features = sess.run(get_next)
      self.assertAllEqual(
          [[2, 0, 3],     # c a eos -- eos is padding
           [-1, -1, 0]],  # "f" == unknown, "e" == unknown, a
          features["source"])
      self.assertAllEqual([2, 3], features["source_sequence_length"])
      self.assertAllEqual(
          [[4, 1, 2],   # sos b c
           [4, 2, 2]],  # sos c c
          features["target_input"])
      self.assertAllEqual(
          [[1, 2, 3],   # b c eos
           [2, 2, 3]],  # c c eos
          features["target_output"])
      self.assertAllEqual([3, 3], features["target_sequence_length"]) 
Author: mlperf | Project: training_results_v0.5 | Lines: 52 | Source: iterator_utils_test.py


Note: the tensorflow.python.ops.lookup_ops.index_table_from_tensor examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. For redistribution and use, please refer to each project's license; do not republish without permission.