

Python lookup_ops.index_table_from_file Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.lookup_ops.index_table_from_file method in Python. If you are wondering what this method does, how to call it, or where to find real-world examples of it, the curated code samples below should help. You can also explore further usage examples from the containing module, tensorflow.python.ops.lookup_ops.


The following 15 code examples of lookup_ops.index_table_from_file are shown below, sorted by popularity by default.
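As a quick orientation before the examples: index_table_from_file builds a lookup table from a vocabulary text file (one token per line) and maps string tokens to integer ids, sending unknown tokens to default_value or, if num_oov_buckets > 0, hashing them into extra out-of-vocabulary buckets. The minimal sketch below is illustrative only and is not taken from any of the projects listed; it assumes TensorFlow 1.x graph mode and a placeholder file name vocab.txt.

# Minimal usage sketch (assumes TF 1.x and a "vocab.txt" file with one token per line).
import tensorflow as tf
from tensorflow.python.ops import lookup_ops

# Build a string-to-id table; tokens missing from the file map to default_value.
vocab_table = lookup_ops.index_table_from_file(
    vocabulary_file="vocab.txt", default_value=0)

tokens = tf.constant(["hello", "world", "totally-unknown-token"])
ids = vocab_table.lookup(tokens)

with tf.Session() as sess:
    sess.run(tf.tables_initializer())  # lookup tables must be initialized before use
    print(sess.run(ids))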

Example 1: _transform_feature

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))

    _assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))

    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_file` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.to_int64(input_tensor)

    return lookup_ops.index_table_from_file(
        vocabulary_file=self.vocabulary_file,
        num_oov_buckets=self.num_oov_buckets,
        vocab_size=self.vocabulary_size,
        default_value=self.default_value,
        key_dtype=key_dtype,
        name='{}_lookup'.format(self.key)).lookup(input_tensor) 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 28, Source file: feature_column.py

Example 2: create_tgt_vocab_table

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_tgt_vocab_table(tgt_vocab_file):
    """Creates vocab tables for src_vocab_file and tgt_vocab_file."""
    tgt_vocab_table = lookup_ops.index_table_from_file(tgt_vocab_file, default_value=UNK_ID)

    return tgt_vocab_table 
Developer ID: neccam, Project: nslt, Lines of code: 7, Source file: vocab_utils.py

Example 3: create_vocab_tables

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab):
  """Creates vocab tables for src_vocab_file and tgt_vocab_file."""
  src_vocab_table = lookup_ops.index_table_from_file(
      src_vocab_file, default_value=UNK_ID)
  if share_vocab:
    tgt_vocab_table = src_vocab_table
  else:
    tgt_vocab_table = lookup_ops.index_table_from_file(
        tgt_vocab_file, default_value=UNK_ID)
  return src_vocab_table, tgt_vocab_table 
Developer ID: mlperf, Project: training_results_v0.5, Lines of code: 12, Source file: vocab_utils.py

Example 4: create_train_model

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_train_model(hparams, model_creator):
  txt_file = "%s.%s" % (hparams.train_prefix, "txt")
  lb_file = "%s.%s" % (hparams.train_prefix, "lb")
  vocab_file = hparams.vocab_file
  index_file = hparams.index_file

  graph = tf.Graph()

  with graph.as_default(), tf.container("train"):
    vocab_table = lookup_ops.index_table_from_file(
      vocab_file, default_value = UNK_ID)
    # for the labels
    index_table = lookup_ops.index_table_from_file(
      index_file, default_value = 0)

    txt_dataset = tf.data.TextLineDataset(txt_file)
    lb_dataset = tf.data.TextLineDataset(lb_file)

    iterator = data_iterator.get_iterator(
        txt_dataset,
        lb_dataset,
        vocab_table,
        index_table,
        batch_size = hparams.batch_size,
        num_buckets = hparams.num_buckets,
        max_len = hparams.max_len)

    model = model_creator(
        hparams,
        iterator = iterator,
        mode = tf.contrib.learn.ModeKeys.TRAIN,
        vocab_table = vocab_table)

  return TrainModel(
      graph = graph,
      model = model,
      iterator = iterator) 
Developer ID: MeteorYee, Project: LSTM-CNN-CWS, Lines of code: 39, Source file: model_helper.py

Example 5: create_eval_model

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_eval_model(hparams, model_creator):
  vocab_file = hparams.vocab_file
  index_file = hparams.index_file
  graph = tf.Graph()

  with graph.as_default(), tf.container("eval"):
    vocab_table = lookup_ops.index_table_from_file(
      vocab_file, default_value = UNK_ID)
    # for the labels
    index_table = lookup_ops.index_table_from_file(
      index_file, default_value = 0)

    # the file's name
    txt_file_placeholder = tf.placeholder(shape = (), dtype = tf.string)
    lb_file_placeholder = tf.placeholder(shape = (), dtype = tf.string)
    txt_dataset = tf.data.TextLineDataset(txt_file_placeholder)
    lb_dataset = tf.data.TextLineDataset(lb_file_placeholder)

    iterator = data_iterator.get_iterator(
        txt_dataset,
        lb_dataset,
        vocab_table,
        index_table,
        batch_size = hparams.batch_size,
        num_buckets = hparams.num_buckets,
        max_len = hparams.max_len)

    model = model_creator(
        hparams,
        iterator = iterator,
        mode = tf.contrib.learn.ModeKeys.EVAL,
        vocab_table = vocab_table)

  return EvalModel(
      graph = graph,
      model = model,
      txt_file_placeholder = txt_file_placeholder,
      lb_file_placeholder = lb_file_placeholder,
      iterator = iterator) 
Developer ID: MeteorYee, Project: LSTM-CNN-CWS, Lines of code: 41, Source file: model_helper.py

Example 6: create_infer_model

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_infer_model(hparams, model_creator):
  """Create inference model."""
  graph = tf.Graph()
  vocab_file = hparams.vocab_file

  with graph.as_default(), tf.container("infer"):
    vocab_table = lookup_ops.index_table_from_file(
      vocab_file, default_value = UNK_ID)
    # for the labels
    '''
    Although this is nonsense for the inference procedure, this is to ensure
    the labels are not None when building the model graph.
    (refer to model.BasicModel._decode_layer)
    '''
    mapping_strings = tf.constant(['0'])
    index_table = tf.contrib.lookup.index_table_from_tensor(
      mapping = mapping_strings, default_value = 0)

    txt_placeholder = tf.placeholder(shape=[None], dtype = tf.string)
    batch_size_placeholder = tf.placeholder(shape = [], dtype = tf.int64)

    txt_dataset = tf.data.Dataset.from_tensor_slices(
        txt_placeholder)
    iterator = data_iterator.get_infer_iterator(
        txt_dataset,
        vocab_table,
        index_table,
        batch_size = batch_size_placeholder)

    model = model_creator(
        hparams,
        iterator = iterator,
        mode = tf.contrib.learn.ModeKeys.INFER,
        vocab_table = vocab_table)

  return InferModel(
      graph = graph,
      model = model,
      txt_placeholder = txt_placeholder,
      batch_size_placeholder = batch_size_placeholder,
      iterator = iterator) 
Developer ID: MeteorYee, Project: LSTM-CNN-CWS, Lines of code: 43, Source file: model_helper.py

Example 7: __init__

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def __init__(self, corpus_dir, hparams=None, training=True, buffer_size=8192):
        """
        Args:
            corpus_dir: Name of the folder storing corpus files for training.
            hparams: The object containing the loaded hyper parameters. If None, it will be 
                    initialized here.
            training: Whether to use this object for training.
            buffer_size: The buffer size used for mapping process during data processing.
        """
        if hparams is None:
            self.hparams = HParams(corpus_dir).hparams
        else:
            self.hparams = hparams

        self.src_max_len = self.hparams.src_max_len
        self.tgt_max_len = self.hparams.tgt_max_len

        self.training = training
        self.text_set = None
        self.id_set = None

        vocab_file = os.path.join(corpus_dir, VOCAB_FILE)
        self.vocab_size, _ = check_vocab(vocab_file)
        self.vocab_table = lookup_ops.index_table_from_file(vocab_file,
                                                            default_value=self.hparams.unk_id)
        # print("vocab_size = {}".format(self.vocab_size))

        if training:
            self.case_table = prepare_case_table()
            self.reverse_vocab_table = None
            self._load_corpus(corpus_dir)
            self._convert_to_tokens(buffer_size)
        else:
            self.case_table = None
            self.reverse_vocab_table = \
                lookup_ops.index_to_string_table_from_file(vocab_file,
                                                           default_value=self.hparams.unk_token) 
Developer ID: bshao001, Project: ChatLearner, Lines of code: 39, Source file: tokenizeddata.py

Example 8: create_vocab_table

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_vocab_table(vocab_file):
    """Creates vocab tables for vocab_file."""
    return lookup_ops.index_table_from_file(vocab_file, default_value=UNK_ID) 
Developer ID: nouhadziri, Project: THRED, Lines of code: 5, Source file: vocab.py

Example 9: create_vocab_tables

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_vocab_tables(vocab_file):
  """Creates vocab tables for src_vocab_file and tgt_vocab_file."""
  vocab_table = lookup_ops.index_table_from_file(
      vocab_file, default_value=0)
  return vocab_table 
Developer ID: renqianluo, Project: NAO, Lines of code: 7, Source file: decoder_main.py

Example 10: create_vocab_tables

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab, vocab_size):
    src_vocab_table = lookup_ops.index_table_from_file(
        src_vocab_file, default_value=vocab_size)
    if share_vocab:
        tgt_vocab_table = src_vocab_table
    else:
        tgt_vocab_table = lookup_ops.index_table_from_file(
            tgt_vocab_file, default_value=vocab_size)
    return src_vocab_table, tgt_vocab_table 
Developer ID: lovecambi, Project: qebrain, Lines of code: 11, Source file: expert_model.py

Example 11: create_vocab_tables

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab):
    """Creates vocab tables for src_vocab_file and tgt_vocab_file."""
    src_vocab_table = lookup_ops.index_table_from_file(src_vocab_file, default_value=UNK_ID)
    if share_vocab:
        tgt_vocab_table = src_vocab_table
    else:
        tgt_vocab_table = lookup_ops.index_table_from_file(tgt_vocab_file, default_value=UNK_ID)
    return src_vocab_table, tgt_vocab_table 
Developer ID: NervanaSystems, Project: nlp-architect, Lines of code: 10, Source file: vocab_utils.py

Example 12: create_input_data

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_input_data(source_data_file, target_data_file,
                      vocab_file,
                      batch_size, sos, eos, unk_id,
                      source_max_length, target_max_length):
  source_dataset = tf.data.TextLineDataset(tf.gfile.Glob(source_data_file))
  target_dataset = tf.data.TextLineDataset(tf.gfile.Glob(target_data_file))
  vocab = lookup_ops.index_table_from_file(vocab_file, default_value=unk_id)

  output_buffer_size = batch_size * 1000

  sos_id = tf.cast(vocab.lookup(tf.constant(sos)), tf.int32)
  eos_id = tf.cast(vocab.lookup(tf.constant(eos)), tf.int32)

  dataset = tf.data.Dataset.zip((source_dataset, target_dataset))
  dataset = dataset.map(
    lambda src, tgt: (tf.string_split([src]).values,
                      tf.string_split([tgt]).values)).prefetch(output_buffer_size)
  dataset = dataset.filter(
    lambda src, tgt: tf.logical_and(tf.size(src) > 0, tf.size(tgt) > 0))
  # dataset = dataset.map(
  #   lambda src, tgt: (src[:source_max_length], tgt[:target_max_length]))
  dataset = dataset.filter(
    lambda src, tgt: tf.logical_and(tf.size(src) <= source_max_length, tf.size(tgt) <= target_max_length))
  dataset = dataset.prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt: (tf.cast(vocab.lookup(src), tf.int32),
                      tf.cast(vocab.lookup(tgt), tf.int32)))
  dataset = dataset.prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt: (src,
                      tf.concat(([sos_id], tgt), 0),
                      tf.concat((tgt, [eos_id]), 0))).prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt_in, tgt_out: (
      src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_in))).prefetch(output_buffer_size)

  dataset = dataset.shuffle(100).repeat().padded_batch(
    batch_size,
    padded_shapes=(tf.TensorShape([None]),
                   tf.TensorShape([None]),
                   tf.TensorShape([None]),
                   tf.TensorShape([]),
                   tf.TensorShape([])),
    padding_values=(eos_id,
                    eos_id,
                    eos_id,
                    0,
                    0))

  iterator = dataset.make_initializable_iterator()

  return iterator.get_next(), iterator.initializer, vocab

# ======================== SEQ2SEQ NETWORK ============================= 
Developer ID: ChunML, Project: NLP, Lines of code: 59, Source file: train_bahdanau.py

Example 13: create_input_data

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_input_data(source_data_file, target_data_file,
                      source_vocab_file, target_vocab_file,
                      batch_size, sos, eos, unk_id,
                      source_max_length, target_max_length):
  source_dataset = tf.data.TextLineDataset(tf.gfile.Glob(source_data_file))
  target_dataset = tf.data.TextLineDataset(tf.gfile.Glob(target_data_file))
  source_vocab = lookup_ops.index_table_from_file(
    source_vocab_file, default_value=unk_id)
  target_vocab = lookup_ops.index_table_from_file(
    target_vocab_file, default_value=unk_id)

  output_buffer_size = batch_size * 1000

  source_eos_id = tf.cast(source_vocab.lookup(tf.constant(eos)), tf.int32)
  target_sos_id = tf.cast(target_vocab.lookup(tf.constant(sos)), tf.int32)
  target_eos_id = tf.cast(target_vocab.lookup(tf.constant(eos)), tf.int32)

  dataset = tf.data.Dataset.zip((source_dataset, target_dataset))
  dataset = dataset.map(
    lambda src, tgt: (tf.string_split([src]).values,
                      tf.string_split([tgt]).values)).prefetch(output_buffer_size)
  dataset = dataset.filter(
    lambda src, tgt: tf.logical_and(tf.size(src) > 0, tf.size(tgt) > 0))
  dataset = dataset.map(
    lambda src, tgt: (src[:source_max_length], tgt[:target_max_length]))
  dataset = dataset.prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt: (tf.cast(source_vocab.lookup(src), tf.int32),
                      tf.cast(target_vocab.lookup(tgt), tf.int32)))
  dataset = dataset.prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt: (src,
                      tf.concat(([target_sos_id], tgt), 0),
                      tf.concat((tgt, [target_eos_id]), 0))).prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt_in, tgt_out: (
      src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_in))).prefetch(output_buffer_size)

  dataset = dataset.shuffle(100).repeat().padded_batch(
    batch_size,
    padded_shapes=(tf.TensorShape([None]),
                   tf.TensorShape([None]),
                   tf.TensorShape([None]),
                   tf.TensorShape([]),
                   tf.TensorShape([])),
    padding_values=(source_eos_id,
                    target_eos_id,
                    target_eos_id,
                    0,
                    0))

  iterator = dataset.make_initializable_iterator()

  return iterator.get_next(), iterator.initializer, source_vocab, target_vocab

# ======================== SEQ2SEQ NETWORK ============================= 
Developer ID: ChunML, Project: NLP, Lines of code: 61, Source file: train_bahdanau.py

Example 14: create_input_data

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_input_data(source_data_file, target_data_file,
                      source_vocab_file, target_vocab_file,
                      batch_size, sos, eos,
                      source_max_length, target_max_length):
  source_dataset = tf.data.TextLineDataset(tf.gfile.Glob(source_data_file))
  target_dataset = tf.data.TextLineDataset(tf.gfile.Glob(target_data_file))
  source_vocab = lookup_ops.index_table_from_file(
    source_vocab_file, default_value=FLAGS.unk_id)
  target_vocab = lookup_ops.index_table_from_file(
    target_vocab_file, default_value=FLAGS.unk_id)

  output_buffer_size = batch_size * 1000

  source_eos_id = tf.cast(source_vocab.lookup(tf.constant(eos)), tf.int32)
  target_sos_id = tf.cast(target_vocab.lookup(tf.constant(sos)), tf.int32)
  target_eos_id = tf.cast(target_vocab.lookup(tf.constant(eos)), tf.int32)

  dataset = tf.data.Dataset.zip((source_dataset, target_dataset))
  dataset = dataset.map(
    lambda src, tgt: (tf.string_split([src]).values,
                      tf.string_split([tgt]).values)).prefetch(output_buffer_size)
  dataset = dataset.filter(
    lambda src, tgt: tf.logical_and(tf.size(src) > 0, tf.size(tgt) > 0))
  dataset = dataset.map(
    lambda src, tgt: (src[:source_max_length], tgt[:target_max_length]))
  dataset = dataset.prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt: (tf.cast(source_vocab.lookup(src), tf.int32),
                      tf.cast(target_vocab.lookup(tgt), tf.int32)))
  dataset = dataset.prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt: (src,
                      tf.concat(([target_sos_id], tgt), 0),
                      tf.concat((tgt, [target_eos_id]), 0))).prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt_in, tgt_out: (
      src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_in))).prefetch(output_buffer_size)

  dataset = dataset.shuffle(100).repeat().padded_batch(
    batch_size,
    padded_shapes=(tf.TensorShape([None]),
                   tf.TensorShape([None]),
                   tf.TensorShape([None]),
                   tf.TensorShape([]),
                   tf.TensorShape([])),
    padding_values=(source_eos_id,
                    target_eos_id,
                    target_eos_id,
                    0,
                    0))

  iterator = dataset.make_initializable_iterator()

  return iterator.get_next(), iterator.initializer, source_vocab, target_vocab

# ======================== SEQ2SEQ NETWORK ============================= 
Developer ID: ChunML, Project: NLP, Lines of code: 61, Source file: train_bi.py

Example 15: create_input_data

# Required module import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_table_from_file [as alias]
def create_input_data(source_data_file, target_data_file,
                      source_vocab_file, target_vocab_file,
                      batch_size, sos, eos, unk_id,
                      source_max_length, target_max_length):
  source_dataset = tf.data.TextLineDataset(tf.gfile.Glob(source_data_file))
  target_dataset = tf.data.TextLineDataset(tf.gfile.Glob(target_data_file))
  source_vocab = lookup_ops.index_table_from_file(
    source_vocab_file, default_value=unk_id)
  target_vocab = lookup_ops.index_table_from_file(
    target_vocab_file, default_value=unk_id)

  output_buffer_size = batch_size * 1000

  source_eos_id = tf.cast(source_vocab.lookup(tf.constant(eos)), tf.int32)
  target_sos_id = tf.cast(target_vocab.lookup(tf.constant(sos)), tf.int32)
  target_eos_id = tf.cast(target_vocab.lookup(tf.constant(eos)), tf.int32)

  dataset = tf.data.Dataset.zip((source_dataset, target_dataset))
  dataset = dataset.map(
    lambda src, tgt: (tf.string_split([src]).values,
                      tf.string_split([tgt]).values)).prefetch(output_buffer_size)
  dataset = dataset.filter(
    lambda src, tgt: tf.logical_and(tf.size(src) > 0, tf.size(tgt) > 0))
  dataset = dataset.map(
    lambda src, tgt: (src[:source_max_length], tgt[:target_max_length]))
  dataset = dataset.prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt: (tf.cast(source_vocab.lookup(src), tf.int32),
                      tf.cast(target_vocab.lookup(tgt), tf.int32)))
  dataset = dataset.prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt: (tf.reverse(src, axis=[0]),
                      tf.concat(([target_sos_id], tgt), 0),
                      tf.concat((tgt, [target_eos_id]), 0))).prefetch(output_buffer_size)

  dataset = dataset.map(
    lambda src, tgt_in, tgt_out: (
      src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_in))).prefetch(output_buffer_size)

  dataset = dataset.shuffle(100).repeat().padded_batch(
    batch_size,
    padded_shapes=(tf.TensorShape([None]),
                   tf.TensorShape([None]),
                   tf.TensorShape([None]),
                   tf.TensorShape([]),
                   tf.TensorShape([])),
    padding_values=(source_eos_id,
                    target_eos_id,
                    target_eos_id,
                    0,
                    0))

  iterator = dataset.make_initializable_iterator()

  return iterator.get_next(), iterator.initializer, source_vocab, target_vocab

# ======================== SEQ2SEQ NETWORK ============================= 
Developer ID: ChunML, Project: NLP, Lines of code: 61, Source file: train_luong.py


Note: The tensorflow.python.ops.lookup_ops.index_table_from_file examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and the source code copyright belongs to the original authors; please refer to each project's license before distributing or using the code. Do not reproduce without permission.