

Python utils.GetListOfFeatureNamesAndSizes Method Code Examples

This article collects typical usage examples of the Python method utils.GetListOfFeatureNamesAndSizes. If you are unsure what utils.GetListOfFeatureNamesAndSizes does or how to call it, the curated examples below should help. You can also explore further usage examples from the utils module in which the method is defined.


The following presents 15 code examples of utils.GetListOfFeatureNamesAndSizes, sorted by popularity by default.
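As a quick orientation before the examples: in the YouTube-8M starter code this method parses the comma-separated --feature_names and --feature_sizes flag strings into a list of names and a list of integer sizes. The minimal sketch below illustrates that assumed input/output convention; the two flag values shown are hypothetical.

import utils

# A minimal sketch, assuming YouTube-8M-style semantics of the method:
# comma-separated flag strings are split into two parallel lists.
feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
    "mean_rgb,mean_audio",  # names of the input features
    "1024,128")             # their dimensionalities, given as a string
# Assumed result: feature_names == ["mean_rgb", "mean_audio"]
#                 feature_sizes == [1024, 128]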

Example 1: main

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if not FLAGS.output_file:
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if not FLAGS.input_data_pattern:
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.train_dir, FLAGS.input_data_pattern,
    FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k) 
Developer: antoine77340, Project: Youtube-8M-WILLOW, Lines: 26, Source: inference.py

Example 2: main

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if not FLAGS.output_dir:
    raise ValueError("'output_dir' was not specified. "
                     "Unable to continue with inference.")

  if not FLAGS.input_data_pattern:
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.model_checkpoint_path, FLAGS.input_data_pattern,
      FLAGS.output_dir, FLAGS.batch_size, FLAGS.top_k) 
Developer: wangheda, Project: youtube-8m, Lines: 26, Source: inference-pre-ensemble.py

Example 3: get_reader

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def get_reader():
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(
        num_classes=FLAGS.truncated_num_classes,
        decode_zlib=FLAGS.decode_zlib,
        feature_names=feature_names, feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(
        num_classes=FLAGS.truncated_num_classes,
        decode_zlib=FLAGS.decode_zlib,
        feature_names=feature_names, feature_sizes=feature_sizes)

  return reader 
Developer: mpekalski, Project: Y8M, Lines: 19, Source: train.py

Example 4: get_reader

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def get_reader():
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(
        num_classes=FLAGS.truncated_num_classes,
        feature_names=feature_names, feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(
        num_classes=FLAGS.truncated_num_classes,
        decode_zlib=FLAGS.decode_zlib,
        feature_names=feature_names, feature_sizes=feature_sizes,
        feature_calcs=FLAGS.c_vars, feature_remove=FLAGS.r_vars)

  return reader 
Developer: mpekalski, Project: Y8M, Lines: 18, Source: train.py

Example 5: main

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if not FLAGS.output_file:
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if not FLAGS.input_data_pattern:
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.checkpoint_file, FLAGS.train_dir, FLAGS.input_data_pattern,
    FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k) 
Developer: mpekalski, Project: Y8M, Lines: 26, Source: inference.py

Example 6: get_reader

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def get_reader():
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(
        feature_names=feature_names, feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(
        feature_names=feature_names, feature_sizes=feature_sizes)
    
  return reader 
Developer: antoine77340, Project: Youtube-8M-WILLOW, Lines: 15, Source: train.py

Example 7: check_video_id

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def check_video_id():
  tf.set_random_seed(0)  # for reproducibility
  with tf.Graph().as_default():
    # convert feature_names and feature_sizes to lists of values
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        FLAGS.feature_names, FLAGS.feature_sizes)

    # prepare a reader for each single model prediction result
    all_readers = []

    all_patterns = FLAGS.eval_data_patterns
    all_patterns = map(lambda x: x.strip(), all_patterns.strip().strip(",").split(","))
    for i in xrange(len(all_patterns)):
      reader = readers.EnsembleReader(
          feature_names=feature_names, feature_sizes=feature_sizes)
      all_readers.append(reader)

    input_reader = None
    input_data_pattern = None
    if FLAGS.input_data_pattern is not None:
      input_reader = readers.EnsembleReader(
          feature_names=["mean_rgb","mean_audio"], feature_sizes=[1024,128])
      input_data_pattern = FLAGS.input_data_pattern

    if not FLAGS.eval_data_patterns:
      raise IOError("'eval_data_patterns' was not specified. "
                    "Nothing to evaluate.")

    build_graph(
        all_readers=all_readers,
        input_reader=input_reader,
        input_data_pattern=input_data_pattern,
        all_eval_data_patterns=all_patterns,
        batch_size=FLAGS.batch_size)

    logging.info("built evaluation graph")
    video_id_equal = tf.get_collection("video_id_equal")[0]
    input_distance = tf.get_collection("input_distance")[0]

    check_loop(video_id_equal, input_distance, all_patterns) 
Developer: wangheda, Project: youtube-8m, Lines: 42, Source: check_video_id.py

Example 8: build_model

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def build_model(self):
    """Find the model and build the graph."""

    # Convert feature_names and feature_sizes to lists of values.
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        FLAGS.feature_names, FLAGS.feature_sizes)

    if FLAGS.frame_features:
      if FLAGS.frame_only:
          reader = readers.YT8MFrameFeatureOnlyReader(
              feature_names=feature_names, feature_sizes=feature_sizes)
      else:
          reader = readers.YT8MFrameFeatureReader(
              feature_names=feature_names, feature_sizes=feature_sizes)
    else:
      reader = readers.YT8MAggregatedFeatureReader(
          feature_names=feature_names, feature_sizes=feature_sizes)

    # Find the model.
    model = find_class_by_name(FLAGS.model,
                               [labels_embedding])()
    label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses_embedding])()
    optimizer_class = find_class_by_name(FLAGS.optimizer, [tf.train])

    build_graph(reader=reader,
                 model=model,
                 optimizer_class=optimizer_class,
                 clip_gradient_norm=FLAGS.clip_gradient_norm,
                 train_data_pattern=FLAGS.train_data_pattern,
                 label_loss_fn=label_loss_fn,
                 base_learning_rate=FLAGS.base_learning_rate,
                 learning_rate_decay=FLAGS.learning_rate_decay,
                 learning_rate_decay_examples=FLAGS.learning_rate_decay_examples,
                 regularization_penalty=FLAGS.regularization_penalty,
                 num_readers=FLAGS.num_readers,
                 batch_size=FLAGS.batch_size,
                 num_epochs=FLAGS.num_epochs)

    logging.info("%s: Built graph.", task_as_string(self.task))

    return tf.train.Saver(max_to_keep=2, keep_checkpoint_every_n_hours=0.25) 
Developer: wangheda, Project: youtube-8m, Lines: 43, Source: train_embedding.py

Example 9: build_model

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def build_model(self):
    """Find the model and build the graph."""

    # Convert feature_names and feature_sizes to lists of values.
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        FLAGS.feature_names, FLAGS.feature_sizes)

    if FLAGS.frame_features:
      if FLAGS.frame_only:
          reader = readers.YT8MFrameFeatureOnlyReader(
              feature_names=feature_names, feature_sizes=feature_sizes)
      else:
          reader = readers.YT8MFrameFeatureReader(
              feature_names=feature_names, feature_sizes=feature_sizes)
    else:
      reader = readers.YT8MAggregatedFeatureReader(
          feature_names=feature_names, feature_sizes=feature_sizes)

    # Find the model.
    model = find_class_by_name(FLAGS.model,
                               [labels_autoencoder])()
    label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses])()
    optimizer_class = find_class_by_name(FLAGS.optimizer, [tf.train])

    build_graph(reader=reader,
                 model=model,
                 optimizer_class=optimizer_class,
                 clip_gradient_norm=FLAGS.clip_gradient_norm,
                 train_data_pattern=FLAGS.train_data_pattern,
                 label_loss_fn=label_loss_fn,
                 base_learning_rate=FLAGS.base_learning_rate,
                 learning_rate_decay=FLAGS.learning_rate_decay,
                 learning_rate_decay_examples=FLAGS.learning_rate_decay_examples,
                 regularization_penalty=FLAGS.regularization_penalty,
                 num_readers=FLAGS.num_readers,
                 batch_size=FLAGS.batch_size,
                 num_epochs=FLAGS.num_epochs)

    logging.info("%s: Built graph.", task_as_string(self.task))

    return tf.train.Saver(max_to_keep=2, keep_checkpoint_every_n_hours=0.25) 
Developer: wangheda, Project: youtube-8m, Lines: 43, Source: train_autoencoder.py

Example 10: main

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if not FLAGS.output_file:
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if not FLAGS.input_data_pattern:
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  model = find_class_by_name(FLAGS.model,
                             [frame_level_models, video_level_models])()
  transformer_fn = find_class_by_name(FLAGS.feature_transformer, 
                                         [feature_transform])

  build_graph(reader,
              model,
              input_data_pattern=FLAGS.input_data_pattern,
              batch_size=FLAGS.batch_size,
              transformer_class=transformer_fn)

  saver = tf.train.Saver(max_to_keep=3, keep_checkpoint_every_n_hours=10000000000)

  inference(saver, FLAGS.train_dir,
            FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k) 
Developer: wangheda, Project: youtube-8m, Lines: 39, Source: inference-sample-error.py

Example 11: lstm

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def lstm(self, model_input, vocab_size, num_frames, sub_scope="", **unused_params):
    number_of_layers = FLAGS.lstm_layers
    lstm_sizes = map(int, FLAGS.lstm_cells.split(","))
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        FLAGS.feature_names, FLAGS.feature_sizes)
    sub_inputs = [tf.nn.l2_normalize(x, dim=2) for x in tf.split(model_input, feature_sizes, axis = 2)]

    assert len(lstm_sizes) == len(feature_sizes), \
      "length of lstm_sizes (={}) != length of feature_sizes (={})".format( \
      len(lstm_sizes), len(feature_sizes))

    states = []
    for i in xrange(len(feature_sizes)):
      with tf.variable_scope(sub_scope+"RNN%d" % i):
        sub_input = sub_inputs[i]
        lstm_size = lstm_sizes[i]
        ## Batch normalize the input
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
                [
                    tf.contrib.rnn.BasicLSTMCell(
                        lstm_size, forget_bias=1.0, state_is_tuple=True)
                    for _ in range(number_of_layers)
                    ],
                state_is_tuple=True)
        output, state = tf.nn.dynamic_rnn(stacked_lstm, sub_input,
                                         sequence_length=num_frames,
                                         swap_memory=FLAGS.rnn_swap_memory,
                                         dtype=tf.float32)
        states.extend(map(lambda x: x.c, state))
    final_state = tf.concat(states, axis = 1)
    return final_state 
Developer: wangheda, Project: youtube-8m, Lines: 33, Source: multires_lstm_memory_deep_combine_chain_model.py

Example 12: lstmoutput

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def lstmoutput(self, model_input, vocab_size, num_frames):

    number_of_layers = FLAGS.lstm_layers

    lstm_sizes = map(int, FLAGS.lstm_cells.split(","))
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        FLAGS.feature_names, FLAGS.feature_sizes)
    sub_inputs = [tf.nn.l2_normalize(x, dim=2) for x in tf.split(model_input, feature_sizes, axis = 2)]

    assert len(lstm_sizes) == len(feature_sizes), \
      "length of lstm_sizes (={}) != length of feature_sizes (={})".format( \
      len(lstm_sizes), len(feature_sizes))

    outputs = []
    for i in xrange(len(feature_sizes)):
      with tf.variable_scope("RNN%d" % i):
        sub_input = sub_inputs[i]
        lstm_size = lstm_sizes[i]
        ## Batch normalize the input
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
                [
                    tf.contrib.rnn.BasicLSTMCell(
                        lstm_size, forget_bias=1.0, state_is_tuple=True)
                    for _ in range(number_of_layers)
                    ],
                state_is_tuple=True)

        output, state = tf.nn.dynamic_rnn(stacked_lstm, sub_input,
                                         sequence_length=num_frames,
                                         swap_memory=FLAGS.rnn_swap_memory,
                                         dtype=tf.float32)
        outputs.append(output)

    # concat
    final_output = tf.concat(outputs, axis=2)
    return final_output 
Developer: wangheda, Project: youtube-8m, Lines: 38, Source: lstm_cnn_deep_combine_chain_model.py

Example 13: main

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  if FLAGS.input_model_tgz:
    if FLAGS.train_dir:
      raise ValueError("You cannot supply --train_dir if supplying "
                       "--input_model_tgz")
    # Untar.
    if not os.path.exists(FLAGS.untar_model_dir):
      os.makedirs(FLAGS.untar_model_dir)
    tarfile.open(FLAGS.input_model_tgz).extractall(FLAGS.untar_model_dir)
    FLAGS.train_dir = FLAGS.untar_model_dir

  flags_dict_file = os.path.join(FLAGS.train_dir, "model_flags.json")
  if not file_io.file_exists(flags_dict_file):
    raise IOError("Cannot find %s. Did you run eval.py?" % flags_dict_file)
  flags_dict = json.loads(file_io.FileIO(flags_dict_file, "r").read())

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      flags_dict["feature_names"], flags_dict["feature_sizes"])

  if flags_dict["frame_features"]:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if not FLAGS.output_file:
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if not FLAGS.input_data_pattern:
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.train_dir, FLAGS.input_data_pattern,
            FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k) 
Developer: google, Project: youtube-8m, Lines: 40, Source: inference.py

Example 14: get_reader

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def get_reader():
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes,
                                            segment_labels=FLAGS.segment_labels)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  return reader 
Developer: google, Project: youtube-8m, Lines: 16, Source: train.py

Example 15: main

# Required module: import utils [as alias]
# Or: from utils import GetListOfFeatureNamesAndSizes [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  if FLAGS.input_model_tgz:
    if FLAGS.train_dir:
      raise ValueError("You cannot supply --train_dir if supplying "
                       "--input_model_tgz")
    # Untar.
    if not os.path.exists(FLAGS.untar_model_dir):
      os.makedirs(FLAGS.untar_model_dir)
    tarfile.open(FLAGS.input_model_tgz).extractall(FLAGS.untar_model_dir)
    FLAGS.train_dir = FLAGS.untar_model_dir

  flags_dict_file = os.path.join(FLAGS.train_dir, "model_flags.json")
  if not os.path.exists(flags_dict_file):
    raise IOError("Cannot find %s. Did you run eval.py?" % flags_dict_file)
  flags_dict = json.loads(open(flags_dict_file).read())

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      flags_dict["feature_names"], flags_dict["feature_sizes"])

  if flags_dict["frame_features"]:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if not FLAGS.output_file:
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if not FLAGS.input_data_pattern:
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.train_dir, FLAGS.input_data_pattern,
    FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k) 
Developer: miha-skalic, Project: youtube8mchallenge, Lines: 40, Source: inference_gpu.py


Note: The utils.GetListOfFeatureNamesAndSizes examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce this article without permission.