

Python FLAGS.batch_size Code Examples

This article collects typical usage examples of the config.FLAGS.batch_size attribute in Python. If you are unsure what FLAGS.batch_size is or how to use it, the hand-picked code examples below should help. You can also explore further usage examples from the config.FLAGS module.


The 12 FLAGS.batch_size code examples below are sorted by popularity by default.
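For background, config.py in projects like these typically defines batch_size with TensorFlow 1.x's tf.app.flags and exposes the shared FLAGS object. Here is a minimal sketch; the flag names other than batch_size, and all defaults, are illustrative assumptions rather than any specific project's config:

# config.py -- hypothetical minimal sketch (TF 1.x); defaults are illustrative
import tensorflow as tf

tf.app.flags.DEFINE_integer('batch_size', 32, 'Number of examples per batch.')
tf.app.flags.DEFINE_integer('epoch', 100, 'Number of training epochs.')

FLAGS = tf.app.flags.FLAGS

Every module that does "from config import FLAGS" then sees the same parsed values, which is why the examples below can read FLAGS.batch_size directly.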

Example 1: guided_loss

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def guided_loss(student_stages_output_tensor, teacher_stages_output_tensor):
    if len(student_stages_output_tensor) != len(teacher_stages_output_tensor):
        raise ValueError('Length must be equal between teacher and student nodes')

    batch_size = tf.cast(tf.shape(student_stages_output_tensor[0])[0], dtype=tf.float32)

    stages = len(student_stages_output_tensor)
    stage_loss = [0 for _ in range(stages)]
    total_loss = 0
    for stage in range(stages):
        with tf.variable_scope('stage_' + str(stage + 1) + '_loss'):
            stage_loss[stage] = tf.nn.l2_loss(student_stages_output_tensor[stage] -
                                              teacher_stages_output_tensor[stage], name='L2_loss') / batch_size
            tf.summary.scalar('stage_' + str(stage + 1) + '_loss', stage_loss[stage])
        with tf.variable_scope('total_loss'):
            total_loss += stage_loss[stage]
    tf.summary.scalar('total_loss', total_loss)  # spaces are illegal in TF summary names
    return total_loss, stage_loss 
Author: timctho | Project: convolutional-pose-machines-tensorflow | Lines: 20 | Source: run_training_distillation.py
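A hypothetical usage sketch for guided_loss; the stage count and the heatmap shape [batch, 32, 32, 15] are illustrative assumptions, not the project's actual dimensions:

import tensorflow as tf

# Matching lists of per-stage belief-map tensors for student and teacher.
student = [tf.placeholder(tf.float32, [None, 32, 32, 15]) for _ in range(3)]
teacher = [tf.placeholder(tf.float32, [None, 32, 32, 15]) for _ in range(3)]
total_loss, stage_loss = guided_loss(student, teacher)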

Example 2: next_batch

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def next_batch(self, batch_size):
        """Return the next `batch_size` examples from this data set."""
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            
            # Shuffle the data. (As published, the permuted array was assigned
            # to self._images, which is never read back; assigning to
            # self._examples makes the shuffle actually take effect.)
            perm = np.arange(self._num_examples)
            np.random.shuffle(perm)
            self._examples = self._examples[perm]

            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
            
        end = self._index_in_epoch
        
        return self._examples[start:end] 
Author: glrs | Project: StackedDAE | Lines: 26 | Source: utils.py
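A minimal driver sketch for next_batch; data_set stands in for an instance of the surrounding dataset class, and the step count is arbitrary:

from config import FLAGS

for step in range(1000):
    batch = data_set.next_batch(FLAGS.batch_size)  # one training batch per step
    # ... feed `batch` to the training op here ...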

Example 3: gt_loss

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def gt_loss(student_stages_output_tensor, gt_output):
    batch_size = tf.cast(tf.shape(student_stages_output_tensor[0])[0], dtype=tf.float32)

    stages = len(student_stages_output_tensor)
    stage_loss_gt = [0 for _ in range(stages)]
    total_loss_gt = 0
    for stage in range(stages):
        with tf.variable_scope('stage_' + str(stage + 1) + '_gt_loss'):
            stage_loss_gt[stage] = tf.nn.l2_loss(student_stages_output_tensor[stage] -
                                                 gt_output, name='L2_loss') / batch_size
            tf.summary.scalar('stage_' + str(stage + 1) + '_gt_loss', stage_loss_gt[stage])
        with tf.variable_scope('total_gt_loss'):
            total_loss_gt += stage_loss_gt[stage]
    tf.summary.scalar('gt_loss', total_loss_gt)
    return total_loss_gt, stage_loss_gt 
Author: timctho | Project: convolutional-pose-machines-tensorflow | Lines: 17 | Source: run_training_distillation.py

Example 4: train

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def train(dialog, batch_size=100, epoch=100):
    model = Seq2Seq(dialog.vocab_size)

    with tf.Session() as sess:
        # TODO: session loading and the summary-writing logic should be moved into the Seq2Seq model
        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
            print("다음 파일에서 모델을 읽는 중 입니다..", ckpt.model_checkpoint_path)
            model.saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print("새로운 모델을 생성하는 중 입니다.")
            sess.run(tf.global_variables_initializer())

        writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        total_batch = int(math.ceil(len(dialog.examples)/float(batch_size)))

        for step in range(total_batch * epoch):
            enc_input, dec_input, targets = dialog.next_batch(batch_size)

            _, loss = model.train(sess, enc_input, dec_input, targets)

            if (step + 1) % 100 == 0:
                model.write_logs(sess, writer, enc_input, dec_input, targets)

                print('Step:', '%06d' % model.global_step.eval(),
                      'cost =', '{:.6f}'.format(loss))

        checkpoint_path = os.path.join(FLAGS.train_dir, FLAGS.ckpt_name)
        model.saver.save(sess, checkpoint_path, global_step=model.global_step)

    print('Optimization complete!')
Author: HYU-AILAB | Project: ai-seminar | Lines: 34 | Source: train.py

Example 5: test

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def test(dialog, batch_size=100):
    print("\n=== 예측 테스트 ===")

    model = Seq2Seq(dialog.vocab_size)

    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        print("다음 파일에서 모델을 읽는 중 입니다..", ckpt.model_checkpoint_path)
        model.saver.restore(sess, ckpt.model_checkpoint_path)

        enc_input, dec_input, targets = dialog.next_batch(batch_size)

        expect, outputs, accuracy = model.test(sess, enc_input, dec_input, targets)

        expect = dialog.decode(expect)
        outputs = dialog.decode(outputs)

        # len(expect) / 2 is a float under Python 3; randrange needs an int
        pick = random.randrange(0, len(expect) // 2)
        input_text = dialog.decode([dialog.examples[pick * 2]], True)  # renamed from `input`, which shadows the builtin
        expect = dialog.decode([dialog.examples[pick * 2 + 1]], True)
        outputs = dialog.cut_eos(outputs[pick])

        print("\nAccuracy:", accuracy)
        print("Random sample\n")
        print("    Input:", input_text)
        print("    Expected:", expect)
        print("    Predicted:", ' '.join(outputs))
Author: HYU-AILAB | Project: ai-seminar | Lines: 29 | Source: train.py

Example 6: main

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def main(_):
    dialog = Dialog()

    dialog.load_vocab(FLAGS.voc_path)
    dialog.load_examples(FLAGS.data_path)

    if FLAGS.train:
        train(dialog, batch_size=FLAGS.batch_size, epoch=FLAGS.epoch)
    elif FLAGS.test:
        test(dialog, batch_size=FLAGS.batch_size) 
Author: HYU-AILAB | Project: ai-seminar | Lines: 12 | Source: train.py
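In the TF 1.x idiom this script presumably follows, main(_) is dispatched through tf.app.run(), which parses the command-line flags (batch_size, epoch, train, test, ...) before calling it:

if __name__ == '__main__':
    tf.app.run()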

Example 7: fill_feed_dict_dae

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def fill_feed_dict_dae(data_set, input_pl, batch_size=None):
    b_size = FLAGS.batch_size if batch_size is None else batch_size

    input_feed = data_set.next_batch(b_size)
    feed_dict = { input_pl: input_feed }

    return feed_dict 
Author: glrs | Project: StackedDAE | Lines: 9 | Source: utils.py

Example 8: fill_feed_dict

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def fill_feed_dict(data_set, input_pl, labels_pl, batch_size=None):
    """Fills the feed_dict for training the given step.
    A feed_dict takes the form of:
    feed_dict = {
        <placeholder>: <tensor of values to be passed for placeholder>,
        ....
    }
    Args:
      data_set: The set of images and labels, from input_data.read_data_sets()
      input_pl: The examples placeholder, from placeholder_inputs().
      labels_pl: The labels placeholder, from placeholder_inputs().
      batch_size: Optional batch size; defaults to FLAGS.batch_size.
    Returns:
      feed_dict: The feed dictionary mapping from placeholders to values.
    """
    # Create the feed_dict for the placeholders, filled with the next
    # `batch_size` examples.
    b_size = FLAGS.batch_size if batch_size is None else batch_size
    
    examples_feed, labels_feed = data_set.next_batch(b_size)

    feed_dict = {
        input_pl: examples_feed,
        labels_pl: labels_feed
    }

    return feed_dict 
Author: glrs | Project: StackedDAE | Lines: 28 | Source: utils.py
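A hedged usage sketch; the placeholder shapes and the sess/train_op/loss names are illustrative assumptions:

input_pl = tf.placeholder(tf.float32, [None, 784], name='input')
labels_pl = tf.placeholder(tf.int32, [None], name='labels')

feed = fill_feed_dict(data_set, input_pl, labels_pl)  # defaults to FLAGS.batch_size
_, loss_val = sess.run([train_op, loss], feed_dict=feed)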

Example 9: evaluation

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def evaluation(logits, labels):
    """Evaluate the quality of the logits at predicting the label.
    
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size], with values in the
        range [0, NUM_CLASSES).
    
    Returns:
      A scalar int32 tensor with the number of examples (out of batch_size)
      that were predicted correctly.
    """
    # For a classifier model, we can use the in_top_k Op. It returns a bool
    # tensor with shape [batch_size] that is true for the examples where the
    # label was in the top k (here k=1) of all logits for that example.
    correct = tf.nn.in_top_k(logits, labels, 1)

    # Predicted class indices, useful e.g. for confusion matrices downstream.
    y_p = tf.argmax(logits, 1)

    # Return the number of true entries, cast because in_top_k returns bool.
    return tf.reduce_sum(tf.cast(correct, tf.int32)), correct, y_p
Author: glrs | Project: StackedDAE | Lines: 29 | Source: evaluate.py
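A sketch of how the returned count is typically turned into an accuracy figure, reusing fill_feed_dict from Example 8; data_set, input_pl, labels_pl, logits, and sess are assumed to exist:

eval_correct, _, _ = evaluation(logits, labels_pl)

true_count = 0
steps_per_epoch = data_set.num_examples // FLAGS.batch_size
for _ in range(steps_per_epoch):
    feed = fill_feed_dict(data_set, input_pl, labels_pl)
    true_count += sess.run(eval_correct, feed_dict=feed)
accuracy = true_count / float(steps_per_epoch * FLAGS.batch_size)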

Example 10: test

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def test():
    assert FLAGS.ckpt is not None, 'no model provided.'
    cube_len = FLAGS.cube_len
    incomp_dir = os.path.join(FLAGS.incomp_data_path, FLAGS.category)
    test_dir = os.path.join(FLAGS.output_dir, FLAGS.category, 'test')

    syn = tf.placeholder(tf.float32, [None, cube_len, cube_len, cube_len, 1], name='syn_data')
    syn_res = descriptor(syn, reuse=False)
    syn_langevin = langevin_dynamics(syn)

    train_data = data_io.getObj(FLAGS.data_path, FLAGS.category, train=True, cube_len=cube_len,
                                num_voxels=FLAGS.train_size, low_bound=0, up_bound=1)

    incomplete_data = data_io.getVoxelsFromMat('%s/incomplete_test.mat' % incomp_dir, data_name='voxels')
    masks = np.array(io.loadmat('%s/masks.mat' % incomp_dir)['masks'], dtype=np.float32)

    sample_size = len(incomplete_data)

    masks = masks[..., np.newaxis]
    incomplete_data = incomplete_data[..., np.newaxis]
    voxel_mean = train_data.mean()
    incomplete_data = incomplete_data - voxel_mean
    # float() guards against integer floor division under Python 2
    num_batches = int(math.ceil(sample_size / float(FLAGS.batch_size)))

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        print('Loading checkpoint {}.'.format(FLAGS.ckpt))
        saver.restore(sess, FLAGS.ckpt)

        init_data = incomplete_data.copy()
        sample_voxels = np.random.randn(sample_size, cube_len, cube_len, cube_len, 1)

        for i in range(num_batches):
            indices = slice(i * FLAGS.batch_size, min(sample_size, (i + 1) * FLAGS.batch_size))
            syn_data = init_data[indices]
            data_mask = masks[indices]

            # Langevin Sampling
            sample = sess.run(syn_langevin, feed_dict={syn: syn_data})

            sample_voxels[indices] = sample * (1 - data_mask) + syn_data * data_mask

        if not os.path.exists(test_dir):
            os.makedirs(test_dir)
        data_io.saveVoxelsToMat(sample_voxels + voxel_mean, "%s/recovery.mat" % test_dir, cmin=0, cmax=1) 
Author: jianwen-xie | Project: 3DDescriptorNet | Lines: 50 | Source: rec_exp.py

Example 11: input_fn

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def input_fn(path, is_train: bool):
  """Input pipeline for smallNORB using tf.data.
  
  Author:
    Ashley Gritzman 15/11/2018
  Args: 
    is_train:  
  Returns:
    dataset: image tf.data.Dataset 
  """

  import re
  if is_train:
    CHUNK_RE = re.compile(r"train.*\.tfrecords")
  else:
    CHUNK_RE = re.compile(r"test.*\.tfrecords")

  chunk_files = [os.path.join(path, fname)
           for fname in os.listdir(path)
           if CHUNK_RE.match(fname)]
  
  # 1. create the dataset
  dataset = tf.data.TFRecordDataset(chunk_files)
  
  # 2. map with the actual work (preprocessing, augmentation…) using multiple 
  # parallel calls
  dataset = dataset.map(_parser, num_parallel_calls=4)
  if is_train:
    dataset = dataset.map(_train_preprocess, 
                          num_parallel_calls=FLAGS.num_threads)
  else:
    dataset = dataset.map(_val_preprocess, 
                          num_parallel_calls=FLAGS.num_threads)
  
  # 3. shuffle (with a big enough buffer size)
  # In response to a question on OpenReview, Hinton et al. wrote the 
  # following:
  # https://openreview.net/forum?id=HJWLfGWRb&noteId=rJgxonoNnm
  # "We did not have any special ordering of training batches and we random 
  # shuffle. In terms of TF batch:
  # capacity=2000 + 3 * batch_size, ensures a minimum amount of shuffling of 
  # examples. min_after_dequeue=2000."
  capacity = 2000 + 3 * FLAGS.batch_size
  dataset = dataset.shuffle(buffer_size=capacity)
    
  # 4. batch
  dataset = dataset.batch(FLAGS.batch_size, drop_remainder=True)
  
  # 5. repeat
  dataset = dataset.repeat(count=FLAGS.epoch)
  
  # 6. prefetch
  dataset = dataset.prefetch(1)
  
  return dataset 
Author: IBM | Project: matrix-capsules-with-em-routing | Lines: 57 | Source: data_pipeline_norb.py
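A consumption sketch for this pipeline using a TF 1.x one-shot iterator; the path and the exact element structure produced by _parser are assumptions:

dataset = input_fn('data/smallNORB', is_train=True)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()  # a batch of FLAGS.batch_size examples

with tf.Session() as sess:
    batch = sess.run(next_element)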

Example 12: plot_smallnorb

# Module to import: from config import FLAGS [as alias]
# Or: from config.FLAGS import batch_size [as alias]
def plot_smallnorb(is_train=True, samples_per_class=5):
  """Plot examples from the smallNORB dataset.
  
  Execute this command in a Jupyter Notebook.
  
  Author:
    Ashley Gritzman 18/04/2019
  Args: 
    is_train: True for the training dataset, False for the test dataset
    samples_per_class: number of samples images per class
  Returns:
    None
  """
  
  # To plot pretty figures
  import matplotlib.pyplot as plt
  plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
  plt.rcParams['image.interpolation'] = 'nearest'
  plt.rcParams['image.cmap'] = 'gray'
  
  from config import get_dataset_path
  path = get_dataset_path("smallNORB")
  
  CLASSES = ['animal', 'human', 'airplane', 'truck', 'car']

  # Get batch from data queue. Batch size is FLAGS.batch_size, which is then 
  # divided across multiple GPUs
  input_dict = create_inputs_norb(path, is_train=is_train)
  with tf.Session() as sess:
    input_dict = sess.run(input_dict)
    
  img_bch = input_dict['image']
  lab_bch = input_dict['label']
  cat_bch = input_dict['category']
  elv_bch = input_dict['elevation']
  azi_bch = input_dict['azimuth']
  lit_bch = input_dict['lighting']
  
  num_classes = len(CLASSES)

  fig = plt.figure(figsize=(num_classes * 2, samples_per_class * 2))
  fig.suptitle("category, elevation, azimuth, lighting")  
  for y, cls in enumerate(CLASSES):
    idxs = np.flatnonzero(lab_bch == y)
    idxs = np.random.choice(idxs, samples_per_class, replace=False)
    for i, idx in enumerate(idxs):
      plt_idx = i * num_classes + y + 1
      plt.subplot(samples_per_class, num_classes, plt_idx)
      plt.imshow(np.squeeze(img_bch[idx]))
      plt.xticks([], [])
      plt.yticks([], [])
      plt.xlabel("{}, {}, {}, {}".format(cat_bch[idx], elv_bch[idx],
                                         azi_bch[idx], lit_bch[idx]))
      # plt.axis('off')

      if i == 0:
        plt.title(cls)
  plt.show() 
Author: IBM | Project: matrix-capsules-with-em-routing | Lines: 61 | Source: data_pipeline_norb.py


Note: the config.FLAGS.batch_size examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from community-contributed open-source projects; copyright of the source code remains with the original authors, and any use or redistribution should follow the corresponding project's License. Do not republish without permission.