

Python tensorflow.initialize_local_variables Method Code Examples

This article collects typical usage examples of the Python method tensorflow.initialize_local_variables. If you are wondering what this method does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further examples of how this method is used across TensorFlow projects.


The sections below present 15 code examples of tensorflow.initialize_local_variables, sorted by popularity by default.
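Before diving into the examples, here is a minimal sketch of the pattern nearly all of them share: local variables (such as the epoch counter created by tf.train.string_input_producer) must be initialized before the queue runners are started. Note that tf.initialize_local_variables and tf.initialize_all_variables are deprecated names from the TF 1.x era, replaced around TensorFlow 0.12 by tf.local_variables_initializer and tf.global_variables_initializer; the examples below use the old names. The file name "data.tfrecord" in this sketch is a placeholder.

# Minimal sketch (TF 1.x-era API; "data.tfrecord" is a placeholder).
import tensorflow as tf

# With num_epochs set, string_input_producer creates a *local* variable
# to count epochs -- this is why initialize_local_variables() is needed.
filename_queue = tf.train.string_input_producer(["data.tfrecord"], num_epochs=1)

init_op = tf.group(tf.initialize_all_variables(),
                   tf.initialize_local_variables())
# Equivalent modern (TF >= 0.12) spelling:
# init_op = tf.group(tf.global_variables_initializer(),
#                    tf.local_variables_initializer())

with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # ... consume the queue with sess.run(...) here ...
    coord.request_stop()
    coord.join(threads)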

Example 1: test_lm

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def test_lm(self):
        hps = get_test_hparams()

        with tf.variable_scope("model"):
            model = LM(hps)

        with self.test_session() as sess:
            tf.initialize_all_variables().run()
            tf.initialize_local_variables().run()

            loss = 1e5
            for i in range(50):
                x, y, w = simple_data_generator(hps.batch_size, hps.num_steps)
                loss, _ = sess.run([model.loss, model.train_op], {model.x: x, model.y: y, model.w: w})
                print("%d: %.3f %.3f" % (i, loss, np.exp(loss)))
                if np.isnan(loss):
                    print("NaN detected")
                    break

            self.assertLess(loss, 1.0) 
Author: rafaljozefowicz | Project: lm | Lines: 22 | Source: language_model_test.py

Example 2: export_intermediate

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def export_intermediate(FLAGS, sess, dataset):
    # Models
    x = tf.placeholder(tf.float32, shape=[
        None, IMAGE_SIZE['resized'][0], IMAGE_SIZE['resized'][1], 3])
    dropout = tf.placeholder(tf.float32)
    feat_model = discriminator(x, reuse=False, dropout=dropout, int_feats=True)

    # Init
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    # Restore
    saver = tf.train.Saver()
    checkpoint = tf.train.latest_checkpoint(FLAGS.logdir)
    saver.restore(sess, checkpoint)

    # Run
    all_features = np.zeros((dataset['size'], feat_model.get_shape()[1]))
    all_paths = []
    for i in itertools.count():
        try:
            images, paths = sess.run(dataset['batch'])
        except tf.errors.OutOfRangeError:
            break
        if i % 10 == 0:
            print(i * FLAGS.batch_size, dataset['size'])
        im_features = sess.run(feat_model, feed_dict={x: images, dropout: 1, })
        all_features[FLAGS.batch_size * i:FLAGS.batch_size * i + im_features.shape[0]] = im_features
        all_paths += list(paths)

    # Finish off the filename queue coordinator.
    coord.request_stop()
    coord.join(threads)

    return all_features, all_paths 
Author: marcbelmont | Project: gan-image-similarity | Lines: 40 | Source: main.py

Example 3: main

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def main():
    data_dir = '/output/combined'
    num_images = 1452601

    # Build graph and initialize variables
    read_op = create_read_graph(data_dir, 'combined')
    init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
    sess = tf.Session()
    sess.run(init_op)

    # Start input enqueue threads
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    read_count = 0
    try:
        while read_count < num_images and not coord.should_stop():
            images, timestamps, angles, _ = sess.run(read_op)
            for i in range(images.shape[0]):
                decoded_image = images[i]
                assert decoded_image.shape[2] == 3
                print(angles[i])
                read_count += 1
            if not read_count % 1000:
                print("Read %d examples" % read_count)

    except tf.errors.OutOfRangeError:
        print("Reading stopped by Queue")
    finally:
        # Ask the threads to stop.
        coord.request_stop()

    print("Done reading %d images" % read_count)

    # Wait for threads to finish.
    coord.join(threads)
    sess.close() 
Author: rwightman | Project: udacity-driving-reader | Lines: 38 | Source: readtf.py

Example 4: train

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def train(config):
  with tf.Graph().as_default():
    model = FW_model(config)
    inputs_seqs_batch, outputs_batch = model.reader.read()
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())

    sess = tf.Session()
    sess.run(init_op)
    saver = tf.train.Saver(tf.all_variables())
    global_steps = 0

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    train_writer = tf.train.SummaryWriter("./log/FW/train", sess.graph)
    validation_writer = tf.train.SummaryWriter("./log/FW/validation", sess.graph)
    try:
      while not coord.should_stop():
        input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
        cost, _, summary = sess.run(
            [model.cost, model.train_op, model.summary_all],
            {model.input_data: input_data, model.targets: targets})
        print("Step %d: cost:%f" % (global_steps,  cost))
        train_writer.add_summary(summary, global_steps)

        global_steps += 1
        if global_steps % 1000 == 0:
          (accuracy, summary) = sess.run([model.accuracy, model.summary_accuracy], {model.input_data: model.validation_inputs,
                                                                                    model.targets: model.validation_targets})
          validation_writer.add_summary(summary, global_steps)
          print("Accuracy: %f" % accuracy)
          print(saver.save(sess, "./save/FW/save", global_step=global_steps))
        if global_steps > 60000:
          break
    except tf.errors.OutOfRangeError:
      print("Error")
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
    coord.join(threads)
    sess.close() 
Author: jxwufan | Project: AssociativeRetrieval | Lines: 43 | Source: FW_train.py

Example 5: load_validation

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def load_validation(self):
    data_reader = utils.DataReader(data_filename="input_seqs_validation", batch_size=16)
    inputs_seqs_batch, outputs_batch = data_reader.read(False, 1)
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())

    sess = tf.Session()
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    self.validation_inputs = []
    self.validation_targets = []
    try:
      while not coord.should_stop():
        input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
        self.validation_inputs.append(input_data)
        self.validation_targets.append(targets)
    except tf.errors.OutOfRangeError:
      pass
    finally:
      coord.request_stop()
    coord.join(threads)
    sess.close()

    self.validation_inputs = np.array(self.validation_inputs).reshape([-1, self.config.input_length])
    self.validation_targets = np.array(self.validation_targets).reshape([-1, 1]) 
Author: jxwufan | Project: AssociativeRetrieval | Lines: 28 | Source: LSTM_model.py

Example 6: train

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def train(config):
  with tf.Graph().as_default():
    model = FW_model(config)
    inputs_seqs_batch, outputs_batch = model.reader.read(shuffle=False, num_epochs=1)
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())

    sess = tf.Session()
    sess.run(init_op)
    saver = tf.train.Saver(tf.all_variables())
    global_steps = 0

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    saver.restore(sess, "./save/FW/save-60000")

    correct_count = 0
    evaled_count = 0
    try:
      while not coord.should_stop():
        input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
        probs = sess.run([model.probs],
                         {model.input_data: input_data, model.targets: targets})
        probs = np.array(probs).reshape([-1, config.vocab_size])
        targets = np.array([t[0] for t in targets])
        output = np.argmax(probs, axis=1)

        correct_count += np.sum(output == targets)
        evaled_count += len(output)

    except tf.errors.OutOfRangeError:
        pass
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
    print("Accuracy: %f" % (float(correct_count) / evaled_count))
    coord.join(threads)
    sess.close() 
Author: jxwufan | Project: AssociativeRetrieval | Lines: 41 | Source: FW_eval.py

Example 7: train

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def train(config):
  with tf.Graph().as_default():
    model = LSTM_model(config)
    inputs_seqs_batch, outputs_batch = model.reader.read(shuffle=False, num_epochs=1)
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())

    sess = tf.Session()
    sess.run(init_op)
    saver = tf.train.Saver(tf.all_variables())
    global_steps = 0

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    saver.restore(sess, "./save/LSTM/save-60000")

    correct_count = 0
    evaled_count = 0
    try:
      while not coord.should_stop():
        input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
        probs = sess.run([model.probs],
                         {model.input_data: input_data, model.targets: targets})
        probs = np.array(probs).reshape([-1, config.vocab_size])
        targets = np.array([t[0] for t in targets])
        output = np.argmax(probs, axis=1)

        correct_count += np.sum(output == targets)
        evaled_count += len(output)

    except tf.errors.OutOfRangeError:
        pass
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
    print("Accuracy: %f" % (float(correct_count) / evaled_count))
    coord.join(threads)
    sess.close() 
Author: jxwufan | Project: AssociativeRetrieval | Lines: 41 | Source: LSTM_eval.py

Example 8: load_tfrecord

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def load_tfrecord(filename):
  g = tf.Graph()
  with g.as_default():
    tf.logging.set_verbosity(tf.logging.INFO)

    mosaic, demosaic_truth, readvar, shotfactor = read_and_decode_single(filename)
    init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
    with tf.Session() as sess:
      sess.run(init_op)
      mosaic, demosaic_truth, readvar, shotfactor = \
        sess.run([mosaic, demosaic_truth, readvar, shotfactor])

      return mosaic, demosaic_truth, readvar, shotfactor 
Author: google | Project: burst-denoising | Lines: 15 | Source: kpn_data_provider.py

Example 9: main

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def main(_):


    cfg = config.Config()
    cfg.batch_size = 1
    cfg.n_epochs = 1


    data_pipeline = dpp.DataPipeline(FLAGS.data_path,
                                     config=cfg,
                                     is_training=False)
    samples = data_pipeline.samples
    labels = data_pipeline.labels
    start_time = data_pipeline.start_time
    end_time = data_pipeline.end_time

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        tf.initialize_local_variables().run()
        threads = tf.train.start_queue_runners(coord=coord)

        try:
            for i in range(FLAGS.windows):
                to_fetch = [samples, labels, start_time, end_time]
                sample, label, starttime, endtime = sess.run(to_fetch)
                # assert starttime < endtime
                print('starttime {}, endtime {}'.format(UTCDateTime(starttime),
                                                        UTCDateTime(endtime)))
                print("label", label[0])
                sample = np.squeeze(sample, axis=(0,))
                target = np.squeeze(label, axis=(0,))
        except tf.errors.OutOfRangeError:
            print('Evaluation completed ({} epochs).'.format(cfg.n_epochs))

        print("{} windows seen".format(i + 1))
        coord.request_stop()
        coord.join(threads) 
Author: tperol | Project: ConvNetQuake | Lines: 39 | Source: print_clusterid_from_tfrecords.py

Example 10: test_variable_size_record

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def test_variable_size_record(self):
        # WRITING
        with VariableSizeTypesRecordWriter("variable.tfrecord", DIR_TFRECORDS) as writer:
            for i in range(2):
                writer.write_test()

        # READING
        reader = VariableSizeTypesRecordReader("variable.tfrecord", DIR_TFRECORDS)
        read_one_example = reader.read_operation

        with tf.Session() as sess:
            sess.run(
                [tf.global_variables_initializer(), tf.initialize_local_variables()]
            )

            # Coordinate the queue of tfrecord files.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            # Reading examples sequentially one by one
            for j in range(3):
                fetches = sess.run(read_one_example)
                print("Read:", fetches)

            # Finish off the queue coordinator.
            coord.request_stop()
            coord.join(threads) 
Author: baldassarreFe | Project: deep-koalarization | Lines: 29 | Source: test_write_read_variable.py

Example 11: run_eval

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def run_eval(dataset, hps, logdir, mode, num_eval_steps):
    with tf.variable_scope("model"):
        hps.num_sampled = 0  # Always using full softmax at evaluation.
        hps.keep_prob = 1.0
        model = LM(hps, "eval", "/cpu:0")

    if hps.average_params:
        print("Averaging parameters for evaluation.")
        saver = tf.train.Saver(model.avg_dict)
    else:
        saver = tf.train.Saver()

    # Constrain the session's thread parallelism for evaluation.
    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=20,
                            inter_op_parallelism_threads=1)
    sess = tf.Session(config=config)
    sw = tf.train.SummaryWriter(logdir + "/" + mode, sess.graph)
    ckpt_loader = CheckpointLoader(saver, model.global_step, logdir + "/train")

    with sess.as_default():
        while ckpt_loader.load_checkpoint():
            global_step = ckpt_loader.last_global_step
            data_iterator = dataset.iterate_once(hps.batch_size * hps.num_gpus, hps.num_steps)
            tf.initialize_local_variables().run()
            loss_nom = 0.0
            loss_den = 0.0
            for i, (x, y, w) in enumerate(data_iterator):
                if i >= num_eval_steps:
                    break

                loss = sess.run(model.loss, {model.x: x, model.y: y, model.w: w})
                loss_nom += loss
                loss_den += w.mean()
                loss = loss_nom / loss_den
                sys.stdout.write("%d: %.3f (%.3f) ... " % (i, loss, np.exp(loss)))
                sys.stdout.flush()
            sys.stdout.write("\n")

            log_perplexity = loss_nom / loss_den
            print("Results at %d: log_perplexity = %.3f perplexity = %.3f" % (
                global_step, log_perplexity, np.exp(log_perplexity)))

            summary = tf.Summary()
            summary.value.add(tag='eval/log_perplexity', simple_value=log_perplexity)
            summary.value.add(tag='eval/perplexity', simple_value=np.exp(log_perplexity))
            sw.add_summary(summary, global_step)
            sw.flush() 
Author: rafaljozefowicz | Project: lm | Lines: 50 | Source: run_utils.py

Example 12: main

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def main(files_pattern):
    data_files = gfile.Glob(files_pattern)
    filename_queue = tf.train.string_input_producer(
            data_files, num_epochs=1, shuffle=False)

    reader = YT8MFrameFeatureReader(feature_sizes=[1024, 128], feature_names=["rgb", "audio"]) 
    vals = reader.prepare_reader(filename_queue)

    with tf.Session() as sess:
        sess.run(tf.initialize_local_variables())
        sess.run(tf.initialize_all_variables())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        vid_num = 0
        all_data = []
        try:
            while not coord.should_stop():
                vid, features, audios, labels, nframes = sess.run(vals)
                label_index = np.where(labels==True)[0].tolist()
                vid_num += 1

                #print vid, features.shape, audios.shape, label_index, nframes
                #sys.exit()
 
                features_int = features.astype(np.uint8)
                audios_int = audios.astype(np.uint8)

                dd = {}
                dd['video']   = vid
                dd['feature'] = features_int
                dd['audio']   = audios_int
                dd['label']   = label_index
                dd['nframes'] = nframes
                all_data.append(dd)

        except tf.errors.OutOfRangeError:
            print('Finished extracting.')

        finally:
            coord.request_stop()
            coord.join(threads)

    print(vid_num)

    record_name = files_pattern.split('/')[-1].split('.')[0]
    outp = open('./validate_pkl_all/%s.pkl'%record_name, 'wb')
    cPickle.dump(all_data, outp, protocol=cPickle.HIGHEST_PROTOCOL)
    outp.close() 
Author: baidu | Project: Youtube-8M | Lines: 51 | Source: parse_yt8m_v2_all.py

Example 13: similarity

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def similarity(FLAGS, sess, all_features, all_paths):
    def select_images(distances):
        indices = np.argsort(distances)
        images = []
        size = 40
        for i in range(size):
            images += [dict(path=all_paths[indices[i]],
                            index=indices[i],
                            distance=distances[indices[i]])]
        return images

    # Distance
    x1 = tf.placeholder(tf.float32, shape=[None, all_features.shape[1]])
    x2 = tf.placeholder(tf.float32, shape=[None, all_features.shape[1]])
    l2diff = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1))

    # Init
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())
    sess.run(init_op)

    #
    clip = 1e-3
    np.clip(all_features, -clip, clip, all_features)

    # Get distances
    result = []
    bs = 100
    needles = [randint(0, all_features.shape[0] - 1) for x in range(10)]  # randint is inclusive at both ends
    for needle in needles:
        item_block = np.reshape(np.tile(all_features[needle], bs), [bs, -1])
        distances = np.zeros(all_features.shape[0])
        for i in range(0, all_features.shape[0], bs):
            if i + bs > all_features.shape[0]:
                bs = all_features.shape[0] - i
            distances[i:i + bs] = sess.run(
                l2diff, feed_dict={x1: item_block[:bs], x2: all_features[i:i + bs]})

        # Pick best matches
        result += [select_images(distances)]

    with open('logs/data.json', 'w') as f:
        json.dump(dict(data=result), f)
    return


########
# Main #
######## 
Author: marcbelmont | Project: gan-image-similarity | Lines: 51 | Source: main.py

Example 14: main

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def main(argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  t2t_trainer.maybe_log_registry_and_exit()


  if FLAGS.generate_data:
    t2t_trainer.generate_data()

  if argv:
    t2t_trainer.set_hparams_from_args(argv[1:])
  hparams = t2t_trainer.create_hparams()
  trainer_lib.add_problem_hparams(hparams, FLAGS.problem)
  pruning_params = create_pruning_params()
  pruning_strategy = create_pruning_strategy(pruning_params.strategy)

  config = t2t_trainer.create_run_config(hparams)
  params = {"batch_size": hparams.batch_size}

  # add "_rev" as a hack to avoid image standardization
  problem = registry.problem(FLAGS.problem)
  input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.EVAL,
                                             hparams)
  dataset = input_fn(params, config).repeat()
  features, labels = dataset.make_one_shot_iterator().get_next()

  sess = tf.Session()

  model_fn = t2t_model.T2TModel.make_estimator_model_fn(
      FLAGS.model, hparams, use_tpu=FLAGS.use_tpu)
  spec = model_fn(
      features,
      labels,
      tf.estimator.ModeKeys.EVAL,
      params=hparams,
      config=config)

  # Restore weights
  saver = tf.train.Saver()
  checkpoint_path = os.path.expanduser(FLAGS.output_dir or
                                       FLAGS.checkpoint_path)
  saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))

  def eval_model():
    preds = spec.predictions["predictions"]
    preds = tf.argmax(preds, -1, output_type=labels.dtype)
    _, acc_update_op = tf.metrics.accuracy(labels=labels, predictions=preds)
    sess.run(tf.initialize_local_variables())
    for _ in range(FLAGS.eval_steps):
      acc = sess.run(acc_update_op)
    return acc

  pruning_utils.sparsify(sess, eval_model, pruning_strategy, pruning_params) 
Author: yyht | Project: BERT | Lines: 56 | Source: t2t_prune.py

Example 15: use_fined_model

# Required module: import tensorflow [as alias]
# Or: from tensorflow import initialize_local_variables [as alias]
def use_fined_model(self):
        image_size = inception.inception_v4.default_image_size
        batch_size = 3
        flowers_data_dir = "../../data/flower"
        train_dir = '/tmp/inception_finetuned/'
        
        with tf.Graph().as_default():
            tf.logging.set_verbosity(tf.logging.INFO)
            
            dataset = flowers.get_split('train', flowers_data_dir)
            images, images_raw, labels = self.load_batch(dataset, height=image_size, width=image_size)
            
            # Create the model, use the default arg scope to configure the batch norm parameters.
            with slim.arg_scope(inception.inception_v4_arg_scope()):
                logits, _ = inception.inception_v4(images, num_classes=dataset.num_classes, is_training=True)
        
            probabilities = tf.nn.softmax(logits)
            
            checkpoint_path = tf.train.latest_checkpoint(train_dir)
            init_fn = slim.assign_from_checkpoint_fn(
              checkpoint_path,
              slim.get_variables_to_restore())
            
            with tf.Session() as sess:
                with slim.queues.QueueRunners(sess):
                    sess.run(tf.initialize_local_variables())
                    init_fn(sess)
                    np_probabilities, np_images_raw, np_labels = sess.run([probabilities, images_raw, labels])
            
                    for i in range(batch_size): 
                        image = np_images_raw[i, :, :, :]
                        true_label = np_labels[i]
                        predicted_label = np.argmax(np_probabilities[i, :])
                        predicted_name = dataset.labels_to_names[predicted_label]
                        true_name = dataset.labels_to_names[true_label]
                        
                        plt.figure()
                        plt.imshow(image.astype(np.uint8))
                        plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name))
                        plt.axis('off')
                        plt.show()
                return 
Author: LevinJ | Project: SSD_tensorflow_VOC | Lines: 44 | Source: pretrained.py


Note: The tensorflow.initialize_local_variables examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors. Consult each project's license before using or distributing the code, and do not reproduce this compilation without permission.