

Python tensorflow.get_default_graph Function Code Examples

This article collects typical usage examples of Python's tensorflow.get_default_graph function. If you have been wondering what exactly get_default_graph does, how to call it, or what real-world usage looks like, the curated examples below should help.


The following shows 15 code examples of the get_default_graph function, sorted by popularity by default.
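Before the examples, a minimal orientation sketch may help (TensorFlow 1.x; the tensor name 'x' is illustrative, not taken from any of the projects below). tf.get_default_graph() returns the graph to which newly created operations are added, which is why it can be used to look tensors up by name after a model has been built or restored:

import tensorflow as tf

# Ops created at the top level land in the default graph.
x = tf.placeholder(tf.float32, shape=(None, 3), name='x')
assert x.graph is tf.get_default_graph()

# Tensors can be recovered by name from the default graph;
# this is the pattern most of the examples below rely on.
same_x = tf.get_default_graph().get_tensor_by_name('x:0')
assert same_x is x

# Inside a Graph().as_default() block, the default graph changes.
g = tf.Graph()
with g.as_default():
    assert tf.get_default_graph() is g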

Example 1: predict

    def predict(self, test_X, isDBN):
        """Predict the labels for the test set.

        Parameters
        ----------

        test_X : array_like, shape (n_samples, n_features)
            Test data.

        isDBN : bool
            If True, also return the output of the 'encode-9' layer
            along with the predictions.

        Returns
        -------

        array_like, shape (n_samples,) : predicted labels.
        """
        with self.tf_graph.as_default():
            self.tf_saver = tf.train.import_meta_graph(self.model_path + '.meta')
            with tf.Session() as self.tf_session:
                self.tf_saver.restore(self.tf_session, self.model_path)
                # Look up the tensors of the restored graph by name.
                self.input_data = tf.get_default_graph().get_tensor_by_name('x-input:0')
                self.keep_prob = tf.get_default_graph().get_tensor_by_name('keep-probs:0')
                self.next_train = tf.get_default_graph().get_tensor_by_name('encode-9/dropout/mul:0')
                self.mod_y = tf.get_default_graph().get_tensor_by_name('linear/output-y:0')
                feed = {
                    self.input_data: test_X,
                    self.keep_prob: 1
                }
                if isDBN:
                    return self.next_train.eval(feed), self.mod_y.eval(feed)
                return self.mod_y.eval(feed)
Developer: yy2261, Project: Deep-Learning-TensorFlow, Lines: 29, Source: supervised_model.py

Example 2: main

def main(args):
  
    with tf.Graph().as_default():

        with tf.Session() as sess:
            
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
            
            # Load the model
            print('Loading model "%s"' % args.model_file)
            facenet.load_model(args.model_file)
            
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            tpr, fpr, accuracy, val, val_std, far = lfw.validate(sess, 
                paths, actual_issame, args.seed, 60, 
                images_placeholder, phase_train_placeholder, embeddings, nrof_folds=args.lfw_nrof_folds)
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            
            facenet.plot_roc(fpr, tpr, 'NN4')
Developer: turgunyusuf, Project: facenet, Lines: 28, Source: validate_on_lfw.py

Example 3: test

    def test(self, test_samples, test_labels, *, data_iterator):
        if self.saver is None:
            self.define_model()
        if self.writer is None:
            self.writer = tf.summary.FileWriter('./board', tf.get_default_graph())

        print('Before session')
        with tf.Session(graph=tf.get_default_graph()) as session:
            self.saver.restore(session, self.save_path)
            ### Test
            accuracies = []
            confusionMatrices = []
            for i, samples, labels in data_iterator(test_samples, test_labels, chunkSize=self.test_batch_size):
                result = session.run(
                    self.test_prediction,
                    feed_dict={self.tf_test_samples: samples}
                )
                # self.writer.add_summary(summary, i)
                accuracy, cm = self.accuracy(result, labels, need_confusion_matrix=True)
                accuracies.append(accuracy)
                confusionMatrices.append(cm)
                print('Test Accuracy: %.1f%%' % accuracy)
            print(' Average  Accuracy:', np.average(accuracies))
            print('Standard Deviation:', np.std(accuracies))
            self.print_confusion_matrix(np.add.reduce(confusionMatrices))
Developer: aiedward, Project: TensorFlow-and-DeepLearning-Tutorial, Lines: 25, Source: dp.py

Example 4: __init__

    def __init__(self, model_dir, files, use_gpu=False):
        import tensorflow as tf
        from glob import glob
        self.files = files

        cluster_file = model_dir + 'clusters.npy'
        self.n_clusters = getNCluster(cluster_file)
        self._load_cluster(cluster_file)

        # Build the network, pinned to the CPU unless a GPU is requested.
        with tf.device('/cpu:0' if not use_gpu else None):
            data = tf.placeholder(tf.uint8, shape=(None, None, 3))
            w_data = tf.expand_dims(tf.cast(data, tf.float32), 0) - tf.constant([123, 117, 103], dtype=tf.float32, shape=(1, 1, 3))
            vgg = vgg16(w_data, n_out=self.n_clusters)
            avg_vgg = tf.reduce_mean(tf.reduce_mean(vgg, 1), 1)
            pred = tf.argmax(avg_vgg, dimension=1)
        self.vgg, self.pred, self.data = vgg, pred, data

        saver = tf.train.Saver(tf.all_variables())
        # Freeze the graph so no further ops can be added by accident.
        tf.get_default_graph().finalize()

        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.3)))
        snap = youngest([f for f in glob(os.path.join(model_dir, 'final.ckpt*')) + glob(os.path.join(model_dir, 'snap.ckpt*')) if '.meta' not in f])
        saver.restore(self.sess, snap)
Developer: caomw, Project: geoloc, Lines: 26, Source: server.py

Example 5: train

    def train(self, train_samples, train_labels, *, data_iterator, iteration_steps):
        self.writer = tf.summary.FileWriter('./board', tf.get_default_graph())
        with tf.Session(graph=tf.get_default_graph()) as session:
            tf.initialize_all_variables().run()

            ### Train
            print('Start Training')
            # batch 1000
            for i, samples, labels in data_iterator(train_samples, train_labels, iteration_steps=iteration_steps,
                                                    chunkSize=self.train_batch_size):
                _, l, predictions, summary = session.run(
                    [self.optimizer, self.loss, self.train_prediction, self.merged_train_summary],
                    feed_dict={self.tf_train_samples: samples, self.tf_train_labels: labels}
                )
                self.writer.add_summary(summary, i)
                # labels holds the ground-truth labels
                accuracy, _ = self.accuracy(predictions, labels)
                if i % 50 == 0:
                    print('Minibatch loss at step %d: %f' % (i, l))
                    print('Minibatch accuracy: %.1f%%' % accuracy)
            ###

            # Make sure the directory to save into exists (assumes a single-level path).
            import os
            save_dir = self.save_path.split('/')[0]
            if not os.path.isdir(save_dir):
                os.makedirs(save_dir)
            save_path = self.saver.save(session, self.save_path)
            print("Model saved in file: %s" % save_path)
Developer: aiedward, Project: TensorFlow-and-DeepLearning-Tutorial, Lines: 31, Source: dp.py

Example 6: build_reparam_loss_and_gradients

def build_reparam_loss_and_gradients(inference, var_list):
  """Build loss function. Its automatic differentiation
  is a stochastic gradient of

  $-\\text{ELBO} =
      -\mathbb{E}_{q(z; \lambda)} [ \log p(x, z) - \log q(z; \lambda) ]$

  based on the reparameterization trick (Kingma and Welling, 2014).

  Computed by sampling from $q(z;\lambda)$ and evaluating the
  expectation using Monte Carlo sampling.
  """
  p_log_prob = [0.0] * inference.n_samples
  q_log_prob = [0.0] * inference.n_samples
  base_scope = tf.get_default_graph().unique_name("inference") + '/'
  for s in range(inference.n_samples):
    # Form dictionary in order to replace conditioning on prior or
    # observed variable with conditioning on a specific value.
    scope = base_scope + tf.get_default_graph().unique_name("sample")
    dict_swap = {}
    for x, qx in six.iteritems(inference.data):
      if isinstance(x, RandomVariable):
        if isinstance(qx, RandomVariable):
          qx_copy = copy(qx, scope=scope)
          dict_swap[x] = qx_copy.value()
        else:
          dict_swap[x] = qx

    for z, qz in six.iteritems(inference.latent_vars):
      # Copy q(z) to obtain new set of posterior samples.
      qz_copy = copy(qz, scope=scope)
      dict_swap[z] = qz_copy.value()
      q_log_prob[s] += tf.reduce_sum(
          inference.scale.get(z, 1.0) * qz_copy.log_prob(dict_swap[z]))

    for z in six.iterkeys(inference.latent_vars):
      z_copy = copy(z, dict_swap, scope=scope)
      p_log_prob[s] += tf.reduce_sum(
          inference.scale.get(z, 1.0) * z_copy.log_prob(dict_swap[z]))

    for x in six.iterkeys(inference.data):
      if isinstance(x, RandomVariable):
        x_copy = copy(x, dict_swap, scope=scope)
        p_log_prob[s] += tf.reduce_sum(
            inference.scale.get(x, 1.0) * x_copy.log_prob(dict_swap[x]))

  p_log_prob = tf.reduce_mean(p_log_prob)
  q_log_prob = tf.reduce_mean(q_log_prob)

  if inference.logging:
    tf.summary.scalar("loss/p_log_prob", p_log_prob,
                      collections=[inference._summary_key])
    tf.summary.scalar("loss/q_log_prob", q_log_prob,
                      collections=[inference._summary_key])

  loss = -(p_log_prob - q_log_prob)

  grads = tf.gradients(loss, var_list)
  grads_and_vars = list(zip(grads, var_list))
  return loss, grads_and_vars
Developer: wujsAct, Project: edward, Lines: 60, Source: klqp.py
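The loops above build a plain Monte Carlo estimate of the negative ELBO. Written out (with $S$ = inference.n_samples and the per-variable scale factors omitted for brevity):

$-\mathcal{L} = -\frac{1}{S}\sum_{s=1}^{S}\left[\log p(x, z^{(s)}) - \log q(z^{(s)}; \lambda)\right], \qquad z^{(s)} \sim q(z; \lambda)$

Because each $z^{(s)}$ is a reparameterized sample from $q$, tf.gradients can differentiate this loss with respect to var_list directly.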

Example 7: restore_and_run

def restore_and_run():
    sess = tf.Session()
    # First, load the meta graph and restore the weights.
    saver = tf.train.import_meta_graph('my_test_model-1000.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./'))


    # Access a saved variable directly.
    b1_value = sess.run('bias:0')
    print(b1_value)
    # This will print 2, which is the value of bias that we saved.

    # We cannot run 'op_to_restore:0' directly, since it depends on
    # w1 and w2, which must be fed in.
    # w4_value = sess.run('op_to_restore:0')


    # Now access the saved placeholders and build a feed_dict
    # with new input data.
    graph = tf.get_default_graph()
    w1 = graph.get_tensor_by_name("w1:0")
    w2 = graph.get_tensor_by_name("w2:0")
    feed_dict = {w1: 13.0, w2: 17.0}

    # Access the op that you want to run.
    w4 = graph.get_tensor_by_name("op_to_restore:0")

    w4_value = sess.run(w4, feed_dict)
    print(w4_value)
    # This will print 60, consistent with op_to_restore = (w1 + w2) * bias.

    # List every operation and its output tensors in the graph.
    graph.get_operations()
    [op.outputs for op in tf.get_default_graph().get_operations()]
Developer: jichen3000, Project: codes, Lines: 34, Source: save_restore_example.py
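For context, the checkpoint restored above could have been produced by a save-side script along these lines. This is a reconstruction inferred from the tensor names in the example (w1, w2, bias, op_to_restore) and the printed values, not the original author's code:

import tensorflow as tf

# Placeholders that must be fed again after restoring.
w1 = tf.placeholder(tf.float32, name="w1")
w2 = tf.placeholder(tf.float32, name="w2")
bias = tf.Variable(2.0, name="bias")

# The op later looked up as 'op_to_restore:0'.
w3 = tf.add(w1, w2)
w4 = tf.multiply(w3, bias, name="op_to_restore")

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Writes my_test_model-1000.index/.data/.meta in the working directory.
    saver.save(sess, 'my_test_model', global_step=1000)

With this graph, feeding w1=13.0 and w2=17.0 yields (13 + 17) * 2 = 60, matching the comment in the restore code.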

Example 8: main

def main():
    with open(a.input_file, "rb") as f:
        input_data = f.read()

    input_instance = dict(input=base64.urlsafe_b64encode(input_data).decode("ascii"), key="0")
    input_instance = json.loads(json.dumps(input_instance))

    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(a.model_dir + "/export.meta")
        saver.restore(sess, a.model_dir + "/export")
        input_vars = json.loads(tf.get_collection("inputs")[0])
        output_vars = json.loads(tf.get_collection("outputs")[0])
        input = tf.get_default_graph().get_tensor_by_name(input_vars["input"])
        output = tf.get_default_graph().get_tensor_by_name(output_vars["output"])

        input_value = np.array(input_instance["input"])
        output_value = sess.run(output, feed_dict={input: np.expand_dims(input_value, axis=0)})[0]

    output_instance = dict(output=output_value.decode("ascii"), key="0")

    b64data = output_instance["output"]
    b64data += "=" * (-len(b64data) % 4)
    output_data = base64.urlsafe_b64decode(b64data.encode("ascii"))

    with open(a.output_file, "wb") as f:
        f.write(output_data)
Developer: Lagogoy, Project: Deep-Learning-21-Examples, Lines: 26, Source: process-local.py

Example 9: testFromDifferentScope

  def testFromDifferentScope(self):
    sub = functools.partial(re.sub, r'^[^/]+/', 'agent/')
    restore_initializers = {
        'w': initializers.restore_initializer(_checkpoint(), 'w', sub),
        'b': initializers.restore_initializer(_checkpoint(), 'b', sub)
    }

    with tf.variable_scope('some_random_scope'):
      c = convnet.ConvNet2D(
          output_channels=(16, 32),
          kernel_shapes=(8, 4),
          strides=(4, 2),
          paddings=[conv.VALID],
          activation=tf.nn.relu,
          activate_final=True,
          initializers=restore_initializers)

    inputs = tf.constant(1 / 255.0, shape=[1, 86, 86, 3])
    outputs = c(inputs)
    init = tf.global_variables_initializer()
    tf.get_default_graph().finalize()
    with self.test_session() as session:
      session.run(init)
      o = session.run(outputs)

    self.assertAllClose(
        np.linalg.norm(o), _TWO_CONV_LAYERS_RELU, atol=_TOLERANCE)
Developer: TianjiPang, Project: sonnet, Lines: 27, Source: initializers_test.py

Example 10: _save_tf_model

    def _save_tf_model(self):
        ckpt_dir = '/opt/ml/output/data/checkpoint'
        model_dir = '/opt/ml/model'

        # Re-Initialize from the checkpoint so that you will have the latest models up.
        tf.train.init_from_checkpoint(ckpt_dir,
                                      {'main_level/agent/online/network_0/': 'main_level/agent/online/network_0'})
        tf.train.init_from_checkpoint(ckpt_dir,
                                      {'main_level/agent/online/network_1/': 'main_level/agent/online/network_1'})

        # Create a new session with a new tf graph.
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(tf.global_variables_initializer())  # initialize the checkpoint.

        # This is the node that will accept the input.
        input_nodes = tf.get_default_graph().get_tensor_by_name('main_level/agent/main/online/' + \
                                                                'network_0/observation/observation:0')
        # This is the node that will produce the output.
        output_nodes = tf.get_default_graph().get_operation_by_name('main_level/agent/main/online/' + \
                                                                    'network_1/ppo_head_0/policy_mean/BiasAdd')
        # Save the model as a servable model.
        tf.saved_model.simple_save(session=sess,
                                   export_dir='model',
                                   inputs={"observation": input_nodes},
                                   outputs={"policy": output_nodes.outputs[0]})
        # Move to the appropriate folder. 
        shutil.move('model/', model_dir + '/model/tf-model/00000001/')
        # SageMaker will pick it up and upload to the right path.
        print("Success")
Developer: FNDaily, Project: amazon-sagemaker-examples, Lines: 29, Source: train-coach.py

Example 11: __init__

    def __init__(self, hyperparams):

        #Model hyperparameters
        self.learning_rate = hyperparams['learning_rate']
        self.encoder_act_func = tf.nn.elu #tf.nn.softplus #tf.tanh
        self.decoder_act_func = tf.tanh
        self.encoder_net = hyperparams['encoder_net']
        self.decoder_net = hyperparams['decoder_net']
        self.z_size = hyperparams['z_size']  #Z
        self.x_size = hyperparams['x_size']  #X
        self.rs = 0
        self.n_W_particles = hyperparams['n_W_particles']  #S

        #Placeholders - Inputs/Targets
        self.x = tf.placeholder(tf.float32, [None, self.x_size])
        self.batch_size = tf.placeholder(tf.int32, None)   #B
        self.n_z_particles = tf.placeholder(tf.int32, None)  #P
        self.batch_frac = tf.placeholder(tf.float32, None)
        
        #Objective
        self.elbo = self.objective(self.x)

        # Minimize negative ELBO
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, 
                                                epsilon=1e-02).minimize(-self.elbo)

        # Finalize initialization
        self.init_vars = tf.global_variables_initializer()
        self.saver = tf.train.Saver()
        tf.get_default_graph().finalize()
        self.sess = tf.Session()
Developer: chriscremer, Project: Other_Code, Lines: 31, Source: BVAE.py

Example 12: main

def main(args):
    align = align_dlib.AlignDlib(os.path.expanduser(args.dlib_face_predictor))
    image_paths = [args.image1, args.image2]
    landmarkIndices = align_dlib.AlignDlib.OUTER_EYES_AND_NOSE

    with tf.Graph().as_default():

        with tf.Session() as sess:
      
            # Load the model
            print('Loading model "%s"' % args.model_file)
            facenet.load_model(args.model_file)
    
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            image_size = int(images_placeholder.get_shape()[1])

            # Run forward pass to calculate embeddings
            images = load_and_align_data(image_paths, image_size, align, landmarkIndices)
            feed_dict = { images_placeholder: images, phase_train_placeholder: False }
            emb = sess.run(embeddings, feed_dict=feed_dict)
            dist = np.sqrt(np.mean(np.square(np.subtract(emb[0,:], emb[1,:]))))
            print('Distance between the embeddings: %3.6f' % dist)
Developer: turgunyusuf, Project: facenet, Lines: 25, Source: compare.py

Example 13: test_input_pipeline

    def test_input_pipeline(self):
        Xs, Ys = dsu.tiny_imagenet_load()
        n_batches = 0
        batch_size = 10
        with tf.Graph().as_default(), tf.Session() as sess:
            batch_generator = dsu.create_input_pipeline(
                Xs[:100],
                batch_size=batch_size,
                n_epochs=1,
                shape=(64, 64, 3),
                crop_shape=(64, 64, 3))
            init_op = tf.group(tf.global_variables_initializer(),
                               tf.local_variables_initializer())
            sess.run(init_op)
            coord = tf.train.Coordinator()
            tf.get_default_graph().finalize()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            try:
                while not coord.should_stop():
                    batch = sess.run(batch_generator)
                    assert (batch.shape == (batch_size, 64, 64, 3))
                    n_batches += 1
            except tf.errors.OutOfRangeError:
                pass
            finally:
                coord.request_stop()
            coord.join(threads)
        assert (n_batches == 10)
Developer: pradeeps, Project: pycadl, Lines: 28, Source: test_dataset_utils.py

Example 14: test_distribution_creation_global_graph

def test_distribution_creation_global_graph():
    # Distribution creation doesn't modify the global graph
    before = tf.get_default_graph().as_graph_def()
    with tp.Model():
        tp.Parameter()
    after = tf.get_default_graph().as_graph_def()
    assert before == after
Developer: MaxNoe, Project: tensorprob, Lines: 7, Source: test_model.py

Example 15: central_step

    def central_step():
        # Restore v1 and the optimizer slots.
        op5 = tf.group(*[tf.assign(w, v) for w, v in zip(restored_vars, tmp_vars)])
        with tf.get_default_graph().control_dependencies([op5]):
            back = tf.group(*[tf.assign_sub(v, -self._lr_t * grad) for grad, v in grads_and_vars])
            with tf.get_default_graph().control_dependencies([back]):
                return tf.gradients(self.gan.trainer.d_loss, d_vars) + tf.gradients(self.gan.trainer.g_loss, g_vars)
Developer: 255BITS, Project: hyperchamber-gan, Lines: 7, Source: curl_optimizer.py


Note: The tensorflow.get_default_graph examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from community-contributed open-source projects, and copyright in the source code remains with the original authors. Consult each project's license before distributing or reusing the code; please do not repost without permission.