

Python tensorflow.group Function Code Examples

This article collects typical usage examples of the tensorflow.group function in Python. If you are wondering what tf.group is for, how to use it, or where it is used in practice, the hand-picked examples below should help.


The following shows 15 code examples of the group function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
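Before diving into the examples, here is a minimal sketch of what tf.group does, assuming the TF 1.x graph-mode API used throughout the examples below: it bundles several ops into a single op with no output value, so one sess.run call executes all of them. The variable names here are illustrative only.

import tensorflow as tf

# Two counters and one update op for each (names are illustrative only).
a = tf.Variable(0, name='a')
b = tf.Variable(0, name='b')
inc_a = tf.assign_add(a, 1)
inc_b = tf.assign_add(b, 10)

# tf.group bundles both updates into one op; it returns no value itself.
both = tf.group(inc_a, inc_b)

# The same idiom many of the examples below use for initialization.
init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())

with tf.Session() as sess:
    sess.run(init_op)
    sess.run(both)           # runs inc_a and inc_b together
    print(sess.run([a, b]))  # [1, 10]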

Example 1: testSummariesAreFlushedToDiskWithoutGlobalStep

  def testSummariesAreFlushedToDiskWithoutGlobalStep(self):
    output_dir = os.path.join(self.get_temp_dir(), 'flush_test_no_global_step')
    if tf.gfile.Exists(output_dir):  # For running on jenkins.
      tf.gfile.DeleteRecursively(output_dir)

    names_to_metrics, names_to_updates = self._create_names_to_metrics(
        self._predictions, self._labels)

    for k in names_to_metrics:
      v = names_to_metrics[k]
      tf.summary.scalar(k, v)

    summary_writer = tf.train.SummaryWriter(output_dir)

    initial_op = tf.group(tf.global_variables_initializer(),
                          tf.local_variables_initializer())
    eval_op = tf.group(*names_to_updates.values())

    with self.test_session() as sess:
      slim.evaluation.evaluation(
          sess,
          initial_op=initial_op,
          eval_op=eval_op,
          summary_op=tf.summary.merge_all(),
          summary_writer=summary_writer)

      names_to_values = {name: names_to_metrics[name].eval()
                         for name in names_to_metrics}
    self._verify_summaries(output_dir, names_to_values)
Developer: ComeOnGetMe, Project: tensorflow, Lines: 29, Source: evaluation_test.py

Example 2: __init__

    def __init__(self, dataset_path_or_instance, layers, epochs, starter_learning_rate, noise_std,
                 train_ratio=0.8, test_ratio=0.1, validation_ratio=0.1):
        super(MultilayerPerceptron, self).__init__(dataset_path_or_instance, epochs, starter_learning_rate,
                                                   train_ratio, test_ratio, validation_ratio)
        self._noise_std = noise_std

        self._layers = layers
        self._layers.insert(0, self._input_size)
        self._layers.append(self._output_size)
        self._L = len(self._layers) - 1  # size of layers ignoring input layer

        # build network and return cost function
        self._cost = self.__build_network__()

        # define the y function as the classification function
        self._y = self.__build_classifier__()

        # loss
        self._loss = -tf.reduce_mean(tf.reduce_sum(self._outputs*tf.log(self._cost), 1))

        # y_true and y_pred used to get the metrics
        self._y_true = tf.argmax(self._outputs, 1)
        self._y_pred = tf.argmax(self._y, 1)

        # train_step for the weight parameters, optimized with Adam
        self._learning_rate = tf.Variable(self._starter_learning_rate, trainable=False)
        self._train_step = tf.train.AdamOptimizer(self._learning_rate).minimize(self._loss)

        # add the updates of batch normalization statistics to train_step
        bn_updates = tf.group(*self._bn_assigns)
        with tf.control_dependencies([self._train_step]):
            self._train_step = tf.group(bn_updates)
Developer: crscardellino, Project: dnnwsd, Lines: 32, Source: neuralnetwork.py

Example 3: init_gradients

 def init_gradients(self, loss, var_train):
     if self.play_mode:
         return
     
     with tf.device(self.args.device):
         var_refs = [v.ref() for v in var_train]
         train_gradients = tf.gradients(
             loss, var_refs,
             gate_gradients=False,
             aggregation_method=None,
             colocate_gradients_with_ops=False)
 
         acc_gradient_list = []
         train_step_list = []
         new_grad_vars = []
         self.grad_list = []
         var_list = []
         for grad, var in zip(train_gradients, self.global_vars):
             acc_gradient = tf.Variable(tf.zeros(grad.get_shape()), trainable=False)
             acc_gradient_list.append(acc_gradient)
             train_step_list.append(acc_gradient.assign_add(grad))
             new_grad_vars.append((tf.convert_to_tensor(acc_gradient, dtype=tf.float32), var))
             self.grad_list.append(acc_gradient)
             var_list.append(var)
         
         self.train_step = tf.group(*train_step_list)                
         
         self.reset_acc_gradients = tf.initialize_variables(acc_gradient_list)        
         self.apply_grads = self.global_optimizer.apply_gradients(new_grad_vars)
 
         sync_list = []
         for i in range(0, len(self.global_vars)):
             sync_list.append(var_train[i].assign(self.global_vars[i]))
         self.sync = tf.group(*sync_list)
Developer: only4hj, Project: DeepRL, Lines: 34, Source: model_tf_async.py

Example 4: optimize

    def optimize(self, learning_rate, train_layers, global_step, source_centroid, target_centroid):
        print '+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
        print train_layers
        var_list = [v for v in tf.trainable_variables() if v.name.split('/')[1] in train_layers+['fc9']]
        finetune_list = [v for v in var_list if v.name.split('/')[1] in ['conv1','conv2','conv3','conv4','conv5','fc6','fc7']]
        new_list = [v for v in var_list if v.name.split('/')[1] in ['fc8','fc9']]
        self.Gregloss = 0.0005*tf.reduce_mean([tf.nn.l2_loss(x) for x in var_list if 'weights' in x.name])

        finetune_weights = [v for v in finetune_list if 'weights' in v.name]
        finetune_biases = [v for v in finetune_list if 'biases' in v.name]
        new_weights = [v for v in new_list if 'weights' in v.name]
        new_biases = [v for v in new_list if 'biases' in v.name]

        print '==============finetune_weights======================='
        print finetune_weights
        print '==============finetune_biases======================='
        print finetune_biases
        print '==============new_weights======================='
        print new_weights
        print '==============new_biases======================='
        print new_biases

        self.F_loss = self.loss + self.Gregloss + global_step*self.G_loss + global_step*self.Semanticloss
        train_op1 = tf.train.MomentumOptimizer(learning_rate*0.1, 0.9).minimize(self.F_loss, var_list=finetune_weights)
        train_op2 = tf.train.MomentumOptimizer(learning_rate*0.2, 0.9).minimize(self.F_loss, var_list=finetune_biases)
        train_op3 = tf.train.MomentumOptimizer(learning_rate*1.0, 0.9).minimize(self.F_loss, var_list=new_weights)
        train_op4 = tf.train.MomentumOptimizer(learning_rate*2.0, 0.9).minimize(self.F_loss, var_list=new_biases)
        train_op = tf.group(train_op1, train_op2, train_op3, train_op4)
        with tf.control_dependencies([train_op1, train_op2, train_op3, train_op4]):
            update_sc = self.source_moving_centroid.assign(source_centroid)
            update_tc = self.target_moving_centroid.assign(target_centroid)

        return tf.group(update_sc, update_tc)
Developer: slowbull, Project: Moving-Semantic-Transfer-Network, Lines: 34, Source: mstnmodel.py

Example 5: nabla

 def nabla(self,cost,n,c=1,q=0.001,a=0.001,A=100,alpha=0.602,gamma=0.101):
     cn=(c+0.0)/(n+A)**gamma
     an=a/(n+1+A)**alpha
     qk=math.sqrt(q/(n+A)*math.log(math.log(n+A)))
     wk=normal()
     dv=[]
     sess=self.sess
     g=[]
     orig=self.var
     for m in self.var:
         shape=m.shape
         nm=np.ones(shape=shape)
         for x in np.nditer(nm, op_flags=['readwrite']):
             x[...]=dist.bernoulli() * 2 * cn
         dv.append(nm)
     l = []  # assign ops that apply the positive perturbation
     for m,d,t in zip(self.var,dv,self.var_t):
         l.append(t.assign(m+d))
     sess.run(tf.group(*l))
     f1=sess.run(cost,self.feed)
     l = []  # assign ops that apply the negative perturbation
     for m,d,t in zip(self.var,dv,self.var_t):
         l.append(t.assign(m-d))
     sess.run(tf.group(*l))
     f0=sess.run(cost,self.feed)
     df=f1-f0
     for m in dv:
         for x in np.nditer(m, op_flags=['readwrite']):
             x[...]=-(df+0.0)/x/2
     return dv
Developer: Vendea, Project: summer-research-2016, Lines: 30, Source: DistributedSPSA.py

Example 6: central_step

 def central_step():
     # restore v1, slots
     op5 = tf.group(*[ tf.assign(w,v) for w,v in zip(restored_vars, tmp_vars)])
     with tf.get_default_graph().control_dependencies([op5]):
         back =  tf.group(*[tf.assign_sub(v, -self._lr_t*grad) for grad,v in grads_and_vars])
         with tf.get_default_graph().control_dependencies([back]):
             return tf.gradients(self.gan.trainer.d_loss, d_vars) + tf.gradients(self.gan.trainer.g_loss, g_vars)
Developer: 255BITS, Project: hyperchamber-gan, Lines: 7, Source: curl_optimizer.py

Example 7: build_rmsprop_optimizer

	def build_rmsprop_optimizer(self, learning_rate, rmsprop_decay, rmsprop_constant, gradient_clip, version):

		with tf.name_scope('rmsprop'):
			optimizer = tf.train.GradientDescentOptimizer(learning_rate)

			grads_and_vars = optimizer.compute_gradients(self.loss)
			grads = [gv[0] for gv in grads_and_vars]
			params = [gv[1] for gv in grads_and_vars]

			if gradient_clip > 0:
				grads, _ = tf.clip_by_global_norm(grads, gradient_clip)  # returns (clipped_list, global_norm)

			if version == 'rmsprop':
				return optimizer.apply_gradients(zip(grads, params))
			elif version == 'graves_rmsprop':
				square_grads = [tf.square(grad) for grad in grads]

				avg_grads = [tf.Variable(tf.ones(var.get_shape())) for var in params]
				avg_square_grads = [tf.Variable(tf.ones(var.get_shape())) for var in params]

				update_avg_grads = [grad_pair[0].assign((rmsprop_decay * grad_pair[0]) + ((1 - rmsprop_decay) * grad_pair[1])) 
					for grad_pair in zip(avg_grads, grads)]
				update_avg_square_grads = [grad_pair[0].assign((rmsprop_decay * grad_pair[0]) + ((1 - rmsprop_decay) * tf.square(grad_pair[1]))) 
					for grad_pair in zip(avg_square_grads, grads)]
				avg_grad_updates = update_avg_grads + update_avg_square_grads

				rms = [tf.sqrt(avg_grad_pair[1] - tf.square(avg_grad_pair[0]) + rmsprop_constant)
					for avg_grad_pair in zip(avg_grads, avg_square_grads)]


				rms_updates = [grad_rms_pair[0] / grad_rms_pair[1] for grad_rms_pair in zip(grads, rms)]
				train = optimizer.apply_gradients(zip(rms_updates, params))

				return tf.group(train, tf.group(*avg_grad_updates))
Developer: hihell, Project: deep_rl_ale, Lines: 34, Source: parallel_q_network.py

Example 8: optimize

    def optimize(self, learning_rate, train_layers, global_step, source_centroid, target_centroid):
        print '+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
        print train_layers
        var_list = [v for v in tf.trainable_variables() if v.name.split('/')[1] in ['conv1','conv2','fc1','fc2']]
        self.Gregloss = 5e-4*tf.reduce_mean([tf.nn.l2_loss(x) for x in var_list if 'weights' in x.name])

        new_weights = [v for v in var_list if 'weights' in v.name or 'gamma' in v.name]
        new_biases = [v for v in var_list if 'biases' in v.name or 'beta' in v.name]

        print '==============new_weights======================='
        print new_weights
        print '==============new_biases======================='
        print new_biases

        self.F_loss = self.loss + self.Gregloss + global_step*self.Semanticloss + global_step*self.G_loss
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        print '+++++++++++++++ batch norm update ops +++++++++++++++++'
        print update_ops
        with tf.control_dependencies(update_ops):
            train_op3 = tf.train.MomentumOptimizer(learning_rate*1.0, 0.9).minimize(self.F_loss, var_list=new_weights)
            train_op4 = tf.train.MomentumOptimizer(learning_rate*2.0, 0.9).minimize(self.F_loss, var_list=new_biases)
        train_op = tf.group(train_op3, train_op4)

        with tf.control_dependencies([train_op3, train_op4]):
            update_sc = self.source_moving_centroid.assign(source_centroid)
            update_tc = self.target_moving_centroid.assign(target_centroid)
        return tf.group(update_sc, update_tc)
Developer: slowbull, Project: Moving-Semantic-Transfer-Network, Lines: 28, Source: mstnmodel.py

Example 9: solve

def solve(global_step):
    """add solver to losses"""
    # learning rate
    lr = _configure_learning_rate(82783, global_step)
    optimizer = _configure_optimizer(lr)
    tf.summary.scalar('learning_rate', lr)

    # compute and apply gradient
    losses = tf.get_collection(tf.GraphKeys.LOSSES)
    regular_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    regular_loss = tf.add_n(regular_losses)
    out_loss = tf.add_n(losses)
    total_loss = tf.add_n(losses + regular_losses)

    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('out_loss', out_loss)
    tf.summary.scalar('regular_loss', regular_loss)

    update_ops = []
    variables_to_train = _get_variables_to_train()
    # update_op = optimizer.minimize(total_loss)
    gradients = optimizer.compute_gradients(total_loss, var_list=variables_to_train)
    grad_updates = optimizer.apply_gradients(gradients, 
            global_step=global_step)
    update_ops.append(grad_updates)
    
    # update moving mean and variance
    if FLAGS.update_bn:
        update_bns = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        update_bn = tf.group(*update_bns)
        update_ops.append(update_bn)

    return tf.group(*update_ops)
Developer: imyourm8, Project: FastMaskRCNN, Lines: 33, Source: train.py

Example 10: running_mean

def running_mean(cost, tag_name, batch_size=1):
    with tf.name_scope("running_mean_" + tag_name):
        with tf.variable_scope(tag_name):
            cost_sum = tf.get_variable(
              "cost_sum",
              initializer=tf.zeros_initializer,
              dtype=tf.float64,
              shape=(),
              collections=[tf.GraphKeys.LOCAL_VARIABLES],
              trainable=False)
            batches = tf.get_variable(
              "cost_num_batches",
              initializer=tf.zeros_initializer,
              dtype=tf.int32,
              shape=(),
              collections=[tf.GraphKeys.LOCAL_VARIABLES],
              trainable=False)

        cost_add = tf.assign_add(cost_sum, tf.cast(cost, dtype=tf.float64))
        batches_add = tf.assign_add(batches, batch_size)
        update_cost_mean = tf.group(cost_add, batches_add)

        reset_batches = tf.assign(batches, 0)
        reset_cost_sum = tf.assign(cost_sum, 0.0)
        reset_cost_mean = tf.group(reset_batches, reset_cost_sum)

        mean_cost = tf.divide(
          cost_sum,
          tf.cast(batches, dtype=tf.float64))
        train_loss_summary = tf.summary.scalar(tag_name, mean_cost)

    return reset_cost_mean, update_cost_mean, train_loss_summary
Developer: cupslab, Project: neural_network_cracking, Lines: 32, Source: pass_utils.py

Example 11: AddTraining

  def AddTraining(self,
                  task_context,
                  batch_size,
                  learning_rate=0.1,
                  decay_steps=4000,
                  momentum=0.9,
                  corpus_name='documents'):
    """Builds a trainer to minimize the cross entropy cost function.

    Args:
      task_context: file path from which to read the task context
      batch_size: batch size to request from reader op
      learning_rate: initial value of the learning rate
      decay_steps: decay learning rate by 0.96 every this many steps
      momentum: momentum parameter used when training with momentum
      corpus_name: name of the task input to read parses from

    Returns:
      Dictionary of named training nodes.
    """
    with tf.name_scope('training'):
      nodes = self.training
      nodes.update(self._AddGoldReader(task_context, batch_size, corpus_name))
      nodes.update(self._BuildNetwork(nodes['feature_endpoints'],
                                      return_average=False))
      nodes.update(self._AddCostFunction(batch_size, nodes['gold_actions'],
                                         nodes['logits']))
      # Add the optimizer
      if self._only_train:
        trainable_params = [v
                            for k, v in self.params.iteritems()
                            if k in self._only_train]
      else:
        trainable_params = self.params.values()
      lr = self._AddLearningRate(learning_rate, decay_steps)
      optimizer = tf.train.MomentumOptimizer(lr,
                                             momentum,
                                             use_locking=self._use_locking)
      train_op = optimizer.minimize(nodes['cost'], var_list=trainable_params)
      for param in trainable_params:
        slot = optimizer.get_slot(param, 'momentum')
        self.inits[slot.name] = state_ops.init_variable(slot,
                                                        tf.zeros_initializer)
        self.variables[slot.name] = slot
      numerical_checks = [
          tf.check_numerics(param,
                            message='Parameter is not finite.')
          for param in trainable_params
          if param.dtype.base_dtype in [tf.float32, tf.float64]
      ]
      check_op = tf.group(*numerical_checks)
      avg_update_op = tf.group(*self._averaging.values())
      train_ops = [train_op]
      if self._check_parameters:
        train_ops.append(check_op)
      if self._use_averaging:
        train_ops.append(avg_update_op)
      nodes['train_op'] = tf.group(*train_ops, name='train_op')
    return nodes
Developer: TrendonixNetwork, Project: ProjectCybonix, Lines: 59, Source: graph_builder.py

Example 12: create_variables

    def create_variables(self, settings):
        self.network_names = [
            'state_encoder',
            'action_decoder',
            'value_decoder',
        ]

        #### CREATE ALL THE NETWORKS
        self.networks = {
            name:parse_block(settings['networks'][name])
            for name in self.network_names
        }

        #### CREATE VARIABLES TO STORE GRADIENTS
        self.net_grads = {}
        for n in self.network_names:
            self.net_grads[n] = [
                tf.Variable(tf.zeros_like(v), name=v.name.split(':')[0]+"_grad")
                for v in self.networks[n].variables()
            ]

        #### CREATE COMBINED NETWORK: state -> action
        self.action_network = SequenceWrapper(
            [self.networks["state_encoder"], self.networks["action_decoder"]],
            scope="action_network")

        #### CREATE COMBINED NETWORK: state -> state_value
        self.value_network = SequenceWrapper(
            [self.networks["state_encoder"], self.networks["value_decoder"]],
            scope="value_network")

        #### COMPUTE STATE VALUE AND ACTION
        self.state        = self.networks["state_encoder"].input_placeholder()
        self.action_probs = self.action_network(self.state)
        self.action_id    = tf.argmax(self.action_probs, dimension=1)

        self.state_value        =  tf.reduce_sum(self.value_network(self.state), 1)

        #### COMPUTE ACTOR UPDATE
        self.reward             = tf.placeholder(tf.float32, (None,))
        self.chosen_action_id   = tf.placeholder(tf.int64, (None,))

        self.advantage          = self.reward - tf.stop_gradient(self.state_value)
        self.onehot             = tf.constant(np.diag(
                np.ones((self.num_actions,), dtype=np.float32)))
        self.chosen_action_mask = tf.nn.embedding_lookup(self.onehot, self.chosen_action_id)
        self.chosen_action_prob = tf.reduce_sum(self.action_probs * self.chosen_action_mask, 1)
        self.actor_loss         = - tf.log(self.chosen_action_prob) * self.advantage
        self.update_actor_grads = tf.group(*[
            self.update_network_grads('state_encoder', self.actor_loss),
            self.update_network_grads('action_decoder', self.actor_loss),
        ])

        #### COMPUTE VALUE NETWORK UPDATE
        self.value_loss         = tf.square(self.reward - self.state_value)
        self.update_value_grads = tf.group(*[
            self.update_network_grads('state_encoder', self.value_loss),
            self.update_network_grads('value_decoder', self.value_loss),
        ])
Developer: amoliu, Project: deeprl, Lines: 59, Source: enc_dec.py

Example 13: __init__

  def __init__(self, target, name, do_inverses=False):
    self.name = name
    self.target = target
    self.do_inverses = do_inverses
    self.tf_svd = SvdTuple(tf.svd(target))
    self.update_counter = 0

    self.init = SvdTuple(
      ones(target.shape[0], name=name+"_s_init"),
      Identity(target.shape[0], name=name+"_u_init"),
      Identity(target.shape[0], name=name+"_v_init"),
      Identity(target.shape[0], name=name+"_inv_init"),
    )

    assert self.tf_svd.s.shape == self.init.s.shape
    assert self.tf_svd.u.shape == self.init.u.shape
    assert self.tf_svd.v.shape == self.init.v.shape
    #    assert self.tf_svd.inv.shape == self.init.inv.shape

    self.cached = SvdTuple(
      tf.Variable(self.init.s, name=name+"_s"),
      tf.Variable(self.init.u, name=name+"_u"),
      tf.Variable(self.init.v, name=name+"_v"),
      tf.Variable(self.init.inv, name=name+"_inv"),
    )

    self.s = self.cached.s
    self.u = self.cached.u
    self.v = self.cached.v
    self.inv = self.cached.inv
    
    self.holder = SvdTuple(
      tf.placeholder(default_dtype, shape=self.cached.s.shape, name=name+"_s_holder"),
      tf.placeholder(default_dtype, shape=self.cached.u.shape, name=name+"_u_holder"),
      tf.placeholder(default_dtype, shape=self.cached.v.shape, name=name+"_v_holder"),
      tf.placeholder(default_dtype, shape=self.cached.inv.shape, name=name+"_inv_holder")
    )

    self.update_tf_op = tf.group(
      self.cached.s.assign(self.tf_svd.s),
      self.cached.u.assign(self.tf_svd.u),
      self.cached.v.assign(self.tf_svd.v),
      self.cached.inv.assign(self.tf_svd.inv)
    )

    self.update_external_op = tf.group(
      self.cached.s.assign(self.holder.s),
      self.cached.u.assign(self.holder.u),
      self.cached.v.assign(self.holder.v),
    )

    self.update_externalinv_op = tf.group(
      self.cached.inv.assign(self.holder.inv),
    )


    self.init_ops = (self.s.initializer, self.u.initializer, self.v.initializer,
                     self.inv.initializer)
Developer: yaroslavvb, Project: stuff, Lines: 58, Source: util.py

Example 14: test_mnist

    def test_mnist(self):
        import tensor_dynamic.data.input_data as mnist

        num_labeled = 100
        data = mnist.read_data_sets("../data/MNIST_data", n_labeled=num_labeled, one_hot=True)

        batch_size = 100
        num_epochs = 1
        num_examples = 60000
        num_iter = (num_examples/batch_size) * num_epochs
        starter_learning_rate = 0.02
        inputs = tf.placeholder(tf.float32, shape=(None, 784))
        targets = tf.placeholder(tf.float32)

        with tf.Session() as s:
            s.as_default()
            i = InputLayer(inputs)
            l1 = LadderLayer(i, 500, 1000.0, s)
            l2 = LadderGammaLayer(l1, 10, 10.0, s)
            ladder = LadderOutputLayer(l2, 0.1, s)

            loss = ladder.cost_all_layers_train(targets)
            learning_rate = tf.Variable(starter_learning_rate, trainable=False)
            train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)

            bn_updates = tf.group(*(l1.bn_assigns + l2.bn_assigns))
            with tf.control_dependencies([train_step]):
                train_step = tf.group(bn_updates)
            pred_cost = -tf.reduce_mean(tf.reduce_sum(targets * tf.log(tf.clip_by_value(ladder.activation_predict, 1e-10, 1.0)), 1))  # cost used for prediction

            correct_prediction = tf.equal(tf.argmax(ladder.activation_predict, 1), tf.argmax(targets, 1))  # no of correct predictions
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) * tf.constant(100.0)

            s.run(tf.initialize_all_variables())

            #print "init accuracy", s.run([accuracy], feed_dict={inputs: data.test.images, targets: data.test.labels})

            min_loss = 100000.

            writer = tf.train.SummaryWriter("/tmp/td", s.graph_def)
            writer.add_graph(s.graph_def)

            for i in range(num_iter):
                images, labels = data.train.next_batch(batch_size)
                _, loss_val = s.run([train_step, loss], feed_dict={inputs: images, targets: labels})

                if loss_val < min_loss:
                    min_loss = loss_val
                print(i, loss_val)

                # print "acc", s.run([accuracy], feed_dict={inputs: data.test.images, targets: data.test.labels})

            #acc = s.run(accuracy, feed_dict={inputs: data.test.images, targets: data.test.labels})
            print "min loss", min_loss
            #print "final accuracy ", acc
            self.assertLess(min_loss, 20.0)
Developer: DanielSlater, Project: tensordynamic, Lines: 56, Source: test_ladder_layer.py

Example 15: apply_gradients

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    var_list = [ v for _,v in grads_and_vars]
    d_vars = []
    g_vars = []
    for grad,var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
        elif var in self.gan.g_vars():
            g_vars += [var]
        else:
            raise("Couldn't find var in g_vars or d_vars")
    w = [tf.Variable(self.config.start_at or 0.0), tf.Variable(self.config.start_at or 0.0)]

    Vidv = [self.gan.trainer.d_loss, self.gan.trainer.g_loss]
    #Vsoc = [1/2. * self.gan.trainer.d_loss + 1/2.* self.gan.trainer.g_loss, -1/2. * self.gan.trainer.d_loss - 1/2.* self.gan.trainer.g_loss]
    Vsoc = [1/2. * self.gan.trainer.d_loss + 1/2.* self.gan.trainer.g_loss, 1/2. * self.gan.trainer.d_loss + 1/2.* self.gan.trainer.g_loss]

    wlr = self.config.w_learn_rate or 0.01
    wt1 = [w[0] + wlr * (Vidv[0] - Vsoc[0]), w[1] + wlr * (Vidv[1] - Vsoc[1])]
    def clamped(net):
        return tf.maximum(self.config.min or 0., tf.minimum(net, self.config.max or 1.))

    self._prepare()

    wt1 = [clamped(wt1[0]),clamped(wt1[1])]
    self.gan.add_metric('wt0', wt1[0])
    self.gan.add_metric('wt1', wt1[1])
    op1 = tf.group(*[tf.assign(w, v) for w,v in zip(w, wt1)]) # store variables

    with tf.get_default_graph().control_dependencies([op1]):
        Vi = [(1. - w[0]) * Vidv[0] + w[0] * Vsoc[0],
              (1. - w[1]) * Vidv[1] + w[1] * Vsoc[1]]
        if self.config.reverse_w:
            Vi = [(w[0]) * Vidv[0] + (1.0-w[0]) * Vsoc[0],
                  (w[1]) * Vidv[1] + (1.0-w[1]) * Vsoc[1]]
        self.gan.add_metric('w0', w[0])
        self.gan.add_metric('w1', w[1])

        new_grads = tf.gradients(Vi[0], d_vars) + tf.gradients(Vi[1], g_vars)
        self.gan.trainer.d_loss = Vi[0]
        self.gan.trainer.g_loss = Vi[1]
        new_grads_and_vars = list(zip(new_grads, var_list)).copy()
        op3 = self.optimizer.apply_gradients(new_grads_and_vars.copy(), global_step=global_step, name=name)
        with tf.get_default_graph().control_dependencies([op3]):
            if(self.config.w_l1):
                # return to selfish state
                wt1 = [wt1[0] + self.config.w_l1 * ((self.config.l1_default or 0.0)-wt1[0]),
                       wt1[1] + self.config.w_l1 * ((self.config.l1_default or 0.0)-wt1[1])]
                op4 = tf.group(*[tf.assign(w, v) for w,v in zip(w, wt1)]) # store variables
                with tf.get_default_graph().control_dependencies([op4]):
                    self.gan.add_metric('l1w0', w[0])
                    self.gan.add_metric('l1w1', w[1])
                    return tf.no_op()

            else:
                return tf.no_op()
Developer: 255BITS, Project: hyperchamber-gan, Lines: 56, Source: social_optimizer.py


Note: The tensorflow.group function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not republish without permission.