

Python tensorflow.trainable_variables Function Code Examples

This article collects typical usage examples of the Python function tensorflow.trainable_variables. If you are wondering what trainable_variables does, how to call it, or what real-world usage looks like, the curated code examples below should help.


The following presents 15 code examples of the trainable_variables function, sorted by popularity by default.
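
Before the examples, here is a minimal standalone sketch of what tf.trainable_variables() returns, assuming TensorFlow 1.x graph mode (or tf.compat.v1 in TensorFlow 2); the variable names are illustrative only:

import tensorflow as tf

with tf.variable_scope("encoder"):
    w = tf.get_variable("w", shape=[4, 8])                    # trainable by default
    b = tf.get_variable("b", shape=[8])
step = tf.Variable(0, name="global_step", trainable=False)    # trainable=False, so excluded

print([v.name for v in tf.trainable_variables()])
# -> ['encoder/w:0', 'encoder/b:0']

# Since TF 1.4 the list can also be filtered by scope, as several examples below do:
encoder_vars = tf.trainable_variables(scope="encoder")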

Example 1: __init__

 def __init__(self,learning_rate, cost,feed,sess,m,comm,size,rank):
     self.Y=[]
     self.S=[]
     self.YS=[]
     self.cost=cost
     self.sess=sess
     self.NumIter=0
     self.m=m
     self.counter=0
     self.gradientEval=0
     self.functionEval=0
     self.last_func=0
     self.innerEval=0
     self.HessianEval=0
     self.last_z1=0.01
     self.memorySize=0
     self.rank=rank
     self.comm=comm
     self.size=size
     v=[]
     self.assign_placeholders=[]
     assign_op=[]
     for t in tf.trainable_variables():
         v.append(sess.run(t))
         self.assign_placeholders.append(tf.placeholder(shape=v[-1].shape,dtype="float32"))
         assign_op.append(t.assign(self.assign_placeholders[-1]))
     self.assign=tf.group(*assign_op)
     self.var=np.array(v)
     # self.var=np.load('var.npy')
     np.save('var.npy',self.var)
     comm.scatter(['Init' for i in range(size)],root=rank)
     self.gradient=tf.gradients(cost,tf.trainable_variables(),gate_gradients=True)
     self.learningRate=learning_rate
     self.old_grad=None
Author: Vendea, Project: summer-research-2016, Lines: 34, Source: lbfgs_optimizer.py

Example 2: train

def train(lr, total_loss, global_step):
    # Variables that affect learning rate.

    # Compute gradients.
    #with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)

    # Add histograms for gradients.
    for i, (grad, var) in enumerate(grads):
        if grad is not None:
            tf.histogram_summary(var.op.name + '/gradients', grad)
            grads[i] = (tf.clip_by_norm(grad, 5), var)

    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')

    return train_op
Author: danfeiX, Project: drl, Lines: 29, Source: dqn.py

Example 3: __init__

    def __init__(self, sess, state_dim, action_dim, learning_rate, tau, num_actor_vars):
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.learning_rate = learning_rate
        self.tau = tau

        # Create the critic network
        self.inputs, self.action, self.out = self.create_critic_network()

        self.network_params = tf.trainable_variables()[num_actor_vars:]

        # Target Network
        self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
        
        self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]

        # Op for periodically updating target network with online network weights with regularization
        self.update_target_network_params = \
            [self.target_network_params[i].assign(tf.mul(self.network_params[i], self.tau) + tf.mul(self.target_network_params[i], 1. - self.tau))
                for i in range(len(self.target_network_params))]
    
        # Network target (y_i)
        self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])

        # Define loss and optimization Op
        self.loss = tflearn.mean_square(self.predicted_q_value, self.out)
        self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

        # Get the gradient of the net w.r.t. the action
        self.action_grads = tf.gradients(self.out, self.action)
Author: ataitler, Project: DQN, Lines: 31, Source: ddpg.py
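
The [num_actor_vars:] slicing in this example works because tf.trainable_variables() returns variables in creation order: the actor network is built before the critic, so the critic's parameters occupy the tail of the list. A tiny illustrative sketch of that assumption (the variable names are made up):

import tensorflow as tf

actor_w = tf.get_variable("actor_w", shape=[2, 2])    # created first
critic_w = tf.get_variable("critic_w", shape=[2, 2])  # created second

num_actor_vars = 1
critic_params = tf.trainable_variables()[num_actor_vars:]
print([v.name for v in critic_params])                # -> ['critic_w:0']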

Example 4: build_gen_graph

    def build_gen_graph(self):
        # forward pass through generator
        # returns a (batch_size, sequence_length, input_dim) for generated
        self.generated, self.timestep_probs, self.predicted_rewards = self.generate()

        # get the predictions from the discriminator
        # returns a (batch_size, 1) output
        self.gen_scores = self.discriminate(self.generated, reuse=False)

        # formulate the policy gradient loss
        self.gen_train_loss_out, self.baseline_loss = self.gen_train_loss(self.gen_scores,
             self.predicted_rewards)

        # get generative parameters and baseline params
        self.g_params = [p for p in tf.trainable_variables() if 'g' in p.name and 'b' not in p.name]
        self.b_params = [p for p in tf.trainable_variables() if 'b' in p.name]

        # create the gen train op
        self.gen_optimize_rewards(self.gen_train_loss_out)

        # create the baseline train op
        if self.opts.with_baseline:
            self.optimize_baseline(self.baseline_loss)

        # initialize all variable and prep to save model
        tf.initialize_all_variables().run()
Author: wulfebw, Project: adversarial_rl, Lines: 26, Source: discrete_rgan.py

Example 5: train

def train(total_loss, global_step):
  """Train CIFAR-10 model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.
  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  tf.scalar_summary('learning_rate', lr)

  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)

  # Compute gradients.
  with tf.control_dependencies([loss_averages_op]):
    # opt = tf.train.GradientDescentOptimizer(lr)
    opt = tf.train.AdamOptimizer(learning_rate=0.0001,
                                       beta1=0.9,
                                       beta2=0.999,
                                       epsilon=1e-08,
                                       use_locking=False,
                                       name='Adam')#.minimize(loss,global_step=batch)
    grads = opt.compute_gradients(total_loss)

  # Apply gradients.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.histogram_summary(var.op.name, var)

  # Add histograms for gradients.
  for grad, var in grads:
    if grad is not None:
      tf.histogram_summary(var.op.name + '/gradients', grad)

  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())

  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')

  return train_op
Author: Thrasi, Project: thesis-project, Lines: 60, Source: cifar10.py
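
Examples 2 and 5 both end with the same canonical CIFAR-10 pattern: apply the gradients, then shadow every trainable variable with an exponential moving average. A minimal sketch of that pattern, with an illustrative decay value (0.9999 stands in for MOVING_AVERAGE_DECAY):

import tensorflow as tf

w = tf.get_variable("w", shape=[3], initializer=tf.zeros_initializer())
global_step = tf.train.get_or_create_global_step()

ema = tf.train.ExponentialMovingAverage(0.9999, global_step)
maintain_averages_op = ema.apply(tf.trainable_variables())

# ema.average(w) is the shadow variable; ema.average_name(w) is the name
# under which example 15 later restores the averaged weights at eval time.
shadow_w = ema.average(w)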

Example 6: __init__

    def __init__(self,
                 sess,
                 state_dim,
                 action_dim,
                 action_high,
                 action_low,
                 learning_rate,
                 grad_norm_clip,
                 tau,
                 batch_size,
                 name=None):
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.a_high = action_high
        self.a_low = action_low
        self.learning_rate = learning_rate
        self.grad_norm_clip = grad_norm_clip
        self.tau = tau
        self.batch_size = batch_size

        # create networks
        net_name = 'actor' if name is None else name
        with tf.variable_scope(net_name):
            self.obs, self.action = self.create_actor_network()
        self.params = tf.trainable_variables(scope=net_name)
        with tf.variable_scope(net_name + '_target'):
            self.target_obs, self.target_action = self.create_actor_network()
        self.target_params = tf.trainable_variables(scope=net_name + '_target')

        # create ops
        (self.update_target_op,
         self.action_gradient,
         self.train_op) = self.create_actor_ops()
Author: zhang01GA, Project: cloudml-samples, Lines: 34, Source: actor_critic.py

Example 7: run_model

  def run_model(self, train_config, eval_config):
    with tf.Graph().as_default() as g:
      train_model = base_model(params=train_config, mode="train", hvd=None)
      train_model.compile()
      eval_model = base_model(params=eval_config, mode="eval", hvd=None)
      eval_model.compile(force_var_reuse=True)

      train(train_model, eval_model)
      saver = tf.train.Saver()
      checkpoint = tf.train.latest_checkpoint(train_model.params['logdir'])
      with self.test_session(g, use_gpu=True) as sess:
        saver.restore(sess, checkpoint)
        sess.run([train_model.get_data_layer(i).iterator.initializer
                  for i in range(train_model.num_gpus)])
        sess.run([eval_model.get_data_layer(i).iterator.initializer
                  for i in range(eval_model.num_gpus)])

        weights = sess.run(tf.trainable_variables())
        loss = sess.run(train_model.loss)
        eval_losses = sess.run(eval_model.eval_losses)
        eval_loss = np.mean(eval_losses)
        weights_new = sess.run(tf.trainable_variables())

        # check that the weights have not changed from just computing the loss
        for w, w_new in zip(weights, weights_new):
          npt.assert_allclose(w, w_new)
      eval_dict = evaluate(eval_model, checkpoint)
    return loss, eval_loss, eval_dict
Author: fotwo, Project: OpenSeq2Seq, Lines: 28, Source: speech2text_test.py

Example 8: testCustomGetter

  def testCustomGetter(self):
    custom_getter = snt.custom_getters.Context(snt.custom_getters.stop_gradient)
    module = snt.nets.ConvNet2D(output_channels=self.output_channels,
                                kernel_shapes=self.kernel_shapes,
                                rates=self.rates,
                                strides=self.strides,
                                paddings=self.paddings,
                                custom_getter=custom_getter)

    input_shape = [10, 100, 100, 3]
    input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)

    if tf.executing_eagerly():
      with tf.GradientTape() as tape0:
        out0 = module(input_to_net)
      with tf.GradientTape() as tape1:
        with custom_getter:
          out1 = module(input_to_net)
      all_vars = tf.trainable_variables()
      out0_grads = tape0.gradient(out0, all_vars)
      out1_grads = tape1.gradient(out1, all_vars)

    else:
      out0 = module(input_to_net)
      with custom_getter:
        out1 = module(input_to_net)
      all_vars = tf.trainable_variables()
      out0_grads = tf.gradients(out0, all_vars)
      out1_grads = tf.gradients(out1, all_vars)

    for grad in out0_grads:
      self.assertNotEqual(None, grad)
    self.assertEqual([None] * len(out1_grads), out1_grads)
Author: ccchang0111, Project: sonnet, Lines: 33, Source: convnet_test.py

Example 9: __init__

    def __init__(self, actions, name=NAME, learning_rate=1e-4,  x_dim=210, y_dim=160, eps_start=1.0, eps_decay=0.0000001, eps_end=0.1, num_channels=3, should_train=True, from_checkpoint=None, player_id=1):
        Agent.__init__(self, name=name, actions=[])
        self.learning_rate = learning_rate
        self.x_dim, self.y_dim = x_dim, y_dim
        self.actions, self.num_actions = actions, len(actions)
        self.hidden_layers = [32, 32]
        self.num_channels = num_channels
        self.eps_start, self.epsilon_decay, self.epsilon_end = eps_start, eps_decay, eps_end
        self.should_train = should_train
        self.reset()

        # Parameters for updating target network.
        tau = 0.001

        # TODO: Update to support player_id > 2.
        # NOTE: This is a bit of a hack to update the variables in the target
        # network. It can be fixed by using scope and Tensorflow 1.4 which takes
        # a scope argument in tf.trainable_variables().
        if player_id == 2:
            vs = tf.trainable_variables()
            self.target_ops = update_target_graph(vs[len(vs)//2:], tau)
        else:
            self.target_ops = update_target_graph(tf.trainable_variables(), tau)

        # Load model from a checkpoint
        if not (from_checkpoint is None):
            self.saver.restore(self.sess, from_checkpoint)
            print('Restored model from checkpoint: {}'.format(from_checkpoint))
Author: david-abel, Project: simple_rl, Lines: 28, Source: DQNAgentClass.py
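
The NOTE in this example points at the cleaner fix: build each player's network inside its own variable scope and pass that scope to tf.trainable_variables() (TF >= 1.4), instead of slicing the global list in half. A hedged sketch of that idea; the scope names and the toy network below are assumptions, and update_target_graph is the project's own helper:

import tensorflow as tf

def build_q_net(n_inputs=4, n_actions=2):
    # toy network standing in for the agent's real model
    x = tf.placeholder(tf.float32, [None, n_inputs])
    return tf.layers.dense(x, n_actions, name="q")

for pid in (1, 2):
    with tf.variable_scope("player_%d" % pid):
        build_q_net()

# Select each player's parameters directly by scope (TF >= 1.4).
p1_vars = tf.trainable_variables(scope="player_1")
p2_vars = tf.trainable_variables(scope="player_2")
# e.g. self.target_ops = update_target_graph(p1_vars, tau)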

Example 10: optim

def optim(loss, **kwargs):
    r"""Applies gradients to variables.

    Args:
        loss: A 0-D `Tensor` containing the value to minimize.
        kwargs:
          optim: A name for optimizer. 'MaxProp' (default), 'AdaMax', 'Adam', or 'sgd'.
          lr: A Python Scalar (optional). Learning rate. Default is .001.
          beta1: A Python Scalar (optional). Default is .9.
          beta2: A Python Scalar (optional). Default is .99.
          category: A string or string list. Specifies the variables that should be trained (optional).
            Only if the name of a trainable variable starts with `category`, its value is updated.
            Default is '', which means all trainable variables are updated.
    """
    opt = Opt(kwargs)
    # opt += Opt(optim='MaxProp', lr=0.001, beta1=0.9, beta2=0.99, category='')

    # default training options
    opt += Opt(optim='MaxProp', lr=0.001, beta1=0.9, beta2=0.99, category='')

    # select optimizer
    # if opt.optim == 'MaxProp':
        # optim = tf.sg_optimize.MaxPropOptimizer(learning_rate=opt.lr, beta2=opt.beta2)
    # elif opt.optim == 'AdaMax':
        # optim = tf.sg_optimize.AdaMaxOptimizer(learning_rate=opt.lr, beta1=opt.beta1, beta2=opt.beta2)
    # elif opt.optim == 'Adam':
    if opt.optim == 'Adam':
        optim = tf.train.AdamOptimizer(learning_rate=opt.lr, beta1=opt.beta1, beta2=opt.beta2)
    else:
        optim = tf.train.GradientDescentOptimizer(learning_rate=opt.lr)

    # get trainable variables
    if isinstance(opt.category, (tuple, list)):
        var_list = []
        for cat in opt.category:
            var_list.extend([t for t in tf.trainable_variables() if t.name.startswith(cat)])
    else:
        var_list = [t for t in tf.trainable_variables() if t.name.startswith(opt.category)]

    # calc gradient
    gradient = optim.compute_gradients(loss, var_list=var_list)

    # add summary
    for v, g in zip(var_list, gradient):
        # exclude batch normal statics
        if 'mean' not in v.name and 'variance' not in v.name \
                and 'beta' not in v.name and 'gamma' not in v.name:
                prefix = ''
                # summary name
                name = prefix + ''.join(v.name.split(':')[:-1])
                # summary statistics
                # noinspection PyBroadException
                try:
                    tf.summary.scalar(name + '/grad', tf.global_norm([g]))
                    tf.summary.histogram(name + '/grad-h', g)
                except:
                    pass
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # gradient update op
    return optim.apply_gradients(gradient, global_step=global_step), global_step
Author: SiyuanWei, Project: tensorflow-101, Lines: 60, Source: optimizer.py

Example 11: __init__

	def __init__(self, num_actions, num_states, num_trainable_vars):		
		self._num_actions = num_actions
		self._num_states = num_states
	
		# Input (not the cell state)
		self.state = tf.placeholder(tf.float32, [1,num_states])

		# Weights for policy output layer
		self.W_fc1 = self.init_torch_matrix([rnn_size, num_actions])
		self.b_fc1 = self.init_torch_vector([num_actions], rnn_size)
		
		# Weights for value output layer
		self.W_fc2 = self.init_torch_matrix([rnn_size, 1])
		self.b_fc2 = self.init_torch_vector([1], rnn_size)	
		
		rnn_cell = tf.nn.rnn_cell.BasicRNNCell(rnn_size, activation=tf.identity) ### Use LSTM
		### Dropout?
		self.cell = tf.nn.rnn_cell.MultiRNNCell([rnn_cell] * num_rnn_layers)

		self.rnn_state = self.cell.zero_state(1, tf.float32)
		output, rnn_state_out = self.cell(self.state, self.rnn_state)
		
		self.rnn_state_out = rnn_state_out
	
		# policy (output)
		self.pi = tf.nn.softmax(tf.matmul(output, self.W_fc1) + self.b_fc1)

		# value - linear output layer
		self.v = tf.matmul(output, self.W_fc2) + self.b_fc2
		
		if num_trainable_vars[0] == None:
			num_trainable_vars[0] = len(tf.trainable_variables())
		
		self.trainable_vars = tf.trainable_variables()[-num_trainable_vars[0]:]
Author: cjratcliff, Project: async_deep_reinforce, Lines: 34, Source: ac_network.py

Example 12: assign_w2v_pretrained_vectors

def assign_w2v_pretrained_vectors(session, word2vec_model, embedding_key, vocab_path, vocab_size, id_to_check):
    embedding_variable = [v for v in tf.trainable_variables() if embedding_key in v.name]
    if len(embedding_variable) != 1:
        print("Word vector variable not found or too many. key: " + embedding_key)
        print("Existing embedding trainable variables:")
        print([v.name for v in tf.trainable_variables() if "embedding" in v.name])
        sys.exit(1)

    embedding_variable = embedding_variable[0]
    vectors = embedding_variable.eval()

    with gfile.GFile(vocab_path, mode="r") as vocab_file:
        counter = 0
        while counter < vocab_size:
            vocab_w = vocab_file.readline().replace("\n", "")
            # For each word in the vocabulary, check whether a word2vec vector exists and inject it;
            # otherwise keep the randomly initialised value unchanged.
            if vocab_w and word2vec_model.__contains__(vocab_w):
                w2w_word_vector = word2vec_model.get_vector(vocab_w)
                vectors[counter] = w2w_word_vector
            if counter == id_to_check:
                print(vectors[counter])
            counter += 1
    print("Reinitialising embeddings with pretrained")
    session.run(tf.assign(embedding_variable, vectors))
Author: jonathanmanfield, Project: deepreferendum, Lines: 25, Source: embeddings_utils.py

Example 13: __init__

    def __init__(self, max_gradient, batch_size, time_steps, vocabulary_size, hidden_units, layers):
        self.max_gradient = max_gradient
        self.layers = layers
        # Add vocabulary slots of out of vocabulary (index 1) and padding (index 0).
        vocabulary_size += 2

        with tf.name_scope("Parameters"):
            self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
            self.keep_probability = tf.placeholder(tf.float32, name="keep_probability")

        with tf.name_scope("Input"):
            self.input = tf.placeholder(tf.int64, shape=(batch_size, time_steps), name="input")
            self.targets = tf.placeholder(tf.int64, shape=(batch_size, time_steps), name="targets")
            self.init = tf.placeholder(tf.float32, shape=(), name="init")

        with tf.name_scope("Embedding"):
            self.embedding = tf.Variable(tf.random_uniform((vocabulary_size, hidden_units), -self.init, self.init),
                                         dtype=tf.float32,
                                         name="embedding")
            self.embedded_input = tf.nn.embedding_lookup(self.embedding, self.input, name="embedded_input")

        with tf.name_scope("RNN"):
            cell = tf.nn.rnn_cell.LSTMCell(hidden_units)
            cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.keep_probability)
            rnn_layers = tf.nn.rnn_cell.MultiRNNCell([cell] * layers)
            self.reset_state = rnn_layers.zero_state(batch_size, dtype=tf.float32)
            self.state = tf.placeholder(tf.float32, self.reset_state.get_shape(), "state")
            self.outputs, self.next_state = tf.nn.dynamic_rnn(rnn_layers, self.embedded_input, time_major=True,
                                                              initial_state=self.state)

        with tf.name_scope("Cost"):
            # Concatenate all the batches into a single row.
            self.flattened_outputs = tf.reshape(tf.concat(1, self.outputs), (-1, hidden_units),
                                                name="flattened_outputs")
            # Project the outputs onto the vocabulary.
            self.w = tf.get_variable("w", (hidden_units, vocabulary_size))
            self.b = tf.get_variable("b", vocabulary_size)
            self.predicted = tf.matmul(self.flattened_outputs, self.w) + self.b
            # Compare predictions to labels.
            self.loss = tf.nn.seq2seq.sequence_loss_by_example([self.predicted], [tf.concat(-1, self.targets)],
                                                               [tf.ones(batch_size * time_steps)])
            self.cost = tf.div(tf.reduce_sum(self.loss), batch_size, name="cost")

        with tf.name_scope("Train"):
            self.validation_perplexity = tf.Variable(dtype=tf.float32, initial_value=float("inf"), trainable=False,
                                                     name="validation_perplexity")
            tf.scalar_summary(self.validation_perplexity.op.name, self.validation_perplexity)
            self.training_epoch_perplexity = tf.Variable(dtype=tf.float32, initial_value=float("inf"), trainable=False,
                                                         name="training_epoch_perplexity")
            tf.scalar_summary(self.training_epoch_perplexity.op.name, self.training_epoch_perplexity)
            self.iteration = tf.Variable(0, dtype=tf.int64, name="iteration", trainable=False)
            self.gradients, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tf.trainable_variables()),
                                                       max_gradient, name="clip_gradients")
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
            self.train_step = optimizer.apply_gradients(zip(self.gradients, tf.trainable_variables()),
                                                        name="train_step",
                                                        global_step=self.iteration)

        self.initialize = tf.initialize_all_variables()
        self.summary = tf.merge_all_summaries()
Author: wpm, Project: tfrnnlm, Lines: 60, Source: rnn.py

Example 14: testFunctionalDenseTwiceReuse

 def testFunctionalDenseTwiceReuse(self):
   inputs = tf.random_uniform((5, 3), seed=1)
   core_layers.dense(inputs, 2, name='my_dense')
   vars1 = tf.trainable_variables()
   core_layers.dense(inputs, 2, name='my_dense', reuse=True)
   vars2 = tf.trainable_variables()
   self.assertEqual(vars1, vars2)
Author: Hwhitetooth, Project: tensorflow, Lines: 7, Source: core_test.py

Example 15: evaluate

def evaluate():
    with tf.Graph().as_default():
        # load the test data
        images, labels = data_inputs.inputs('data/train_kirin_norm_32.tfrecords')
        logits = model.inference(images)

        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        
        variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay)
        variables_to_restore = {}
        for v in tf.all_variables():
            if v in tf.trainable_variables():
                restore_name = variable_averages.average_name(v)
            else:
                restore_name = v.op.name
            variables_to_restore[restore_name] = v
        saver = tf.train.Saver(variables_to_restore)
        summary_op = tf.merge_all_summaries()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, graph_def=graph_def)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Author: pmnyc, Project: Machine_Learning_Test_Repository, Lines: 27, Source: eval.py


Note: The tensorflow.trainable_variables examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce this page without permission.