

Python slim.fully_connected Function Code Examples

This article collects typical usages of the Python function tensorflow.contrib.slim.fully_connected found in open-source code. If you are unsure what fully_connected does, how to call it, or want to see it used in real projects, the curated examples below should help.


The following presents 15 code examples of the fully_connected function, ordered roughly by popularity.
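Before the project examples, here is a minimal sketch of the typical call pattern. It assumes TensorFlow 1.x, where tf.contrib.slim still exists; the placeholder shape and layer sizes are illustrative only and are not taken from any project below.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# A batch of 128-dimensional feature vectors (shape is illustrative).
inputs = tf.placeholder(tf.float32, [None, 128])

# fully_connected creates its own weight and bias variables and applies
# ReLU by default; pass activation_fn=None to get a plain linear layer.
hidden = slim.fully_connected(inputs, 64, scope='fc1')
logits = slim.fully_connected(hidden, 10, activation_fn=None, scope='fc2')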

Example 1: _build_graph

    def _build_graph(self):

        normalized_input = tf.div(self._input, 255.0)

        #d = tf.divide(1.0, tf.sqrt(8. * 8. * 4.))
        conv1 = slim.conv2d(normalized_input, 16, [8, 8], activation_fn=tf.nn.relu,
                            padding='VALID', stride=4, biases_initializer=None)
                            # weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))

        #d = tf.divide(1.0, tf.sqrt(4. * 4. * 16.))
        conv2 = slim.conv2d(conv1, 32, [4, 4], activation_fn=tf.nn.relu,
                            padding='VALID', stride=2, biases_initializer=None)
                            #weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))

        flattened = slim.flatten(conv2)

        #d = tf.divide(1.0, tf.sqrt(2592.))
        fc1 = slim.fully_connected(flattened, 256, activation_fn=tf.nn.relu, biases_initializer=None)
                                   #weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))

        #d = tf.divide(1.0, tf.sqrt(256.))
        # estimate of the value function
        self.value_func_prediction = slim.fully_connected(fc1, 1, activation_fn=None, biases_initializer=None)
                                                          #weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))

        # softmax output with one entry per action representing the probability of taking an action
        self.policy_predictions = slim.fully_connected(fc1, self.output_size, activation_fn=tf.nn.softmax,
                                                       biases_initializer=None)
Developer: thalles753, Project: machine-learning, Lines: 28, Source: A3C_Network.py

Example 2: _create_transformation

  def _create_transformation(self, input, n_output, reuse, scope_prefix):
    """Create the deterministic transformation between stochastic layers.

    If self.hparams.nonlinear:
        two tanh hidden layers followed by a linear output layer
    Else:
        a single linear layer
    """
    if self.hparams.nonlinear:
      h = slim.fully_connected(input,
                               self.hparams.n_hidden,
                               reuse=reuse,
                               activation_fn=tf.nn.tanh,
                               scope='%s_nonlinear_1' % scope_prefix)
      h = slim.fully_connected(h,
                               self.hparams.n_hidden,
                               reuse=reuse,
                               activation_fn=tf.nn.tanh,
                               scope='%s_nonlinear_2' % scope_prefix)
      h = slim.fully_connected(h,
                               n_output,
                               reuse=reuse,
                               activation_fn=None,
                               scope='%s' % scope_prefix)
    else:
      h = slim.fully_connected(input,
                               n_output,
                               reuse=reuse,
                               activation_fn=None,
                               scope='%s' % scope_prefix)
    return h
Developer: ALISCIFP, Project: models, Lines: 31, Source: rebar.py

Example 3: create_network

    def create_network(self, name):
        with tf.variable_scope(name) as scope:

            inputs = tf.placeholder(fl32, [None, self.state_dim], 'inputs')
            actions = tf.placeholder(fl32, [None, self.action_dim], 'actions')

            with slim.arg_scope(
                [slim.fully_connected],
                activation_fn=relu,
                weights_initializer=uniform,
                weights_regularizer=None
            ):

                net = tf.concat([inputs, actions], 1)
                net = slim.fully_connected(net, 400)
                net = slim.fully_connected(net, 300)
                '''net = slim.fully_connected(inputs, 400)
                w1 = tf.get_variable(
                    "w1", shape=[400, 300], initializer=uniform
                )
                w2 = tf.get_variable(
                    "w2", shape=[self.action_dim, 300], initializer=uniform
                )
                b = tf.get_variable(
                    "b", shape=[300], initializer=constant
                )
                net = relu(tf.matmul(net, w1) + tf.matmul(actions, w2) + b)'''
                out = slim.fully_connected(net, 1, activation_fn=None)

        return (inputs, actions, out, scope.name)
Developer: jpp46, Project: CurrentProjects, Lines: 30, Source: networks.py

Example 4: __init__

    def __init__(self):
        # policy network
        self.observations = tf.placeholder(tf.float32, [None, 4], name='input_x')
        self.input_y = tf.placeholder(tf.float32, [None, 1], name='input_y')
        self.reward = tf.placeholder(tf.float32, name='reward_signal')
        l1 = slim.fully_connected(self.observations,
                                  hidden,
                                  biases_initializer=None,
                                  activation_fn=tf.nn.relu)
        self.score = slim.fully_connected(l1,
                                          1,
                                          biases_initializer=None)
        self.probability = tf.nn.sigmoid(self.score)
        # Evaluates to log(1 - probability) when input_y == 1 and to
        # log(probability) when input_y == 0; with input_y defined as
        # 1 - action, this is the log-probability of the action taken.
        loglike = tf.log(self.input_y * (self.input_y - self.probability)
                         + (1 - self.input_y) * (self.input_y + self.probability))
        loss = -tf.reduce_mean(loglike * self.reward)

        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        self.w1grad = tf.placeholder(tf.float32, name='batch_grad1')
        self.w2grad = tf.placeholder(tf.float32, name='batch_grad2')
        batch_grad = [self.w1grad, self.w2grad]

        self.tvars = tf.trainable_variables()
        self.newgrads = tf.gradients(loss, self.tvars)
        self.update = self.optimizer.apply_gradients(zip(batch_grad, self.tvars))
Developer: yaoyaowd, Project: tensorflow_demo, Lines: 25, Source: 3_model_rl.py

Example 5: discriminative_network

def discriminative_network(x):
  """Outputs probability in logits."""
  h0 = slim.fully_connected(x, H * 2, activation_fn=tf.tanh)
  h1 = slim.fully_connected(h0, H * 2, activation_fn=tf.tanh)
  h2 = slim.fully_connected(h1, H * 2, activation_fn=tf.tanh)
  h3 = slim.fully_connected(h2, 1, activation_fn=None)
  return h3
Developer: ekostem, Project: edward, Lines: 7, Source: gan_wasserstein_synthetic.py

Example 6: _build_layers

    def _build_layers(self, inputs, num_outputs, options):
        """Process the flattened inputs.

        Note that dict inputs will be flattened into a vector. To define a
        model that processes the components separately, use _build_layers_v2().
        """

        hiddens = options.get("fcnet_hiddens")
        activation = get_activation_fn(options.get("fcnet_activation"))

        with tf.name_scope("fc_net"):
            i = 1
            last_layer = inputs
            for size in hiddens:
                label = "fc{}".format(i)
                last_layer = slim.fully_connected(
                    last_layer,
                    size,
                    weights_initializer=normc_initializer(1.0),
                    activation_fn=activation,
                    scope=label)
                i += 1
            label = "fc_out"
            output = slim.fully_connected(
                last_layer,
                num_outputs,
                weights_initializer=normc_initializer(0.01),
                activation_fn=None,
                scope=label)
            return output, last_layer
Developer: jamescasbon, Project: ray, Lines: 30, Source: fcnet.py

Example 7: __init__

    def __init__(self, lr, s_size, a_size, h_size):
        # These lines establish the feed-forward part of the network: the agent takes a state and produces an action.
        self.state_in = tf.placeholder(shape=[None, s_size], dtype=tf.float32)
        hidden = slim.fully_connected(self.state_in, h_size, biases_initializer=None, activation_fn=tf.nn.relu)
        self.output = slim.fully_connected(hidden, a_size, activation_fn=tf.nn.softmax, biases_initializer=None)
        self.chosen_action = tf.argmax(self.output, 1)

        # The next six lines establish the training procedure. We feed the reward and chosen action into the network
        # to compute the loss, and use it to update the network.
        self.reward_holder = tf.placeholder(shape=[None], dtype=tf.float32)
        self.action_holder = tf.placeholder(shape=[None], dtype=tf.int32)

        self.indexes = tf.range(0, tf.shape(self.output)[0]) * tf.shape(self.output)[1] + self.action_holder
        self.responsible_outputs = tf.gather(tf.reshape(self.output, [-1]), self.indexes)

        self.loss = -tf.reduce_mean(tf.log(self.responsible_outputs) * self.reward_holder)

        tvars = tf.trainable_variables()
        self.gradient_holders = []
        for idx2, var in enumerate(tvars):
            placeholder = tf.placeholder(tf.float32, name=str(idx2) + '_holder')
            self.gradient_holders.append(placeholder)

        self.gradients = tf.gradients(self.loss, tvars)

        optimizer = tf.train.AdamOptimizer(learning_rate=lr)
        self.update_batch = optimizer.apply_gradients(zip(self.gradient_holders, tvars))
Developer: dangraf, Project: PycharmProjects, Lines: 27, Source: cartpole.py

Example 8: __init__

 def __init__(self, actions, td_discount_rate = 0.99, learningRate= 0.0001, epsilonGreedy = 0.1):
     self.learningRate = learningRate
     self.td_discount_rate = td_discount_rate
     self.epsilonGreedy = epsilonGreedy
     
     self.input = tf.placeholder('float', shape=[None,4])      
     x1 = slim.fully_connected(self.input, 32, scope='fc/fc_1')
     x1 = tf.nn.relu(x1)
     self.Qout = slim.fully_connected(x1, actions)
     
     self.predict = tf.argmax(self.Qout,1)
     self.logQVal = tf.summary.scalar('QVal', tf.reduce_mean(self.predict) )
     
     # placeholders for the next-state Q-values, the logged epsilon, and the rewards
     self.newQout = tf.placeholder(shape=[None,2],dtype=tf.float32)
     self.epsilonInput = tf.placeholder(dtype=tf.float32, name="epsilonInput")
     self.newstateReward = tf.placeholder(shape=[None],dtype=tf.float32)
     # TD target: r + gamma * max_a' Q(s', a'); tf.reduce_max is needed here,
     # since np.amax does not operate on tensors
     self.tdTarget = self.newstateReward + td_discount_rate * tf.reduce_max(self.newQout, axis=1)
     self.td_error = tf.square(self.tdTarget - tf.reduce_max(self.Qout, axis=1))
     # turn the TD errors into a single scalar value
     self.loss = tf.reduce_mean(self.td_error)
     
     self.tdLogger= tf.summary.scalar('tdLoss', self.loss)
     self.tdTargetLogger= tf.summary.histogram('tdTarget', self.tdTarget)
     self.epsilonLogger= tf.summary.scalar('epsilon', self.epsilonInput)
     
     # minimize the loss (the mean of the TD errors)
     self.trainer = tf.train.AdamOptimizer(learning_rate=self.learningRate)
     self.updateModel = self.trainer.minimize(self.loss)
     
     
     self.memory = Memory(memory_capacity)
Developer: flutist, Project: CartPole-v0, Lines: 32, Source: q-network.py

Example 9: fprop

 def fprop(self, x, **kwargs):
     del kwargs
     with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
         net = slim.fully_connected(x, 60)
         logits = slim.fully_connected(net, 10, activation_fn=None)
         return {self.O_LOGITS: logits,
                 self.O_PROBS: tf.nn.softmax(logits)}
Developer: limin24kobe, Project: cleverhans, Lines: 7, Source: test_attacks.py

Example 10: __init__

    def __init__(self,
                 env,
                 hidden_size=8,
                 learning_rate=0.01,
                 gamma=0.99):
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.n
        self.gamma = gamma
        self.history = []

        # Define network
        self.state_in = tf.placeholder(shape=[None, self.state_dim], dtype=tf.float32)
        hidden = slim.fully_connected(self.state_in, hidden_size,
                                      biases_initializer=None,
                                      activation_fn=tf.nn.relu)
        self.output = slim.fully_connected(hidden, self.action_dim,
                                           biases_initializer=None,
                                           activation_fn=tf.nn.softmax)
        self.reward = tf.placeholder(shape=[None], dtype=tf.float32)
        self.actual_action = tf.placeholder(shape=[None], dtype=tf.int32)
        self.indexes = tf.range(0, tf.shape(self.output)[0]) * self.action_dim \
                       + self.actual_action
        self.actual_output = tf.gather(tf.reshape(self.output, [-1]), self.indexes)
        self.loss = -tf.reduce_mean(tf.log(self.actual_output)*self.reward)

        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        self.train_op = slim.learning.create_train_op(self.loss, self.optimizer)

        self.session = tf.InteractiveSession()
        self.session.run(tf.global_variables_initializer())
Developer: yaoyaowd, Project: tensorflow_demo, Lines: 30, Source: 2_dqn.py

Example 11: localization_VGG16

	def localization_VGG16(self,inputs):

		with tf.variable_scope('localization_network'):
			with slim.arg_scope([slim.conv2d, slim.fully_connected],
								 activation_fn = tf.nn.relu,
								 weights_initializer = tf.constant_initializer(0.0)):
				
				net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
				net = slim.max_pool2d(net, [2, 2], scope='pool1')
				net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
				net = slim.max_pool2d(net, [2, 2], scope='pool2')
				net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
				net = slim.max_pool2d(net, [2, 2], scope='pool3')
				net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
				net = slim.max_pool2d(net, [2, 2], scope='pool4')
				net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
				net = slim.max_pool2d(net, [2, 2], scope='pool5')
				shape = int(np.prod(net.get_shape()[1:]))

				net = slim.fully_connected(tf.reshape(net, [-1, shape]), 4096, scope='fc6')
				net = slim.fully_connected(net, 1024, scope='fc7')
				identity = np.array([[1., 0., 0.],
									[0., 1., 0.]])
				identity = identity.flatten()
				net = slim.fully_connected(net, 6, biases_initializer = tf.constant_initializer(identity) , scope='fc8')
			
		return net
Developer: dmehr, Project: HyperFace-TensorFlow-implementation, Lines: 27, Source: model.py

Example 12: network_det

	def network_det(self,inputs,reuse=False):

		if reuse:
			tf.get_variable_scope().reuse_variables()

		with slim.arg_scope([slim.conv2d, slim.fully_connected],
							 activation_fn = tf.nn.relu,
							 weights_initializer = tf.truncated_normal_initializer(0.0, 0.01)):
			
			conv1 = slim.conv2d(inputs, 96, [11,11], 4, padding= 'VALID', scope='conv1')
			max1 = slim.max_pool2d(conv1, [3,3], 2, padding= 'VALID', scope='max1')

			conv2 = slim.conv2d(max1, 256, [5,5], 1, scope='conv2')
			max2 = slim.max_pool2d(conv2, [3,3], 2, padding= 'VALID', scope='max2')
			conv3 = slim.conv2d(max2, 384, [3,3], 1, scope='conv3')

			conv4 = slim.conv2d(conv3, 384, [3,3], 1, scope='conv4')
			conv5 = slim.conv2d(conv4, 256, [3,3], 1, scope='conv5')
			pool5 = slim.max_pool2d(conv5, [3,3], 2, padding= 'VALID', scope='pool5')
			
			shape = int(np.prod(pool5.get_shape()[1:]))
			fc6 = slim.fully_connected(tf.reshape(pool5, [-1, shape]), 4096, scope='fc6')
			
			fc_detection = slim.fully_connected(fc6, 512, scope='fc_det1')
			out_detection = slim.fully_connected(fc_detection, 2, scope='fc_det2', activation_fn = None)
			
		return out_detection
Developer: dmehr, Project: HyperFace-TensorFlow-implementation, Lines: 27, Source: model_prediction.py

Example 13: _init

    def _init(self, inputs, num_outputs, options):
        hiddens = options.get("fcnet_hiddens", [256, 256])

        fcnet_activation = options.get("fcnet_activation", "tanh")
        if fcnet_activation == "tanh":
            activation = tf.nn.tanh
        elif fcnet_activation == "relu":
            activation = tf.nn.relu

        with tf.name_scope("fc_net"):
            i = 1
            last_layer = inputs
            for size in hiddens:
                label = "fc{}".format(i)
                last_layer = slim.fully_connected(
                    last_layer, size,
                    weights_initializer=normc_initializer(1.0),
                    activation_fn=activation,
                    scope=label)
                i += 1
            label = "fc_out"
            output = slim.fully_connected(
                last_layer, num_outputs,
                weights_initializer=normc_initializer(0.01),
                activation_fn=None, scope=label)
            return output, last_layer
Developer: adgirish, Project: ray, Lines: 26, Source: fcnet.py

Example 14: encoder

    def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = images
                    
                    net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
                    
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')

                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')

                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                    
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
Developer: NickyGeorge, Project: facenet, Lines: 29, Source: dfc_vae_resnet.py

Example 15: build_decoder_rnn

    def build_decoder_rnn(self, first_step):

        with tf.variable_scope("cnn"):
            image_emb = slim.fully_connected(self.fc7, self.input_encoding_size, reuse=True, activation_fn=None, scope='encode_image')
        with tf.variable_scope("rnnlm"):
            if first_step:
                rnn_input = image_emb # At the first step, the input is the embedded image
            else:
                # The input of later time step, is the embedding of the previous word
                # The previous word is a placeholder
                self.decoder_prev_word = tf.placeholder(tf.int32, [None])
                rnn_input = tf.nn.embedding_lookup(self.Wemb, self.decoder_prev_word)

            batch_size = tf.shape(rnn_input)[0]

            tf.get_variable_scope().reuse_variables()

            if not first_step:
                # If not first step, the states are also placeholders.
                self.decoder_initial_state = initial_state = utils.get_placeholder_state(self.cell.state_size)
                self.decoder_flattened_state = utils.flatten_state(initial_state)
            else:
                # The states for the first step are zero.
                initial_state = self.cell.zero_state(batch_size, tf.float32)

            outputs, state = tf.contrib.legacy_seq2seq.rnn_decoder([rnn_input], initial_state, self.cell)
            logits = slim.fully_connected(outputs[0], self.vocab_size + 1, activation_fn = None, scope = 'logit')
            decoder_probs = tf.reshape(tf.nn.softmax(logits), [batch_size, self.vocab_size + 1])
            decoder_state = utils.flatten_state(state)
        # output the current word distribution and states
        return [decoder_probs, decoder_state]
Developer: ruotianluo, Project: neuraltalk2-tensorflow, Lines: 31, Source: ShowTellModel.py


Note: The tensorflow.contrib.slim.fully_connected examples above were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and redistribution or use should follow each project's License. Do not reproduce without permission.